instance_id stringlengths 13 45 | pull_number int64 7 30.1k | repo stringclasses 83 values | version stringclasses 68 values | base_commit stringlengths 40 40 | created_at stringdate 2013-05-16 18:15:55 2025-01-08 15:12:50 | patch stringlengths 347 35.2k | test_patch stringlengths 432 113k | non_py_patch stringlengths 0 18.3k | new_components listlengths 0 40 | FAIL_TO_PASS listlengths 1 2.53k | PASS_TO_PASS listlengths 0 1.7k | problem_statement stringlengths 607 52.7k | hints_text stringlengths 0 57.4k | environment_setup_commit stringclasses 167 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
scikit-learn__scikit-learn-28936 | 28,936 | scikit-learn/scikit-learn | 1.6 | 0f27a26d0f78b07245158f5997066a3b2c1d76ba | 2024-05-02T20:19:03Z | diff --git a/doc/api_reference.py b/doc/api_reference.py
index 1aa6455fb7e44..583909cdcac65 100644
--- a/doc/api_reference.py
+++ b/doc/api_reference.py
@@ -121,6 +121,7 @@ def _get_submodule(module_name, submodule_name):
"TransformerMixin",
"clone",
"is_classifier",
+ "is_clusterer",
"is_regressor",
],
}
diff --git a/doc/whats_new/v1.6.rst b/doc/whats_new/v1.6.rst
index 0e6844155c6fa..53b0eb017fc57 100644
--- a/doc/whats_new/v1.6.rst
+++ b/doc/whats_new/v1.6.rst
@@ -74,6 +74,13 @@ Changelog
:pr:`123456` by :user:`Joe Bloggs <joeongithub>`.
where 123455 is the *pull request* number, not the issue number.
+:mod:`sklearn.base`
+...................
+
+- |Enhancement| Added a function :func:`base.is_clusterer` which determines
+ whether a given estimator is of category clusterer.
+ :pr:`28936` by :user:`Christian Veenhuis <ChVeen>`.
+
Thanks to everyone who has contributed to the maintenance and improvement of
the project since version 1.5, including:
diff --git a/sklearn/base.py b/sklearn/base.py
index 0aa7af1041368..d4245ade4e499 100644
--- a/sklearn/base.py
+++ b/sklearn/base.py
@@ -1374,13 +1374,17 @@ def is_classifier(estimator):
Examples
--------
>>> from sklearn.base import is_classifier
+ >>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
+ >>> kmeans = KMeans()
>>> is_classifier(classifier)
True
>>> is_classifier(regressor)
False
+ >>> is_classifier(kmeans)
+ False
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
@@ -1401,17 +1405,54 @@ def is_regressor(estimator):
Examples
--------
>>> from sklearn.base import is_regressor
+ >>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
+ >>> kmeans = KMeans()
>>> is_regressor(classifier)
False
>>> is_regressor(regressor)
True
+ >>> is_regressor(kmeans)
+ False
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
+def is_clusterer(estimator):
+ """Return True if the given estimator is (probably) a clusterer.
+
+ .. versionadded:: 1.6
+
+ Parameters
+ ----------
+ estimator : object
+ Estimator object to test.
+
+ Returns
+ -------
+ out : bool
+ True if estimator is a clusterer and False otherwise.
+
+ Examples
+ --------
+ >>> from sklearn.base import is_clusterer
+ >>> from sklearn.cluster import KMeans
+ >>> from sklearn.svm import SVC, SVR
+ >>> classifier = SVC()
+ >>> regressor = SVR()
+ >>> kmeans = KMeans()
+ >>> is_clusterer(classifier)
+ False
+ >>> is_clusterer(regressor)
+ False
+ >>> is_clusterer(kmeans)
+ True
+ """
+ return getattr(estimator, "_estimator_type", None) == "clusterer"
+
+
def is_outlier_detector(estimator):
"""Return True if the given estimator is (probably) an outlier detector.
| diff --git a/sklearn/tests/test_base.py b/sklearn/tests/test_base.py
index a1cd3b8fc8c7b..917da863ece3b 100644
--- a/sklearn/tests/test_base.py
+++ b/sklearn/tests/test_base.py
@@ -18,13 +18,16 @@
TransformerMixin,
clone,
is_classifier,
+ is_clusterer,
+ is_regressor,
)
+from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.exceptions import InconsistentVersionWarning
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
-from sklearn.svm import SVC
+from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils._mocking import MockDataFrame
from sklearn.utils._set_output import _get_output_config
@@ -259,12 +262,55 @@ def test_get_params():
test.set_params(a__a=2)
-def test_is_classifier():
- svc = SVC()
- assert is_classifier(svc)
- assert is_classifier(GridSearchCV(svc, {"C": [0.1, 1]}))
- assert is_classifier(Pipeline([("svc", svc)]))
- assert is_classifier(Pipeline([("svc_cv", GridSearchCV(svc, {"C": [0.1, 1]}))]))
+@pytest.mark.parametrize(
+ "estimator, expected_result",
+ [
+ (SVC(), True),
+ (GridSearchCV(SVC(), {"C": [0.1, 1]}), True),
+ (Pipeline([("svc", SVC())]), True),
+ (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), True),
+ (SVR(), False),
+ (GridSearchCV(SVR(), {"C": [0.1, 1]}), False),
+ (Pipeline([("svr", SVR())]), False),
+ (Pipeline([("svr_cv", GridSearchCV(SVR(), {"C": [0.1, 1]}))]), False),
+ ],
+)
+def test_is_classifier(estimator, expected_result):
+ assert is_classifier(estimator) == expected_result
+
+
+@pytest.mark.parametrize(
+ "estimator, expected_result",
+ [
+ (SVR(), True),
+ (GridSearchCV(SVR(), {"C": [0.1, 1]}), True),
+ (Pipeline([("svr", SVR())]), True),
+ (Pipeline([("svr_cv", GridSearchCV(SVR(), {"C": [0.1, 1]}))]), True),
+ (SVC(), False),
+ (GridSearchCV(SVC(), {"C": [0.1, 1]}), False),
+ (Pipeline([("svc", SVC())]), False),
+ (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), False),
+ ],
+)
+def test_is_regressor(estimator, expected_result):
+ assert is_regressor(estimator) == expected_result
+
+
+@pytest.mark.parametrize(
+ "estimator, expected_result",
+ [
+ (KMeans(), True),
+ (GridSearchCV(KMeans(), {"n_clusters": [3, 8]}), True),
+ (Pipeline([("km", KMeans())]), True),
+ (Pipeline([("km_cv", GridSearchCV(KMeans(), {"n_clusters": [3, 8]}))]), True),
+ (SVC(), False),
+ (GridSearchCV(SVC(), {"C": [0.1, 1]}), False),
+ (Pipeline([("svc", SVC())]), False),
+ (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), False),
+ ],
+)
+def test_is_clusterer(estimator, expected_result):
+ assert is_clusterer(estimator) == expected_result
def test_set_params():
| diff --git a/doc/whats_new/v1.6.rst b/doc/whats_new/v1.6.rst
index 0e6844155c6fa..53b0eb017fc57 100644
--- a/doc/whats_new/v1.6.rst
+++ b/doc/whats_new/v1.6.rst
@@ -74,6 +74,13 @@ Changelog
:pr:`123456` by :user:`Joe Bloggs <joeongithub>`.
where 123455 is the *pull request* number, not the issue number.
+:mod:`sklearn.base`
+...................
+
+- |Enhancement| Added a function :func:`base.is_clusterer` which determines
+ whether a given estimator is of category clusterer.
+ :pr:`28936` by :user:`Christian Veenhuis <ChVeen>`.
+
Thanks to everyone who has contributed to the maintenance and improvement of
the project since version 1.5, including:
| [
{
"components": [
{
"doc": "Return True if the given estimator is (probably) a clusterer.\n\n.. versionadded:: 1.6\n\nParameters\n----------\nestimator : object\n Estimator object to test.\n\nReturns\n-------\nout : bool\n True if estimator is a clusterer and False otherwise.\n\nExamples\n--... | [
"sklearn/tests/test_base.py::test_clone",
"sklearn/tests/test_base.py::test_clone_2",
"sklearn/tests/test_base.py::test_clone_buggy",
"sklearn/tests/test_base.py::test_clone_empty_array",
"sklearn/tests/test_base.py::test_clone_nan",
"sklearn/tests/test_base.py::test_clone_dict",
"sklearn/tests/test_bas... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH Add missing `base.is_clusterer()` function
#### Reference Issues/PRs
Fixes https://github.com/scikit-learn/scikit-learn/issues/28960
#### What does this implement/fix? Explain your changes.
This PR proposes to add the missing `base.is_clusterer()` function analogously to `base.is_classifier()`.
There is a user demand for this as can be seen in discussion <https://github.com/scikit-learn/scikit-learn/discussions/28904>.
The missing unit test for `base.is_regressor()` is added as well.
#### Any other comments?
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/base.py]
(definition of is_clusterer:)
def is_clusterer(estimator):
"""Return True if the given estimator is (probably) a clusterer.
.. versionadded:: 1.6
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a clusterer and False otherwise.
Examples
--------
>>> from sklearn.base import is_clusterer
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_clusterer(classifier)
False
>>> is_clusterer(regressor)
False
>>> is_clusterer(kmeans)
True"""
[end of new definitions in sklearn/base.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 18dc8630a7cbe1b591c12774949058b12157a39a | |
embeddings-benchmark__mteb-606 | 606 | embeddings-benchmark/mteb | null | d6ef5b63ebfb4bd8c8478385d3dc842373d67375 | 2024-04-30T21:21:45Z | diff --git a/docs/mmteb/points/606.jsonl b/docs/mmteb/points/606.jsonl
new file mode 100644
index 0000000000..ad95edb09d
--- /dev/null
+++ b/docs/mmteb/points/606.jsonl
@@ -0,0 +1,2 @@
+{"GitHub": "dokato", "Bug fixes": 2}
+{"GitHub": "KennethEnevoldsen", "Review PR": 2}
\ No newline at end of file
diff --git a/mteb/tasks/Classification/multilingual/ScalaClassification.py b/mteb/tasks/Classification/multilingual/ScalaClassification.py
index 4353372b23..89a8e417c7 100644
--- a/mteb/tasks/Classification/multilingual/ScalaClassification.py
+++ b/mteb/tasks/Classification/multilingual/ScalaClassification.py
@@ -1,167 +1,58 @@
from __future__ import annotations
-from mteb.abstasks import AbsTaskClassification
+from mteb.abstasks import AbsTaskClassification, MultilingualTask
from mteb.abstasks.TaskMetadata import TaskMetadata
-
-class ScalaDaClassification(AbsTaskClassification):
- metadata = TaskMetadata(
- name="ScalaDaClassification",
- description="A modified version of DDT modified for linguistic acceptability classification",
- reference="https://aclanthology.org/2023.nodalida-1.20/",
- dataset={
- "path": "mteb/scala_da_classification",
- "revision": "e60a77795ed5488fb7a03751cf6f2b026fa27a71",
- },
- type="Classification",
- category="s2s",
- eval_splits=["test"],
- eval_langs=["dan-Latn"],
- main_score="accuracy",
- date=None,
- form=None,
- domains=None,
- task_subtypes=None,
- license=None,
- socioeconomic_status=None,
- annotations_creators=None,
- dialect=None,
- text_creation=None,
- bibtex_citation=None,
- n_samples={"test": 1024},
- avg_character_length={"test": 109.4},
- )
-
- @property
- def metadata_dict(self) -> dict[str, str]:
- metadata_dict = super().metadata_dict
- metadata_dict["n_experiments"] = 10
- metadata_dict["samples_per_label"] = 32
- return metadata_dict
-
- def dataset_transform(self):
- # convert label to a 0/1 label
- labels = self.dataset["train"]["label"] # type: ignore
- lab2idx = {lab: idx for idx, lab in enumerate(set(labels))}
- self.dataset = self.dataset.map(
- lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"]
- )
-
-
-class ScalaNbClassification(AbsTaskClassification):
- metadata = TaskMetadata(
- name="ScalaNbClassification",
- description="A Norwegian dataset for linguistic acceptability classification for Bokmål",
- reference="https://aclanthology.org/2023.nodalida-1.20/",
- dataset={
- "path": "mteb/scala_nb_classification",
- "revision": "dda7af4696bd8d5150441908ea8ed6e68a357c13",
- },
- type="Classification",
- category="s2s",
- eval_splits=["test"],
- eval_langs=["nob-Latn"],
- main_score="accuracy",
- date=None,
- form=None,
- domains=None,
- task_subtypes=None,
- license=None,
- socioeconomic_status=None,
- annotations_creators=None,
- dialect=None,
- text_creation=None,
- bibtex_citation=None,
- n_samples={"test": 1024},
- avg_character_length={"test": 98.4},
- )
-
- @property
- def metadata_dict(self) -> dict[str, str]:
- metadata_dict = super().metadata_dict
- metadata_dict["n_experiments"] = 10
- metadata_dict["samples_per_label"] = 32
- return metadata_dict
-
- def dataset_transform(self):
- # convert label to a 0/1 label
- labels = self.dataset["train"]["label"] # type: ignore
- lab2idx = {lab: idx for idx, lab in enumerate(set(labels))}
- self.dataset = self.dataset.map(
- lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"]
- )
-
-
-class ScalaNnClassification(AbsTaskClassification):
- metadata = TaskMetadata(
- name="ScalaNnClassification",
- description="A Norwegian dataset for linguistic acceptability classification for Nynorsk",
- reference="https://aclanthology.org/2023.nodalida-1.20/",
- dataset={
- "path": "mteb/scala_nn_classification",
- "revision": "d81637ad324afb995ae395a87055380e8118a9c0",
- },
- type="Classification",
- category="s2s",
- eval_splits=["test"],
- eval_langs=["nno-Latn"],
- main_score="accuracy",
- date=None,
- form=None,
- domains=None,
- task_subtypes=None,
- license=None,
- socioeconomic_status=None,
- annotations_creators=None,
- dialect=None,
- text_creation=None,
- bibtex_citation=None,
- n_samples={"test": 1024},
- avg_character_length={"test": 104.8},
- )
-
- @property
- def metadata_dict(self) -> dict[str, str]:
- metadata_dict = super().metadata_dict
- metadata_dict["n_experiments"] = 10
- metadata_dict["samples_per_label"] = 32
- return metadata_dict
-
- def dataset_transform(self):
- # convert label to a 0/1 label
- labels = self.dataset["train"]["label"] # type: ignore
- lab2idx = {lab: idx for idx, lab in enumerate(set(labels))}
- self.dataset = self.dataset.map(
- lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"]
- )
+_LANGS = {
+ "Danish": ["dan-Latn"],
+ "Norwegian_b": ["nob-Latn"],
+ "Norwegian_n": ["nno-Latn"],
+ "Swedish": ["swe-Latn"],
+}
-class ScalaSvClassification(AbsTaskClassification):
+class ScalaClassification(AbsTaskClassification, MultilingualTask):
metadata = TaskMetadata(
- name="ScalaSvClassification",
- description="A Swedish dataset for linguistic acceptability classification",
+ name="ScalaClassification",
+ description="""ScaLa a linguistic acceptability dataset for the mainland Scandinavian languages automatically constructed from dependency annotations in Universal Dependencies Treebanks.
+ Published as part of 'ScandEval: A Benchmark for Scandinavian Natural Language Processing'""",
reference="https://aclanthology.org/2023.nodalida-1.20/",
dataset={
- "path": "mteb/scala_sv_classification",
- "revision": "aded78beae37445bf3917102d0b332049cfc8c99",
+ "path": "mteb/multilingual-scala-classification",
+ "revision": "ec85bb6c69679ed15ac66c0bf6e180bf563eb137",
},
type="Classification",
category="s2s",
eval_splits=["test"],
- eval_langs=["swe-Latn"],
+ eval_langs=_LANGS,
main_score="accuracy",
- date=None,
- form=None,
- domains=None,
- task_subtypes=None,
- license=None,
- socioeconomic_status=None,
- annotations_creators=None,
- dialect=None,
- text_creation=None,
- bibtex_citation=None,
- n_samples={"test": 1024},
- avg_character_length={"test": 98.3},
+ date=(
+ "1990-01-01",
+ "2023-01-01",
+ ), # derived from dependency treebank, this a the best guess
+ form=["written"],
+ domains=["Fiction", "News", "Non-fiction", "Blog", "Spoken", "Web"],
+ task_subtypes=["Linguistic acceptability"],
+ license="CC BY-SA 4.0",
+ socioeconomic_status="mixed",
+ annotations_creators="human-annotated",
+ dialect=[],
+ text_creation="created",
+ bibtex_citation="""@inproceedings{nielsen-2023-scandeval,
+ title = "{S}cand{E}val: A Benchmark for {S}candinavian Natural Language Processing",
+ author = "Nielsen, Dan",
+ editor = {Alum{\"a}e, Tanel and
+ Fishel, Mark},
+ booktitle = "Proceedings of the 24th Nordic Conference on Computational Linguistics (NoDaLiDa)",
+ month = may,
+ year = "2023",
+ address = "T{\'o}rshavn, Faroe Islands",
+ publisher = "University of Tartu Library",
+ url = "https://aclanthology.org/2023.nodalida-1.20",
+ pages = "185--201",
+ }""",
+ n_samples={"test": len(_LANGS) * 1024},
+ avg_character_length={"test": 102.72},
)
@property
@@ -172,9 +63,10 @@ def metadata_dict(self) -> dict[str, str]:
return metadata_dict
def dataset_transform(self):
- # convert label to a 0/1 label
- labels = self.dataset["train"]["label"] # type: ignore
- lab2idx = {lab: idx for idx, lab in enumerate(set(labels))}
- self.dataset = self.dataset.map(
- lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"]
- )
+ for lang in self.dataset.keys():
+ # convert label to a 0/1 label
+ labels = self.dataset[lang]["train"]["label"] # type: ignore
+ lab2idx = {lab: idx for idx, lab in enumerate(set(labels))}
+ self.dataset[lang] = self.dataset[lang].map(
+ lambda x: {"label": lab2idx[x["label"]]}, remove_columns=["label"]
+ )
diff --git a/results/sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2/MultiScalaClassification.json b/results/sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2/MultiScalaClassification.json
new file mode 100644
index 0000000000..5f50e8bea8
--- /dev/null
+++ b/results/sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2/MultiScalaClassification.json
@@ -0,0 +1,44 @@
+{
+ "dataset_revision": "ec85bb6c69679ed15ac66c0bf6e180bf563eb137",
+ "mteb_dataset_name": "MultiScalaClassification",
+ "mteb_version": "1.6.37",
+ "test": {
+ "Danish": {
+ "accuracy": 0.506005859375,
+ "accuracy_stderr": 0.010265641536147736,
+ "ap": 0.5031344774425407,
+ "ap_stderr": 0.005267834037289047,
+ "f1": 0.5049475749372598,
+ "f1_stderr": 0.00977210425804733,
+ "main_score": 0.506005859375
+ },
+ "Norwegian_b": {
+ "accuracy": 0.502978515625,
+ "accuracy_stderr": 0.007240570513339579,
+ "ap": 0.5015563181064907,
+ "ap_stderr": 0.003660770967452628,
+ "f1": 0.5001963944576493,
+ "f1_stderr": 0.008522792869096333,
+ "main_score": 0.502978515625
+ },
+ "Norwegian_n": {
+ "accuracy": 0.50234375,
+ "accuracy_stderr": 0.00478316285010096,
+ "ap": 0.5011969670310156,
+ "ap_stderr": 0.002416207495987296,
+ "f1": 0.5000250906615159,
+ "f1_stderr": 0.005245799810728871,
+ "main_score": 0.50234375
+ },
+ "Swedish": {
+ "accuracy": 0.50498046875,
+ "accuracy_stderr": 0.005298695299316616,
+ "ap": 0.5025474999570884,
+ "ap_stderr": 0.002695928049184094,
+ "f1": 0.5019362045810741,
+ "f1_stderr": 0.005470547272991301,
+ "main_score": 0.50498046875
+ },
+ "evaluation_time": 46.48
+ }
+}
\ No newline at end of file
diff --git a/scripts/data/scala_classification/create_multiling_data.py b/scripts/data/scala_classification/create_multiling_data.py
new file mode 100644
index 0000000000..01ae78fdc3
--- /dev/null
+++ b/scripts/data/scala_classification/create_multiling_data.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from datasets import load_dataset
+from huggingface_hub import create_repo
+
+repo_id = "mteb/multilingual-scala-classification"
+create_repo(repo_id, repo_type="dataset")
+
+languages = {
+ "Danish": "da",
+ "Norwegian_b": "nb",
+ "Norwegian_n": "nn",
+ "Swedish": "sv",
+}
+
+for lang in languages.keys():
+ raw_ds = load_dataset(f"mteb/scala_{languages[lang]}_classification")
+ raw_ds.push_to_hub(repo_id=repo_id, config_name=lang)
| diff --git a/tests/test_TaskMetadata.py b/tests/test_TaskMetadata.py
index 5f0cde1c7f..229ece491e 100644
--- a/tests/test_TaskMetadata.py
+++ b/tests/test_TaskMetadata.py
@@ -43,10 +43,7 @@
"MTOPDomainClassification",
"MTOPIntentClassification",
"NordicLangClassification",
- "ScalaDaClassification",
- "ScalaNbClassification",
- "ScalaNnClassification",
- "ScalaSvClassification",
+ "ScalaClassification",
"NoRecClassification",
"NorwegianParliamentClassification",
"PunjabiNewsClassification",
| diff --git a/docs/mmteb/points/606.jsonl b/docs/mmteb/points/606.jsonl
new file mode 100644
index 0000000000..ad95edb09d
--- /dev/null
+++ b/docs/mmteb/points/606.jsonl
@@ -0,0 +1,2 @@
+{"GitHub": "dokato", "Bug fixes": 2}
+{"GitHub": "KennethEnevoldsen", "Review PR": 2}
\ No newline at end of file
diff --git a/results/sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2/MultiScalaClassification.json b/results/sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2/MultiScalaClassification.json
new file mode 100644
index 0000000000..5f50e8bea8
--- /dev/null
+++ b/results/sentence-transformers__paraphrase-multilingual-MiniLM-L12-v2/MultiScalaClassification.json
@@ -0,0 +1,44 @@
+{
+ "dataset_revision": "ec85bb6c69679ed15ac66c0bf6e180bf563eb137",
+ "mteb_dataset_name": "MultiScalaClassification",
+ "mteb_version": "1.6.37",
+ "test": {
+ "Danish": {
+ "accuracy": 0.506005859375,
+ "accuracy_stderr": 0.010265641536147736,
+ "ap": 0.5031344774425407,
+ "ap_stderr": 0.005267834037289047,
+ "f1": 0.5049475749372598,
+ "f1_stderr": 0.00977210425804733,
+ "main_score": 0.506005859375
+ },
+ "Norwegian_b": {
+ "accuracy": 0.502978515625,
+ "accuracy_stderr": 0.007240570513339579,
+ "ap": 0.5015563181064907,
+ "ap_stderr": 0.003660770967452628,
+ "f1": 0.5001963944576493,
+ "f1_stderr": 0.008522792869096333,
+ "main_score": 0.502978515625
+ },
+ "Norwegian_n": {
+ "accuracy": 0.50234375,
+ "accuracy_stderr": 0.00478316285010096,
+ "ap": 0.5011969670310156,
+ "ap_stderr": 0.002416207495987296,
+ "f1": 0.5000250906615159,
+ "f1_stderr": 0.005245799810728871,
+ "main_score": 0.50234375
+ },
+ "Swedish": {
+ "accuracy": 0.50498046875,
+ "accuracy_stderr": 0.005298695299316616,
+ "ap": 0.5025474999570884,
+ "ap_stderr": 0.002695928049184094,
+ "f1": 0.5019362045810741,
+ "f1_stderr": 0.005470547272991301,
+ "main_score": 0.50498046875
+ },
+ "evaluation_time": 46.48
+ }
+}
\ No newline at end of file
| [
{
"components": [
{
"doc": "",
"lines": [
14,
71
],
"name": "ScalaClassification",
"signature": "class ScalaClassification(AbsTaskClassification, MultilingualTask):",
"type": "class"
},
{
"doc": "",
"lines": [
... | [
"tests/test_TaskMetadata.py::test_all_metadata_is_filled"
] | [
"tests/test_TaskMetadata.py::test_given_dataset_config_then_it_is_valid",
"tests/test_TaskMetadata.py::test_given_missing_dataset_path_then_it_throws",
"tests/test_TaskMetadata.py::test_given_missing_revision_path_then_it_throws",
"tests/test_TaskMetadata.py::test_given_none_revision_path_then_it_logs_warning... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Making ScalaClassification multilingual
This PR does not add any new dataset, but rather makes ScaLa classification as a truly multilingual format as pointed in #577.
I added new dataset to: https://huggingface.co/datasets/mteb/multilingual-scala-classification
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in mteb/tasks/Classification/multilingual/ScalaClassification.py]
(definition of ScalaClassification:)
class ScalaClassification(AbsTaskClassification, MultilingualTask):
(definition of ScalaClassification.metadata_dict:)
def metadata_dict(self) -> dict[str, str]:
(definition of ScalaClassification.dataset_transform:)
def dataset_transform(self):
[end of new definitions in mteb/tasks/Classification/multilingual/ScalaClassification.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b580b95fc91a7e7e675d27c3ae9a9df64ddad169 | |
pgmpy__pgmpy-1753 | 1,753 | pgmpy/pgmpy | null | 7ed0659107c9b3768208d17890a28778001320e9 | 2024-04-30T06:51:14Z | diff --git a/pgmpy/utils/__init__.py b/pgmpy/utils/__init__.py
index 7803135af..adf8bb667 100644
--- a/pgmpy/utils/__init__.py
+++ b/pgmpy/utils/__init__.py
@@ -1,9 +1,8 @@
-from .mathext import cartesian, sample_discrete
-from .state_name import StateNameMixin
from .check_functions import _check_1d_array_object, _check_length_equal
+from .mathext import cartesian, sample_discrete
from .optimizer import optimize, pinverse
-from .utils import get_example_model
-
+from .state_name import StateNameMixin
+from .utils import discretize, get_example_model
__all__ = [
"cartesian",
@@ -14,4 +13,5 @@
"optimize",
"pinverse",
"get_example_model",
+ "discretize",
]
diff --git a/pgmpy/utils/utils.py b/pgmpy/utils/utils.py
index 7d53e1927..35b465a8c 100644
--- a/pgmpy/utils/utils.py
+++ b/pgmpy/utils/utils.py
@@ -1,5 +1,7 @@
import gzip
+import pandas as pd
+
try:
from importlib.resources import files
except:
@@ -112,3 +114,64 @@ def get_example_model(model):
content = f.read()
reader = BIFReader(string=content.decode("utf-8"), n_jobs=1)
return reader.get_model()
+
+
+def discretize(data, cardinality, labels=dict(), method="rounding"):
+ """
+ Discretizes a given continuous dataset.
+
+ Parameters
+ ----------
+ data: pandas.DataFrame
+ The dataset to discretize. All columns must have continuous values.
+
+ cardinality: dict
+ A dictionary of the form (str: int) representing the number of bins
+ to create for each of the variables.
+
+ labels: dict (default: None)
+ A dictionary of the form (str: list) representing the label names for
+ each variable in the discretized dataframe.
+
+ method: rounding or quantile
+ If rounding, equal width bins are created and data is discretized into these bins. Refer pandas.cut for more details.
+ If quantile, creates bins such that each bin has an equal number of datapoints. Refer pandas.qcut for more details.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from pgmpy.utils import discretize
+ >>> rng = np.random.default_rng(42)
+ >>> X = rng.standard_normal(1000)
+ >>> Y = 0.2 * X + rng.standard_normal(1000)
+ >>> Z = 0.4 * X + 0.5 * Y + rng.standard_normal(1000)
+ >>> df = pd.DataFrame({"X": X, "Y": Y, "Z": Z})
+ >>> df_disc = discretize(df, cardinality={'X': 3, 'Y': 3, 'Z': 3}, labels={'X': ['low', 'mid', 'high'], 'Y': ['low', 'mid', 'high'], 'Z': ['low', 'mid', 'high']})
+ >>> df_disc.head()
+ X Y Z
+ 0 mid mid mid
+ 1 mid mid low
+ 2 mid mid mid
+ 3 high mid mid
+ 4 low mid low
+
+ Returns
+ -------
+ pandas.DataFrame: A discretized dataframe.
+ """
+ df_copy = data.copy()
+ if method == "rounding":
+ for column in data.columns:
+ df_copy[column] = pd.cut(
+ df_copy[column],
+ bins=cardinality[column],
+ include_lowest=True,
+ labels=labels.get(column),
+ )
+ elif method == "quantile":
+ for column in data.columns:
+ df_copy[column] = pd.qcut(
+ df_copy[column], q=cardinality[column], labels=labels.get(column)
+ )
+
+ return df_copy
| diff --git a/pgmpy/tests/test_utils/test_utils.py b/pgmpy/tests/test_utils/test_utils.py
index 505d14de1..1ab6e78a0 100644
--- a/pgmpy/tests/test_utils/test_utils.py
+++ b/pgmpy/tests/test_utils/test_utils.py
@@ -1,9 +1,11 @@
-import unittest
import random
+import unittest
+import numpy as np
+import pandas as pd
from tqdm.auto import tqdm
-from pgmpy.utils import get_example_model
+from pgmpy.utils import discretize, get_example_model
class TestDAGCreation(unittest.TestCase):
@@ -40,3 +42,28 @@ def test_get_example_model(self):
for model in tqdm(choices):
m = get_example_model(model=model)
del m
+
+
+class TestDiscretization(unittest.TestCase):
+ def setUp(self):
+ rng = np.random.default_rng(42)
+ X = rng.standard_normal(1000)
+ Y = 0.2 * X + rng.standard_normal(1000)
+ Z = 0.4 * X + 0.5 * Y + rng.standard_normal(1000)
+
+ self.data = pd.DataFrame({"X": X, "Y": Y, "Z": Z})
+
+ def test_rounding_disc(self):
+ df_disc = discretize(
+ data=self.data, cardinality={"X": 5, "Y": 4, "Z": 3}, method="rounding"
+ )
+ self.assertEqual(df_disc["X"].nunique(), 5)
+ self.assertEqual(df_disc["Y"].nunique(), 4)
+ self.assertEqual(df_disc["Z"].nunique(), 3)
+
+ df_disc = discretize(
+ data=self.data, cardinality={"X": 5, "Y": 4, "Z": 3}, method="quantile"
+ )
+ self.assertEqual(df_disc["X"].nunique(), 5)
+ self.assertEqual(df_disc["Y"].nunique(), 4)
+ self.assertEqual(df_disc["Z"].nunique(), 3)
| [
{
"components": [
{
"doc": "Discretizes a given continuous dataset.\n\nParameters\n----------\ndata: pandas.DataFrame\n The dataset to discretize. All columns must have continuous values.\n\ncardinality: dict\n A dictionary of the form (str: int) representing the number of bins\n to creat... | [
"pgmpy/tests/test_utils/test_utils.py::TestDAGCreation::test_get_example_model",
"pgmpy/tests/test_utils/test_utils.py::TestDiscretization::test_rounding_disc"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Adds a function for discretization
### Your checklist for this pull request
Please review the [guidelines for contributing](CONTRIBUTING.md) to this repository.
- [ ] Make sure you are requesting to **pull a topic/feature/bugfix branch** (right side). Don't request your master!
- [ ] Make sure you are making a pull request against the **dev branch** (left side). Also you should start *your branch* off *our dev*.
- [ ] Check the commit's or even all commits' message styles matches our requested structure.
### Issue number(s) that this pull request fixes
- Ref #1752
### List of changes to the codebase in this pull request
-
-
-
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pgmpy/utils/utils.py]
(definition of discretize:)
def discretize(data, cardinality, labels=dict(), method="rounding"):
"""Discretizes a given continuous dataset.
Parameters
----------
data: pandas.DataFrame
The dataset to discretize. All columns must have continuous values.
cardinality: dict
A dictionary of the form (str: int) representing the number of bins
to create for each of the variables.
labels: dict (default: None)
A dictionary of the form (str: list) representing the label names for
each variable in the discretized dataframe.
method: rounding or quantile
If rounding, equal width bins are created and data is discretized into these bins. Refer pandas.cut for more details.
If quantile, creates bins such that each bin has an equal number of datapoints. Refer pandas.qcut for more details.
Examples
--------
>>> import numpy as np
>>> from pgmpy.utils import discretize
>>> rng = np.random.default_rng(42)
>>> X = rng.standard_normal(1000)
>>> Y = 0.2 * X + rng.standard_normal(1000)
>>> Z = 0.4 * X + 0.5 * Y + rng.standard_normal(1000)
>>> df = pd.DataFrame({"X": X, "Y": Y, "Z": Z})
>>> df_disc = discretize(df, cardinality={'X': 3, 'Y': 3, 'Z': 3}, labels={'X': ['low', 'mid', 'high'], 'Y': ['low', 'mid', 'high'], 'Z': ['low', 'mid', 'high']})
>>> df_disc.head()
X Y Z
0 mid mid mid
1 mid mid low
2 mid mid mid
3 high mid mid
4 low mid low
Returns
-------
pandas.DataFrame: A discretized dataframe."""
[end of new definitions in pgmpy/utils/utils.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | cf8d0f12e2e5be62b01ff8fded85f3f64eab1e84 | ||
huggingface__trl-1598 | 1,598 | huggingface/trl | null | a2adfb836a90d1e37b1253ab43dace05f1241e04 | 2024-04-29T12:39:50Z | diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml
index e4a1041033f..4287624c2ac 100644
--- a/docs/source/_toctree.yml
+++ b/docs/source/_toctree.yml
@@ -47,6 +47,10 @@
title: ORPO Trainer
- local: iterative_sft_trainer
title: Iterative Supervised Fine-Tuning
+ - local: callbacks
+ title: Callback Classes
+ - local: judges
+ title: Judge Classes
- local: text_environments
title: Text Environments
title: API
diff --git a/docs/source/callbacks.mdx b/docs/source/callbacks.mdx
new file mode 100644
index 00000000000..e4d26797c29
--- /dev/null
+++ b/docs/source/callbacks.mdx
@@ -0,0 +1,13 @@
+# Callbacks
+
+## SyncRefModelCallback
+
+[[autodoc]] SyncRefModelCallback
+
+## RichProgressCallback
+
+[[autodoc]] RichProgressCallback
+
+## WinRateCallback
+
+[[autodoc]] WinRateCallback
diff --git a/docs/source/judges.mdx b/docs/source/judges.mdx
new file mode 100644
index 00000000000..23143f9db6d
--- /dev/null
+++ b/docs/source/judges.mdx
@@ -0,0 +1,62 @@
+# Judges
+
+TRL provides judges to easily compare two completions.
+
+Make sure to have installed the required dependencies by running:
+
+```bash
+pip install trl[llm_judge]
+```
+
+## Define your own judge
+
+To define your own judge, you need to subclass [`BaseJudge`] and implement the [`BaseJudge.judge`] method that returns a list of 0/1 indicating which completion is better. Here is a dummy example where we define a simple judge that favors longer completions:
+
+```python
+from trl import BaseJudge
+
+class LengthBasedJudge(BaseJudge):
+ def judge(self, prompts, completion_pairs, shuffle_order=False):
+ return [0 if len(c1) > len(c2) else 1 for c1, c2 in completion_pairs]
+```
+
+You can then use this judge as follows:
+
+```python
+judge = LengthBasedJudge()
+judge.judge(
+ prompts=["What is the capital of France?", "What is the biggest planet in the solar system?"],
+ completion_pairs=[["Paris", "The capital of France is Paris."], ["Jupiter is the biggest planet in the solar system.", "Jupiter"]],
+) # Outputs: [1, 0]
+```
+
+TRL also provides a [`BaseAPIJudge`] class that can be used to define judges that interact with an API. You can subclass [`BaseAPIJudge`] and implement the [`BaseAPIJudge.get_response`] method that should return the response from the API. For an example, see the [`HuggingFaceJudge`] class.
+
+
+## BaseJudge
+
+[[autodoc]] BaseJudge
+
+## BaseAPIJudge
+
+[[autodoc]] BaseAPIJudge
+
+## HuggingFaceJudge
+
+[[autodoc]] HuggingFaceJudge
+
+## MockAPIJudge
+
+[[autodoc]] MockAPIJudge
+
+## MockJudge
+
+[[autodoc]] MockJudge
+
+## OpenAIJudge
+
+[[autodoc]] OpenAIJudge
+
+## PairRMJudge
+
+[[autodoc]] PairRMJudge
diff --git a/setup.py b/setup.py
index cdbce4093bd..373cfeea064 100644
--- a/setup.py
+++ b/setup.py
@@ -84,6 +84,7 @@
"deepspeed": ["deepspeed>=0.9.5"],
"benchmark": ["wandb", "ghapi", "openrlbenchmark==0.2.1a5", "requests", "deepspeed"],
"quantization": ["bitsandbytes<=0.41.1"],
+ "llm_judge": ["openai>=1.23.2", "huggingface_hub>=0.22.2", "llm-blender>=0.0.2"],
}
EXTRAS["dev"] = []
for reqs in EXTRAS.values():
diff --git a/trl/__init__.py b/trl/__init__.py
index 5c3d3e85ea9..f3b281fcf9d 100644
--- a/trl/__init__.py
+++ b/trl/__init__.py
@@ -24,6 +24,8 @@
"is_pil_available",
"is_wandb_available",
"is_xpu_available",
+ "is_llmblender_available",
+ "is_openai_available",
],
"models": [
"AutoModelForCausalLMWithValueHead",
@@ -55,6 +57,14 @@
"SFTTrainer",
"FDivergenceConstants",
"FDivergenceType",
+ "WinRateCallback",
+ "BaseJudge",
+ "BaseAPIJudge",
+ "HuggingFaceJudge",
+ "MockAPIJudge",
+ "MockJudge",
+ "OpenAIJudge",
+ "PairRMJudge",
],
"commands": [],
"commands.cli_utils": ["init_zero_verbose", "SFTScriptArguments", "DPOScriptArguments", "TrlParser"],
@@ -95,6 +105,8 @@
is_pil_available,
is_wandb_available,
is_xpu_available,
+ is_llmblender_available,
+ is_openai_available,
)
from .models import (
AutoModelForCausalLMWithValueHead,
@@ -126,6 +138,14 @@
SFTTrainer,
FDivergenceConstants,
FDivergenceType,
+ WinRateCallback,
+ BaseJudge,
+ BaseAPIJudge,
+ HuggingFaceJudge,
+ MockAPIJudge,
+ MockJudge,
+ OpenAIJudge,
+ PairRMJudge,
)
from .trainer.callbacks import RichProgressCallback, SyncRefModelCallback
from .trainer.utils import get_kbit_device_map, get_peft_config, get_quantization_config
diff --git a/trl/import_utils.py b/trl/import_utils.py
index df44a38aa81..b0810d12efb 100644
--- a/trl/import_utils.py
+++ b/trl/import_utils.py
@@ -101,6 +101,14 @@ def is_sklearn_available() -> bool:
return find_spec("sklearn") is not None
+def is_llmblender_available() -> bool:
+ return find_spec("llm_blender") is not None
+
+
+def is_openai_available() -> bool:
+ return find_spec("openai") is not None
+
+
def is_xpu_available() -> bool:
if is_accelerate_greater_20_0():
import accelerate
diff --git a/trl/trainer/__init__.py b/trl/trainer/__init__.py
index 16617f5e276..365cd658b7e 100644
--- a/trl/trainer/__init__.py
+++ b/trl/trainer/__init__.py
@@ -49,6 +49,16 @@
"sft_trainer": ["SFTTrainer"],
"base": ["BaseTrainer"],
"ddpo_config": ["DDPOConfig"],
+ "callbacks": ["RichProgressCallback", "SyncRefModelCallback", "WinRateCallback"],
+ "judges": [
+ "BaseJudge",
+ "BaseAPIJudge",
+ "HuggingFaceJudge",
+ "MockAPIJudge",
+ "MockJudge",
+ "OpenAIJudge",
+ "PairRMJudge",
+ ],
}
try:
@@ -95,6 +105,8 @@
from .reward_trainer import RewardTrainer, compute_accuracy
from .sft_config import SFTConfig
from .sft_trainer import SFTTrainer
+ from .callbacks import RichProgressCallback, SyncRefModelCallback, WinRateCallback
+ from .judges import BaseJudge, BaseAPIJudge, HuggingFaceJudge, MockAPIJudge, MockJudge, OpenAIJudge, PairRMJudge
try:
if not is_diffusers_available():
diff --git a/trl/trainer/callbacks.py b/trl/trainer/callbacks.py
index 9052008f067..fbf632a48ee 100644
--- a/trl/trainer/callbacks.py
+++ b/trl/trainer/callbacks.py
@@ -11,20 +11,30 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Optional, Union
+from typing import List, Optional, Union
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
-from accelerate.utils import is_deepspeed_available
+from accelerate.utils import gather_object, is_deepspeed_available
from rich.console import Console, Group
from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress
-from transformers import PreTrainedModel
-from transformers.trainer import TrainerCallback
+from transformers import (
+ GenerationConfig,
+ PreTrainedModel,
+ Trainer,
+ TrainerCallback,
+ TrainerControl,
+ TrainerState,
+ TrainingArguments,
+)
from transformers.trainer_utils import has_length
+from ..models.utils import unwrap_model_for_generation
+from .judges import BaseJudge
+
if is_deepspeed_available():
import deepspeed
@@ -138,3 +148,88 @@ def on_train_end(self, args, state, control, **kwargs):
self.rich_console = None
self.training_status = None
self.current_step = None
+
+
+class WinRateCallback(TrainerCallback):
+ """
+ A [`~transformers.TrainerCallback`] that computes the win rate of a model based on a reference.
+
+ Usage:
+ ```python
+ trainer = DPOTrainer(...)
+ win_rate_callback = WinRateCallback(..., trainer=trainer)
+ trainer.add_callback(win_rate_callback)
+ ```
+
+ Args:
+ prompts (`List[str]`):
+ The prompts to generate completions for.
+ judge (`BaseJudge`):
+ The judge to use for comparing completions.
+ trainer (`Trainer`):
+ The trainer.
+ generation_config (`GenerationConfig`, *optional*):
+ The generation config to use for generating completions.
+ batch_size (`int`, *optional*):
+ The batch size to use for generating completions. Defaults to 4.
+ """
+
+ def __init__(
+ self,
+ prompts: List[str],
+ judge: BaseJudge,
+ trainer: Trainer,
+ generation_config: Optional[GenerationConfig] = None,
+ batch_size: int = 4,
+ ):
+ self.prompts = prompts
+ self.generation_config = generation_config
+ self.judge = judge
+ self.ref_completions = []
+ self.trainer = trainer
+ self.eval_dataset = self.trainer.eval_dataset
+ if not hasattr(trainer, "ref_model"):
+ raise AttributeError("Trainer must have a `ref_model` attribute.")
+ self.batch_size = batch_size
+
+ def generate_completions_for_model(self, model, tokenizer, prompts):
+ completions = []
+ with unwrap_model_for_generation(model, self.trainer.accelerator) as unwrapped_model:
+ unwrapped_model.eval()
+ for idx in range(0, len(prompts), self.batch_size):
+ batch = prompts[idx : idx + self.batch_size]
+ tokenized_batch = tokenizer(batch, return_tensors="pt", padding=True, truncation=True).to(model.device)
+ generations = unwrapped_model.generate(
+ **tokenized_batch,
+ generation_config=self.generation_config,
+ )
+ for prompt, generation in zip(tokenized_batch.input_ids, generations):
+ # Remove prompt from generation
+ generation = generation[len(prompt) :]
+ completion = tokenizer.decode(generation, skip_special_tokens=True)
+ completions.append(completion)
+
+ unwrapped_model.train()
+ return completions
+
+ def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
+ tokenizer = kwargs["tokenizer"]
+ tokenizer.padding_side = "left"
+ accelerator = self.trainer.accelerator
+ with accelerator.split_between_processes(self.eval_dataset["prompt"], apply_padding=True) as prompts:
+ self.ref_completions = self.generate_completions_for_model(self.trainer.ref_model, tokenizer, prompts)
+
+ def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
+ model = kwargs["model"]
+ tokenizer = kwargs["tokenizer"]
+ accelerator = self.trainer.accelerator
+ with accelerator.split_between_processes(self.eval_dataset["prompt"], apply_padding=True) as prompts:
+ completions = self.generate_completions_for_model(model, tokenizer, prompts)
+ completion_pairs = list(zip(self.ref_completions, completions))
+ winner_indices = self.judge.judge(self.eval_dataset["prompt"], completion_pairs)
+ winner_indices = gather_object(winner_indices)
+
+ # Logging
+ if self.trainer.accelerator.is_main_process:
+ win_rate = sum(winner_idx == 1 for winner_idx in winner_indices) / len(winner_indices)
+ self.trainer.log({"eval_win_rate": win_rate})
diff --git a/trl/trainer/judges.py b/trl/trainer/judges.py
new file mode 100644
index 00000000000..18a6b1ae2c2
--- /dev/null
+++ b/trl/trainer/judges.py
@@ -0,0 +1,284 @@
+import logging
+import os
+import random
+from abc import ABC, abstractmethod
+from concurrent.futures import ThreadPoolExecutor
+from typing import List, Optional
+
+import numpy as np
+from accelerate import Accelerator
+from huggingface_hub import InferenceClient
+from requests import HTTPError
+
+from ..import_utils import is_llmblender_available, is_openai_available
+
+
+if is_llmblender_available():
+ import llm_blender
+
+if is_openai_available():
+ from openai import BadRequestError, OpenAI
+
+
+DEFAULT_SYSTEM_PROMPT = '''I require a leaderboard for various large language models. I'll provide you with prompts given to these models and their corresponding outputs. Your task is to assess these responses, and select the model that produces the best output from a human perspective.
+
+## Instruction
+
+{{
+ "instruction": """{prompt}""",
+}}
+
+## Model Outputs
+
+Here are the unordered outputs from the models. Each output is associated with a specific model, identified by a unique model identifier.
+
+{{
+ {{
+ "model_identifier": "0",
+ "output": """{response1}"""
+ }},
+ {{
+ "model_identifier": "1",
+ "output": """{response2}"""
+ }}
+}}
+
+## Task
+
+Evaluate the models based on the quality and relevance of their outputs, and select the model that generated the best output. Answer by providing the model identifier of the best model. We will use your output as the name of the best model, so make sure your output only contains one of the following model identifiers and nothing else (no quotes, no spaces, no new lines, ...): 0 or 1.
+
+## Best Model Identifier'''
+
+
+class BaseJudge(ABC):
+ """
+ Base class for LLM judges.
+
+ Example:
+ ```python
+ class MockJudge(BaseJudge):
+ def judge(self, prompts, completion_pairs, shuffle_order=True):
+ return [random.choice([0, 1]) for _ in range(len(prompts))]
+
+ judge = MockJudge()
+ judge.judge(
+ prompts=["What is the capital of France?", "What is the capital of Germany?"],
+ completion_pairs=[["Paris", "Marseille"], ["Munich", "Berlin"]]
+ ) # [0, 0]
+ ```
+ """
+
+ @abstractmethod
+ def judge(self, prompts: List[str], completion_pairs: List[List[str]], shuffle_order: bool = True) -> List[int]:
+ """
+ Judge the completion pairs for the given prompts.
+
+ Args:
+ prompts (`List[str]`): List of prompts.
+ completion_pairs (`List[List[str]]`): List of completion pairs, where each pair is a list of two strings.
+ shuffle_order (`bool`): Whether to shuffle the order of the completion pairs, to avoid positional bias.
+
+ Returns:
+ List of integers, where each integer is the index of the completion pair that is preferred.
+ """
+ raise NotImplementedError("Judge subclasses must implement this method.")
+
+
+class BaseAPIJudge(BaseJudge):
+ """
+ Base class for LLM judges reached via an API.
+
+ The subclasses of this class should implement the `get_response` method to interact with the API.
+
+ Args:
+ system_prompt (`str`, *optional*): The system prompt to be used for the judge. If not provided, a default prompt is used.
+ max_tries (`int`, *optional*): The maximum number of retries for a request. Defaults to 5.
+ max_workers (`int`, *optional*): The maximum number of parallel requests. Defaults to 8.
+
+ Example:
+ ```python
+ class MockAPIJudge(BaseAPIJudge):
+ def get_response(self, content):
+ return random.choice(["0", "1"])
+
+ judge = MockAPIJudge()
+ judge.judge(
+ prompts=["What is the capital of France?", "What is the capital of Germany?"],
+ completion_pairs=[["Paris", "Marseille"], ["Munich", "Berlin"]]
+ ) # [1, 1]
+ ```
+ """
+
+ # TODO: add max_requests parameter to limit the number of requests made
+ def __init__(self, system_prompt: Optional[str] = None, max_tries: int = 5, max_workers: int = 8):
+ if system_prompt is None:
+ system_prompt = DEFAULT_SYSTEM_PROMPT
+ self.system_prompt = system_prompt
+ self.max_tries = max_tries
+ self.thread_pool_executor = ThreadPoolExecutor(max_workers=max_workers)
+
+ def __del__(self) -> None:
+ self.thread_pool_executor.shutdown()
+
+ @abstractmethod
+ def get_response(self, content: str) -> str:
+ """
+ Get the response from the API for the given content.
+
+ Args:
+ content (`str`): The string content.
+
+ Returns:
+ The response from the API as a string.
+ """
+
+ raise NotImplementedError("Judge subclasses must implement this method.")
+
+ def judge_single(self, prompt: str, completion_pair: List[str], shuffle_order: bool = True) -> int:
+ flipped = random.choice([True, False]) if shuffle_order else False
+ completion_pair = completion_pair[::-1] if flipped else completion_pair
+
+ retry = 0
+ while retry < self.max_tries:
+ content = self.system_prompt.format(
+ prompt=prompt, response1=completion_pair[0], response2=completion_pair[1]
+ )
+ reply = self.get_response(content)
+ reply = reply.strip()
+
+ if reply in ["0"]:
+ return 0 if not flipped else 1
+ elif reply in ["1"]:
+ return 1 if not flipped else 0
+ else:
+ logging.info(f"Judge gave response `{reply}` instead of the expected 0 or 1. Retrying.")
+ retry += 1
+
+ logging.info(
+ f"Max retries reached for prompt:\n\n{prompt}\nand completion pair:\n\n{completion_pair}\n\nReturning random choice."
+ )
+ return random.choice([0, 1])
+
+ def judge(self, prompts: List[str], completion_pairs: List[List[str]], shuffle_order: bool = True) -> List[int]:
+ futures = []
+ for prompt, completion_pair in zip(prompts, completion_pairs):
+ future = self.thread_pool_executor.submit(self.judge_single, prompt, completion_pair, shuffle_order)
+ futures.append(future)
+
+ return [f.result() for f in futures]
+
+
+class PairRMJudge(BaseJudge):
+ """
+ LLM judge based on the PairRM model from AllenAI.
+
+ See: https://huggingface.co/llm-blender/PairRM
+ """
+
+ def __init__(self):
+ if not is_llmblender_available():
+ raise ValueError("llm-blender is not installed. Please install it with 'pip install llm-blender'.")
+ self.blender = llm_blender.Blender()
+ self.blender.loadranker("llm-blender/PairRM", device=Accelerator().device)
+
+ def judge(self, prompts: List[str], completion_pairs: List[List[str]], shuffle_order: bool = True) -> List[int]:
+ if shuffle_order:
+ flip_mask = np.random.choice([True, False], size=len(prompts))
+ completion_pairs = [pair[::-1] if flip else pair for flip, pair in zip(flip_mask, completion_pairs)]
+ ranks = self.blender.rank(prompts, completion_pairs)
+ ranks -= 1 # PairRM is 1-indexed, so we subtract 1 to make it 0-indexed
+ if shuffle_order:
+ # Flip back the ranks to the original order
+ ranks[flip_mask] = ranks[flip_mask][:, ::-1]
+ return ranks[:, 0].tolist()
+
+
+class MockJudge(BaseJudge):
+ """
+ Mock judge that randomly selects a model for each completion pair.
+ """
+
+ def judge(self, prompts: List[str], completion_pairs: List[List[str]]) -> List[int]:
+ return [random.choice([0, 1]) for _ in range(len(prompts))]
+
+
+class MockAPIJudge(BaseAPIJudge):
+ """
+ Mock judge that returns a random choice instead of interacting with an API.
+ """
+
+ def get_response(self, content: str) -> str:
+ return random.choice(["0", "1"])
+
+
+class HuggingFaceJudge(BaseAPIJudge):
+ """
+ Judge based on the Hugging Face API.
+
+ Args:
+ model (`str`, *optional*): The model to use for the judge. Defaults to "meta-llama/Meta-Llama-3-70B-Instruct".
+ system_prompt (`str`, *optional*): The system prompt to be used for the judge. If not provided, a default prompt is used.
+ max_tries (`int`, *optional*): The maximum number of retries for a request. Defaults to 5.
+ max_workers (`int`, *optional*): The maximum number of parallel requests. Defaults to 8.
+ token (`str`, *optional*): The Hugging Face API token to use for the InferenceClient.
+ """
+
+ def __init__(
+ self,
+ model="meta-llama/Meta-Llama-3-70B-Instruct",
+ system_prompt: Optional[str] = None,
+ max_tries: int = 5,
+ max_workers: int = 8,
+ token: Optional[str] = None,
+ ):
+ super().__init__(system_prompt=system_prompt, max_tries=max_tries, max_workers=max_workers)
+ self.client = InferenceClient(model=model, token=token)
+
+ def get_response(self, content: str) -> str:
+ try:
+ response = self.client.chat_completion(
+ messages=[{"role": "user", "content": content}],
+ max_tokens=1,
+ stop=["<|eot_id|>"], # For llama-3 models
+ )
+ return response.choices[0].message.content
+ except HTTPError as e:
+ logging.info(f"Unable to reach the Hugging Face API due to error: {e}\nReturning random choice (0,1)")
+ return random.choice(["0", "1"])
+
+
+class OpenAIJudge(BaseAPIJudge):
+ """
+ Judge based on the OpenAI API.
+
+ Args:
+ model (`str`, *optional*): The model to use for the judge. Defaults to "gpt-4-turbo-preview".
+ system_prompt (`str`, *optional*): The system prompt to be used for the judge. If not provided, a default prompt is used.
+ max_tries (`int`, *optional*): The maximum number of retries for a request. Defaults to 5.
+ max_workers (`int`, *optional*): The maximum number of parallel requests. Defaults to 8.
+ """
+
+ def __init__(
+ self,
+ model="gpt-4-turbo-preview",
+ system_prompt: Optional[str] = None,
+ max_tries: int = 5,
+ max_workers: int = 8,
+ ):
+ if not is_openai_available():
+ raise ValueError("OpenAI client is not installed. Please install it with 'pip install openai'.")
+ super().__init__(system_prompt=system_prompt, max_tries=max_tries, max_workers=max_workers)
+ self.client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+ self.model = model
+
+ def get_response(self, content: str) -> str:
+ try:
+ response = self.client.chat.completions.create(
+ model=self.model,
+ messages=[{"role": "user", "content": content}],
+ max_tokens=1, # TODO: let users configure these variables
+ )
+ return response.choices[0].message.content
+ except BadRequestError as e:
+ logging.warn(f"Unable to reach to OpenAI API due to error: {e}\nReturning random choice (0, 1)")
+ return random.choice(["0", "1"])
| diff --git a/tests/test_judges.py b/tests/test_judges.py
new file mode 100644
index 00000000000..d3920c0b894
--- /dev/null
+++ b/tests/test_judges.py
@@ -0,0 +1,33 @@
+import unittest
+
+from trl import HuggingFaceJudge, MockAPIJudge, MockJudge
+
+
+class TestJudges(unittest.TestCase):
+ def _get_prompts_and_completion_pairs(self):
+ prompts = ["The capital of France is", "The biggest planet in the solar system is"]
+ completion_pairs = [["Paris", "Marseille"], ["Saturn", "Jupiter"]]
+ return prompts, completion_pairs
+
+ def test_mock_judge(self):
+ judge = MockJudge()
+ prompts, completion_pairs = self._get_prompts_and_completion_pairs()
+ ranks = judge.judge(prompts=prompts, completion_pairs=completion_pairs)
+ self.assertEqual(len(ranks), 2)
+ self.assertTrue(all(isinstance(rank, int) for rank in ranks))
+
+ def test_mock_api_judge(self):
+ judge = MockAPIJudge()
+ prompts, completion_pairs = self._get_prompts_and_completion_pairs()
+ ranks = judge.judge(prompts=prompts, completion_pairs=completion_pairs)
+ self.assertEqual(len(ranks), 2)
+ self.assertTrue(all(isinstance(rank, int) for rank in ranks))
+
+ @unittest.skip("This test needs to be run manually since it requires a valid Hugging Face API key.")
+ def test_hugging_face_judge(self):
+ judge = HuggingFaceJudge()
+ prompts, completion_pairs = self._get_prompts_and_completion_pairs()
+ ranks = judge.judge(prompts=prompts, completion_pairs=completion_pairs)
+ self.assertEqual(len(ranks), 2)
+ self.assertTrue(all(isinstance(rank, int) for rank in ranks))
+ self.assertEqual(ranks, [0, 1])
| diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml
index e4a1041033f..4287624c2ac 100644
--- a/docs/source/_toctree.yml
+++ b/docs/source/_toctree.yml
@@ -47,6 +47,10 @@
title: ORPO Trainer
- local: iterative_sft_trainer
title: Iterative Supervised Fine-Tuning
+ - local: callbacks
+ title: Callback Classes
+ - local: judges
+ title: Judge Classes
- local: text_environments
title: Text Environments
title: API
diff --git a/docs/source/callbacks.mdx b/docs/source/callbacks.mdx
new file mode 100644
index 00000000000..e4d26797c29
--- /dev/null
+++ b/docs/source/callbacks.mdx
@@ -0,0 +1,13 @@
+# Callbacks
+
+## SyncRefModelCallback
+
+[[autodoc]] SyncRefModelCallback
+
+## RichProgressCallback
+
+[[autodoc]] RichProgressCallback
+
+## WinRateCallback
+
+[[autodoc]] WinRateCallback
diff --git a/docs/source/judges.mdx b/docs/source/judges.mdx
new file mode 100644
index 00000000000..23143f9db6d
--- /dev/null
+++ b/docs/source/judges.mdx
@@ -0,0 +1,62 @@
+# Judges
+
+TRL provides judges to easily compare two completions.
+
+Make sure to have installed the required dependencies by running:
+
+```bash
+pip install trl[llm_judge]
+```
+
+## Define your own judge
+
+To define your own judge, you need to subclass [`BaseJudge`] and implement the [`BaseJudge.judge`] method that returns a list of 0/1 indicating which completion is better. Here is a dummy example where we define a simple judge that favors longer completions:
+
+```python
+from trl import BaseJudge
+
+class LengthBasedJudge(BaseJudge):
+ def judge(self, prompts, completion_pairs, shuffle_order=False):
+ return [0 if len(c1) > len(c2) else 1 for c1, c2 in completion_pairs]
+```
+
+You can then use this judge as follows:
+
+```python
+judge = LengthBasedJudge()
+judge.judge(
+ prompts=["What is the capital of France?", "What is the biggest planet in the solar system?"],
+ completion_pairs=[["Paris", "The capital of France is Paris."], ["Jupiter is the biggest planet in the solar system.", "Jupiter"]],
+) # Outputs: [1, 0]
+```
+
+TRL also provides a [`BaseAPIJudge`] class that can be used to define judges that interact with an API. You can subclass [`BaseAPIJudge`] and implement the [`BaseAPIJudge.get_response`] method that should return the response from the API. For an example, see the [`HuggingFaceJudge`] class.
+
+
+## BaseJudge
+
+[[autodoc]] BaseJudge
+
+## BaseAPIJudge
+
+[[autodoc]] BaseAPIJudge
+
+## HuggingFaceJudge
+
+[[autodoc]] HuggingFaceJudge
+
+## MockAPIJudge
+
+[[autodoc]] MockAPIJudge
+
+## MockJudge
+
+[[autodoc]] MockJudge
+
+## OpenAIJudge
+
+[[autodoc]] OpenAIJudge
+
+## PairRMJudge
+
+[[autodoc]] PairRMJudge
| [
{
"components": [
{
"doc": "",
"lines": [
104,
105
],
"name": "is_llmblender_available",
"signature": "def is_llmblender_available() -> bool:",
"type": "function"
},
{
"doc": "",
"lines": [
108,
... | [
"tests/test_judges.py::TestJudges::test_mock_api_judge",
"tests/test_judges.py::TestJudges::test_mock_judge"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add WinRateCallback and Judges
Command for testing:
```shell
TRANSFORMERS_VERBOSITY=info ACCELERATE_LOG_LEVEL=info TRL_USE_RICH=true accelerate launch --config_file=examples/accelerate_configs/multi_gpu.yaml --num_processes=8 examples/scripts/dpo_online.py --dataset_name=trl-internal-testing/hh-rlhf-helpful-base-trl-style --dataset_num_proc=4 --model_name_or_path=Qwen/Qwen1.5-0.5B-Chat --per_device_train_batch_size 4 --learning_rate 5e-7 --gradient_accumulation_steps 1 --logging_steps 10 --evaluation_strategy=steps --eval_steps 10 --num_train_epochs=6 --output_dir="scratch/dpo_anthropic_hh" --warmup_steps 150 --report_to wandb --bf16 --logging_first_step --no_remove_unused_columns --sanity_check
```
## TODO
- [x] Implement batched generation
- [x] Add judge for HF inference API
- [x] Add tests
- [x] Add docs
- [ ] Add sampling params for judge LLM
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in trl/import_utils.py]
(definition of is_llmblender_available:)
def is_llmblender_available() -> bool:
(definition of is_openai_available:)
def is_openai_available() -> bool:
[end of new definitions in trl/import_utils.py]
[start of new definitions in trl/trainer/callbacks.py]
(definition of WinRateCallback:)
class WinRateCallback(TrainerCallback):
"""A [`~transformers.TrainerCallback`] that computes the win rate of a model based on a reference.
Usage:
```python
trainer = DPOTrainer(...)
win_rate_callback = WinRateCallback(..., trainer=trainer)
trainer.add_callback(win_rate_callback)
```
Args:
prompts (`List[str]`):
The prompts to generate completions for.
judge (`BaseJudge`):
The judge to use for comparing completions.
trainer (`Trainer`):
The trainer.
generation_config (`GenerationConfig`, *optional*):
The generation config to use for generating completions.
batch_size (`int`, *optional*):
The batch size to use for generating completions. Defaults to 4."""
(definition of WinRateCallback.__init__:)
def __init__( self, prompts: List[str], judge: BaseJudge, trainer: Trainer, generation_config: Optional[GenerationConfig] = None, batch_size: int = 4, ):
(definition of WinRateCallback.generate_completions_for_model:)
def generate_completions_for_model(self, model, tokenizer, prompts):
(definition of WinRateCallback.on_train_begin:)
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
(definition of WinRateCallback.on_evaluate:)
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
[end of new definitions in trl/trainer/callbacks.py]
[start of new definitions in trl/trainer/judges.py]
(definition of BaseJudge:)
class BaseJudge(ABC):
"""Base class for LLM judges.
Example:
```python
class MockJudge(BaseJudge):
def judge(self, prompts, completion_pairs, shuffle_order=True):
return [random.choice([0, 1]) for _ in range(len(prompts))]
judge = MockJudge()
judge.judge(
prompts=["What is the capital of France?", "What is the capital of Germany?"],
completion_pairs=[["Paris", "Marseille"], ["Munich", "Berlin"]]
) # [0, 0]
```"""
(definition of BaseJudge.judge:)
def judge(self, prompts: List[str], completion_pairs: List[List[str]], shuffle_order: bool = True) -> List[int]:
"""Judge the completion pairs for the given prompts.
Args:
prompts (`List[str]`): List of prompts.
completion_pairs (`List[List[str]]`): List of completion pairs, where each pair is a list of two strings.
shuffle_order (`bool`): Whether to shuffle the order of the completion pairs, to avoid positional bias.
Returns:
List of integers, where each integer is the index of the completion pair that is preferred."""
(definition of BaseAPIJudge:)
class BaseAPIJudge(BaseJudge):
"""Base class for LLM judges reached via an API.
The subclasses of this class should implement the `get_response` method to interact with the API.
Args:
system_prompt (`str`, *optional*): The system prompt to be used for the judge. If not provided, a default prompt is used.
max_tries (`int`, *optional*): The maximum number of retries for a request. Defaults to 5.
max_workers (`int`, *optional*): The maximum number of parallel requests. Defaults to 8.
Example:
```python
class MockAPIJudge(BaseAPIJudge):
def get_response(self, content):
return random.choice(["0", "1"])
judge = MockAPIJudge()
judge.judge(
prompts=["What is the capital of France?", "What is the capital of Germany?"],
completion_pairs=[["Paris", "Marseille"], ["Munich", "Berlin"]]
) # [1, 1]
```"""
(definition of BaseAPIJudge.__init__:)
def __init__(self, system_prompt: Optional[str] = None, max_tries: int = 5, max_workers: int = 8):
(definition of BaseAPIJudge.__del__:)
def __del__(self) -> None:
(definition of BaseAPIJudge.get_response:)
def get_response(self, content: str) -> str:
"""Get the response from the API for the given content.
Args:
content (`str`): The string content.
Returns:
The response from the API as a string."""
(definition of BaseAPIJudge.judge_single:)
def judge_single(self, prompt: str, completion_pair: List[str], shuffle_order: bool = True) -> int:
(definition of BaseAPIJudge.judge:)
def judge(self, prompts: List[str], completion_pairs: List[List[str]], shuffle_order: bool = True) -> List[int]:
(definition of PairRMJudge:)
class PairRMJudge(BaseJudge):
"""LLM judge based on the PairRM model from AllenAI.
See: https://huggingface.co/llm-blender/PairRM"""
(definition of PairRMJudge.__init__:)
def __init__(self):
(definition of PairRMJudge.judge:)
def judge(self, prompts: List[str], completion_pairs: List[List[str]], shuffle_order: bool = True) -> List[int]:
(definition of MockJudge:)
class MockJudge(BaseJudge):
"""Mock judge that randomly selects a model for each completion pair."""
(definition of MockJudge.judge:)
def judge(self, prompts: List[str], completion_pairs: List[List[str]]) -> List[int]:
(definition of MockAPIJudge:)
class MockAPIJudge(BaseAPIJudge):
"""Mock judge that returns a random choice instead of interacting with an API."""
(definition of MockAPIJudge.get_response:)
def get_response(self, content: str) -> str:
(definition of HuggingFaceJudge:)
class HuggingFaceJudge(BaseAPIJudge):
"""Judge based on the Hugging Face API.
Args:
model (`str`, *optional*): The model to use for the judge. Defaults to "meta-llama/Meta-Llama-3-70B-Instruct".
system_prompt (`str`, *optional*): The system prompt to be used for the judge. If not provided, a default prompt is used.
max_tries (`int`, *optional*): The maximum number of retries for a request. Defaults to 5.
max_workers (`int`, *optional*): The maximum number of parallel requests. Defaults to 8.
token (`str`, *optional*): The Hugging Face API token to use for the InferenceClient."""
(definition of HuggingFaceJudge.__init__:)
def __init__( self, model="meta-llama/Meta-Llama-3-70B-Instruct", system_prompt: Optional[str] = None, max_tries: int = 5, max_workers: int = 8, token: Optional[str] = None, ):
(definition of HuggingFaceJudge.get_response:)
def get_response(self, content: str) -> str:
(definition of OpenAIJudge:)
class OpenAIJudge(BaseAPIJudge):
"""Judge based on the OpenAI API.
Args:
model (`str`, *optional*): The model to use for the judge. Defaults to "gpt-4-turbo-preview".
system_prompt (`str`, *optional*): The system prompt to be used for the judge. If not provided, a default prompt is used.
max_tries (`int`, *optional*): The maximum number of retries for a request. Defaults to 5.
max_workers (`int`, *optional*): The maximum number of parallel requests. Defaults to 8."""
(definition of OpenAIJudge.__init__:)
def __init__( self, model="gpt-4-turbo-preview", system_prompt: Optional[str] = None, max_tries: int = 5, max_workers: int = 8, ):
(definition of OpenAIJudge.get_response:)
def get_response(self, content: str) -> str:
[end of new definitions in trl/trainer/judges.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
YamlConfigParser fails on RewardConfig, DPOConfig etc..
Using the YamlConfigParser with derived classes of `TrainingArguments` throws an error because it assumes all classes that are `isinstance(class, TrainingArguments)` must be `TrainingArguments` and not a derived class
----------
--------------------
</issues> | a0066f47f82f7af0145e3b5ebc06cf2a45b97352 |
tobymao__sqlglot-3367 | 3,367 | tobymao/sqlglot | null | e82a30b6563547daea0bb087e1b6b5bf3b0532d3 | 2024-04-29T11:12:42Z | diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 03576d29e5..7b4d0e496a 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -867,3 +867,16 @@ def chr_sql(self, expression: exp.Chr) -> str:
charset = expression.args.get("charset")
using = f" USING {self.sql(charset)}" if charset else ""
return f"CHAR({this}{using})"
+
+ def timestamptrunc_sql(self, expression: exp.TimestampTrunc) -> str:
+ unit = expression.args.get("unit")
+
+ # Pick an old-enough date to avoid negative timestamp diffs
+ start_ts = "'0000-01-01 00:00:00'"
+
+ # Source: https://stackoverflow.com/a/32955740
+ timestamp_diff = build_date_delta(exp.TimestampDiff)([unit, start_ts, expression.this])
+ interval = exp.Interval(this=timestamp_diff, unit=unit)
+ dateadd = build_date_delta_with_interval(exp.DateAdd)([start_ts, interval])
+
+ return self.sql(dateadd)
| diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index e8af5c644e..a03deb2fd6 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -1109,3 +1109,23 @@ def test_safe_div(self):
"tsql": "CAST(a AS FLOAT) / NULLIF(b, 0)",
},
)
+
+ def test_timestamp_trunc(self):
+ for dialect in ("postgres", "snowflake", "duckdb", "spark", "databricks"):
+ for unit in (
+ "MILLISECOND",
+ "SECOND",
+ "DAY",
+ "MONTH",
+ "YEAR",
+ ):
+ with self.subTest(f"MySQL -> {dialect} Timestamp Trunc with unit {unit}: "):
+ self.validate_all(
+ f"DATE_ADD('0000-01-01 00:00:00', INTERVAL (TIMESTAMPDIFF({unit}, '0000-01-01 00:00:00', CAST('2001-02-16 20:38:40' AS DATETIME))) {unit})",
+ read={
+ dialect: f"DATE_TRUNC({unit}, TIMESTAMP '2001-02-16 20:38:40')",
+ },
+ write={
+ "mysql": f"DATE_ADD('0000-01-01 00:00:00', INTERVAL (TIMESTAMPDIFF({unit}, '0000-01-01 00:00:00', CAST('2001-02-16 20:38:40' AS DATETIME))) {unit})",
+ },
+ )
| [] | [
"tests/dialects/test_mysql.py::TestMySQL::test_timestamp_trunc"
] | [
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tes... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(mysql): Transpile TimestampTrunc
Fixes #3366
MySQL cannot generate `exp.TimestampTrunc` using an existing function; However, one can simulate it with a combination of date add & diff ([source](https://stackoverflow.com/a/32955740))
Docs for `DATE_TRUNC`
-----------
- [Postgres](https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC)
- [Presto](https://prestodb.io/docs/current/functions/datetime.html#date_trunc)
- [Snowflake](https://docs.snowflake.com/en/sql-reference/functions/date_trunc)
- [Databricks / Spark](https://docs.databricks.com/en/sql/language-manual/functions/date_trunc.html)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Timestamp trunc method issue from postgres to MySQL
I was trying to convert an SQL query from Postgres to MySQL. Check the following code snippet
```
import sqlglot
sqlglot.transpile("SELECT date_trunc('hour', timestamp '2001-02-16 20:38:40') FROM dual", read="postgres", write="mysql")
```
This returns
`["SELECT TIMESTAMP_TRUNC(CAST('2001-02-16 20:38:40' AS DATETIME), HOUR) FROM dual"]`
But MySQL doesn't have TIMESTAMP_TRUNC method. Please look into this.
----------
I have a similar issue
```
import sqlglot
print(sqlglot.transpile("SELECT mt.mode_was, AVG(EXTRACT(epoch FROM (mt.cross_time::TIMESTAMP - mt.second_cross_time::TIMESTAMP))) AS average_time FROM main_table mt GROUP BY mt.mode_was ORDER BY average_time DESC LIMIT 1", read="postgres", write="mysql")[0])
```
this returns
`SELECT mt.mode_was, AVG(EXTRACT(epoch FROM (CAST(mt.cross_time AS DATETIME) - CAST(mt.second_cross_time AS DATETIME)))) AS average_time FROM main_table AS mt GROUP BY mt.mode_was ORDER BY CASE WHEN average_time IS NULL THEN 1 ELSE 0 END DESC, average_time DESC LIMIT 1`
What's the issue @FaizelK?
@georgesittas
epoch is not a thing in mysql, i wanted to convert that query to mysql, but it still kept epoch. when running this my mysql here are the results because MySQL does not support the EXTRACT(epoch FROM :
`You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'epoch FROM (CAST(mt.cross_time AS DATETIME) - CAST(mt.second_cross_time AS DATET' at line 1`
Hi @FaizelK, can you please open a separate issue describing the faulty behavior? Please include a minimum reproducible example, what you would ideally want to transpile to as well as any docs validating that.
Thanks!
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
scikit-learn__scikit-learn-28901 | 28,901 | scikit-learn/scikit-learn | 1.6 | 9012b787bdded56c7d189043b88f8dfc0dbe911a | 2024-04-26T15:41:56Z | diff --git a/doc/whats_new/upcoming_changes/sklearn.pipeline/28901.major-feature.rst b/doc/whats_new/upcoming_changes/sklearn.pipeline/28901.major-feature.rst
new file mode 100644
index 0000000000000..60703872d3980
--- /dev/null
+++ b/doc/whats_new/upcoming_changes/sklearn.pipeline/28901.major-feature.rst
@@ -0,0 +1,3 @@
+- :class:`pipeline.Pipeline` can now transform metadata up to the step requiring the
+ metadata, which can be set using the `transform_input` parameter.
+ By `Adrin Jalali`_
diff --git a/sklearn/pipeline.py b/sklearn/pipeline.py
index 9331a15dea9ab..dbbd810a9d9b6 100644
--- a/sklearn/pipeline.py
+++ b/sklearn/pipeline.py
@@ -31,6 +31,7 @@
MethodMapping,
_raise_for_params,
_routing_enabled,
+ get_routing_for_object,
process_routing,
)
from .utils.metaestimators import _BaseComposition, available_if
@@ -80,6 +81,46 @@ def check(self):
return check
+def _cached_transform(
+ sub_pipeline, *, cache, param_name, param_value, transform_params
+):
+ """Transform a parameter value using a sub-pipeline and cache the result.
+
+ Parameters
+ ----------
+ sub_pipeline : Pipeline
+ The sub-pipeline to be used for transformation.
+ cache : dict
+ The cache dictionary to store the transformed values.
+ param_name : str
+ The name of the parameter to be transformed.
+ param_value : object
+ The value of the parameter to be transformed.
+ transform_params : dict
+ The metadata to be used for transformation. This passed to the
+ `transform` method of the sub-pipeline.
+
+ Returns
+ -------
+ transformed_value : object
+ The transformed value of the parameter.
+ """
+ if param_name not in cache:
+ # If the parameter is a tuple, transform each element of the
+ # tuple. This is needed to support the pattern present in
+ # `lightgbm` and `xgboost` where users can pass multiple
+ # validation sets.
+ if isinstance(param_value, tuple):
+ cache[param_name] = tuple(
+ sub_pipeline.transform(element, **transform_params)
+ for element in param_value
+ )
+ else:
+ cache[param_name] = sub_pipeline.transform(param_value, **transform_params)
+
+ return cache[param_name]
+
+
class Pipeline(_BaseComposition):
"""
A sequence of data transformers with an optional final predictor.
@@ -119,6 +160,20 @@ class Pipeline(_BaseComposition):
must define `fit`. All non-last steps must also define `transform`. See
:ref:`Combining Estimators <combining_estimators>` for more details.
+ transform_input : list of str, default=None
+ The names of the :term:`metadata` parameters that should be transformed by the
+ pipeline before passing it to the step consuming it.
+
+ This enables transforming some input arguments to ``fit`` (other than ``X``)
+ to be transformed by the steps of the pipeline up to the step which requires
+ them. Requirement is defined via :ref:`metadata routing <metadata_routing>`.
+ For instance, this can be used to pass a validation set through the pipeline.
+
+ You can only set this if metadata routing is enabled, which you
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
+
+ .. versionadded:: 1.6
+
memory : str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. The last step
will never be cached, even if it is a transformer. By default, no
@@ -184,12 +239,14 @@ class Pipeline(_BaseComposition):
# BaseEstimator interface
_parameter_constraints: dict = {
"steps": [list, Hidden(tuple)],
+ "transform_input": [list, None],
"memory": [None, str, HasMethods(["cache"])],
"verbose": ["boolean"],
}
- def __init__(self, steps, *, memory=None, verbose=False):
+ def __init__(self, steps, *, transform_input=None, memory=None, verbose=False):
self.steps = steps
+ self.transform_input = transform_input
self.memory = memory
self.verbose = verbose
@@ -412,9 +469,92 @@ def _check_method_params(self, method, props, **kwargs):
fit_params_steps[step]["fit_predict"][param] = pval
return fit_params_steps
+ def _get_metadata_for_step(self, *, step_idx, step_params, all_params):
+ """Get params (metadata) for step `name`.
+
+ This transforms the metadata up to this step if required, which is
+ indicated by the `transform_input` parameter.
+
+ If a param in `step_params` is included in the `transform_input` list,
+ it will be transformed.
+
+ Parameters
+ ----------
+ step_idx : int
+ Index of the step in the pipeline.
+
+ step_params : dict
+ Parameters specific to the step. These are routed parameters, e.g.
+ `routed_params[name]`. If a parameter name here is included in the
+ `pipeline.transform_input`, then it will be transformed. Note that
+ these parameters are *after* routing, so the aliases are already
+ resolved.
+
+ all_params : dict
+ All parameters passed by the user. Here this is used to call
+ `transform` on the slice of the pipeline itself.
+
+ Returns
+ -------
+ dict
+ Parameters to be passed to the step. The ones which should be
+ transformed are transformed.
+ """
+ if (
+ self.transform_input is None
+ or not all_params
+ or not step_params
+ or step_idx == 0
+ ):
+ # we only need to process step_params if transform_input is set
+ # and metadata is given by the user.
+ return step_params
+
+ sub_pipeline = self[:step_idx]
+ sub_metadata_routing = get_routing_for_object(sub_pipeline)
+ # here we get the metadata required by sub_pipeline.transform
+ transform_params = {
+ key: value
+ for key, value in all_params.items()
+ if key
+ in sub_metadata_routing.consumes(
+ method="transform", params=all_params.keys()
+ )
+ }
+ transformed_params = dict() # this is to be returned
+ transformed_cache = dict() # used to transform each param once
+ # `step_params` is the output of `process_routing`, so it has a dict for each
+ # method (e.g. fit, transform, predict), which are the args to be passed to
+ # those methods. We need to transform the parameters which are in the
+ # `transform_input`, before returning these dicts.
+ for method, method_params in step_params.items():
+ transformed_params[method] = Bunch()
+ for param_name, param_value in method_params.items():
+ # An example of `(param_name, param_value)` is
+ # `('sample_weight', array([0.5, 0.5, ...]))`
+ if param_name in self.transform_input:
+ # This parameter now needs to be transformed by the sub_pipeline, to
+ # this step. We cache these computations to avoid repeating them.
+ transformed_params[method][param_name] = _cached_transform(
+ sub_pipeline,
+ cache=transformed_cache,
+ param_name=param_name,
+ param_value=param_value,
+ transform_params=transform_params,
+ )
+ else:
+ transformed_params[method][param_name] = param_value
+ return transformed_params
+
# Estimator interface
- def _fit(self, X, y=None, routed_params=None):
+ def _fit(self, X, y=None, routed_params=None, raw_params=None):
+ """Fit the pipeline except the last step.
+
+ routed_params is the output of `process_routing`
+ raw_params is the parameters passed by the user, used when `transform_input`
+ is set by the user, to transform metadata using a sub-pipeline.
+ """
# shallow copy of steps - this should really be steps_
self.steps = list(self.steps)
self._validate_steps()
@@ -437,14 +577,20 @@ def _fit(self, X, y=None, routed_params=None):
else:
cloned_transformer = clone(transformer)
# Fit or load from cache the current transformer
+ step_params = self._get_metadata_for_step(
+ step_idx=step_idx,
+ step_params=routed_params[name],
+ all_params=raw_params,
+ )
+
X, fitted_transformer = fit_transform_one_cached(
cloned_transformer,
X,
y,
- None,
+ weight=None,
message_clsname="Pipeline",
message=self._log_message(step_idx),
- params=routed_params[name],
+ params=step_params,
)
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
@@ -495,11 +641,22 @@ def fit(self, X, y=None, **params):
self : object
Pipeline with fitted steps.
"""
+ if not _routing_enabled() and self.transform_input is not None:
+ raise ValueError(
+ "The `transform_input` parameter can only be set if metadata "
+ "routing is enabled. You can enable metadata routing using "
+ "`sklearn.set_config(enable_metadata_routing=True)`."
+ )
+
routed_params = self._check_method_params(method="fit", props=params)
- Xt = self._fit(X, y, routed_params)
+ Xt = self._fit(X, y, routed_params, raw_params=params)
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if self._final_estimator != "passthrough":
- last_step_params = routed_params[self.steps[-1][0]]
+ last_step_params = self._get_metadata_for_step(
+ step_idx=len(self) - 1,
+ step_params=routed_params[self.steps[-1][0]],
+ all_params=params,
+ )
self._final_estimator.fit(Xt, y, **last_step_params["fit"])
return self
@@ -562,7 +719,11 @@ def fit_transform(self, X, y=None, **params):
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
- last_step_params = routed_params[self.steps[-1][0]]
+ last_step_params = self._get_metadata_for_step(
+ step_idx=len(self) - 1,
+ step_params=routed_params[self.steps[-1][0]],
+ all_params=params,
+ )
if hasattr(last_step, "fit_transform"):
return last_step.fit_transform(
Xt, y, **last_step_params["fit_transform"]
@@ -1270,7 +1431,7 @@ def _name_estimators(estimators):
return list(zip(names, estimators))
-def make_pipeline(*steps, memory=None, verbose=False):
+def make_pipeline(*steps, memory=None, transform_input=None, verbose=False):
"""Construct a :class:`Pipeline` from the given estimators.
This is a shorthand for the :class:`Pipeline` constructor; it does not
@@ -1292,6 +1453,17 @@ def make_pipeline(*steps, memory=None, verbose=False):
or ``steps`` to inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
+ transform_input : list of str, default=None
+ This enables transforming some input arguments to ``fit`` (other than ``X``)
+ to be transformed by the steps of the pipeline up to the step which requires
+ them. Requirement is defined via :ref:`metadata routing <metadata_routing>`.
+ This can be used to pass a validation set through the pipeline for instance.
+
+ You can only set this if metadata routing is enabled, which you
+ can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
+
+ .. versionadded:: 1.6
+
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
@@ -1315,7 +1487,12 @@ def make_pipeline(*steps, memory=None, verbose=False):
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
- return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose)
+ return Pipeline(
+ _name_estimators(steps),
+ transform_input=transform_input,
+ memory=memory,
+ verbose=verbose,
+ )
def _transform_one(transformer, X, y, weight, params=None):
| diff --git a/sklearn/tests/metadata_routing_common.py b/sklearn/tests/metadata_routing_common.py
index 174164daada8c..98503652df6f0 100644
--- a/sklearn/tests/metadata_routing_common.py
+++ b/sklearn/tests/metadata_routing_common.py
@@ -347,6 +347,7 @@ def fit(self, X, y=None, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
+ self.fitted_ = True
return self
def transform(self, X, sample_weight="default", metadata="default"):
diff --git a/sklearn/tests/test_pipeline.py b/sklearn/tests/test_pipeline.py
index a1ba690d0f465..d7a201f3abf6f 100644
--- a/sklearn/tests/test_pipeline.py
+++ b/sklearn/tests/test_pipeline.py
@@ -16,6 +16,7 @@
from sklearn import config_context
from sklearn.base import (
BaseEstimator,
+ ClassifierMixin,
TransformerMixin,
clone,
is_classifier,
@@ -357,7 +358,7 @@ def test_pipeline_raise_set_params_error():
error_msg = re.escape(
"Invalid parameter 'fake' for estimator Pipeline(steps=[('cls',"
" LinearRegression())]). Valid parameters are: ['memory', 'steps',"
- " 'verbose']."
+ " 'transform_input', 'verbose']."
)
with pytest.raises(ValueError, match=error_msg):
pipe.set_params(fake="nope")
@@ -782,6 +783,7 @@ def make():
"memory": None,
"m2__mult": 2,
"last__mult": 5,
+ "transform_input": None,
"verbose": False,
}
@@ -1871,6 +1873,176 @@ def test_pipeline_inverse_transform_Xt_deprecation():
pipe.inverse_transform(Xt=X)
+# transform_input tests
+# =====================
+
+
+@config_context(enable_metadata_routing=True)
+@pytest.mark.parametrize("method", ["fit", "fit_transform"])
+def test_transform_input_pipeline(method):
+ """Test that with transform_input, data is correctly transformed for each step."""
+
+ def get_transformer(registry, sample_weight, metadata):
+ """Get a transformer with requests set."""
+ return (
+ ConsumingTransformer(registry=registry)
+ .set_fit_request(sample_weight=sample_weight, metadata=metadata)
+ .set_transform_request(sample_weight=sample_weight, metadata=metadata)
+ )
+
+ def get_pipeline():
+ """Get a pipeline and corresponding registries.
+
+ The pipeline has 4 steps, with different request values set to test different
+ cases. One is aliased.
+ """
+ registry_1, registry_2, registry_3, registry_4 = (
+ _Registry(),
+ _Registry(),
+ _Registry(),
+ _Registry(),
+ )
+ pipe = make_pipeline(
+ get_transformer(registry_1, sample_weight=True, metadata=True),
+ get_transformer(registry_2, sample_weight=False, metadata=False),
+ get_transformer(registry_3, sample_weight=True, metadata=True),
+ get_transformer(registry_4, sample_weight="other_weights", metadata=True),
+ transform_input=["sample_weight"],
+ )
+ return pipe, registry_1, registry_2, registry_3, registry_4
+
+ def check_metadata(registry, methods, **metadata):
+ """Check that the right metadata was recorded for the given methods."""
+ assert registry
+ for estimator in registry:
+ for method in methods:
+ check_recorded_metadata(
+ estimator,
+ method=method,
+ parent=method,
+ **metadata,
+ )
+
+ X = np.array([[1, 2], [3, 4]])
+ y = np.array([0, 1])
+ sample_weight = np.array([[1, 2]])
+ other_weights = np.array([[30, 40]])
+ metadata = np.array([[100, 200]])
+
+ pipe, registry_1, registry_2, registry_3, registry_4 = get_pipeline()
+ pipe.fit(
+ X,
+ y,
+ sample_weight=sample_weight,
+ other_weights=other_weights,
+ metadata=metadata,
+ )
+
+ check_metadata(
+ registry_1, ["fit", "transform"], sample_weight=sample_weight, metadata=metadata
+ )
+ check_metadata(registry_2, ["fit", "transform"])
+ check_metadata(
+ registry_3,
+ ["fit", "transform"],
+ sample_weight=sample_weight + 2,
+ metadata=metadata,
+ )
+ check_metadata(
+ registry_4,
+ method.split("_"), # ["fit", "transform"] if "fit_transform", ["fit"] otherwise
+ sample_weight=other_weights + 3,
+ metadata=metadata,
+ )
+
+
+@config_context(enable_metadata_routing=True)
+def test_transform_input_explicit_value_check():
+ """Test that the right transformed values are passed to `fit`."""
+
+ class Transformer(TransformerMixin, BaseEstimator):
+ def fit(self, X, y):
+ self.fitted_ = True
+ return self
+
+ def transform(self, X):
+ return X + 1
+
+ class Estimator(ClassifierMixin, BaseEstimator):
+ def fit(self, X, y, X_val=None, y_val=None):
+ assert_array_equal(X, np.array([[1, 2]]))
+ assert_array_equal(y, np.array([0, 1]))
+ assert_array_equal(X_val, np.array([[2, 3]]))
+ assert_array_equal(y_val, np.array([0, 1]))
+ return self
+
+ X = np.array([[0, 1]])
+ y = np.array([0, 1])
+ X_val = np.array([[1, 2]])
+ y_val = np.array([0, 1])
+ pipe = Pipeline(
+ [
+ ("transformer", Transformer()),
+ ("estimator", Estimator().set_fit_request(X_val=True, y_val=True)),
+ ],
+ transform_input=["X_val"],
+ )
+ pipe.fit(X, y, X_val=X_val, y_val=y_val)
+
+
+def test_transform_input_no_slep6():
+ """Make sure the right error is raised if slep6 is not enabled."""
+ X = np.array([[1, 2], [3, 4]])
+ y = np.array([0, 1])
+ msg = "The `transform_input` parameter can only be set if metadata"
+ with pytest.raises(ValueError, match=msg):
+ make_pipeline(DummyTransf(), transform_input=["blah"]).fit(X, y)
+
+
+@config_context(enable_metadata_routing=True)
+def test_transform_tuple_input():
+ """Test that if metadata is a tuple of arrays, both arrays are transformed."""
+
+ class Estimator(ClassifierMixin, BaseEstimator):
+ def fit(self, X, y, X_val=None, y_val=None):
+ assert isinstance(X_val, tuple)
+ assert isinstance(y_val, tuple)
+ # Here we make sure that each X_val is transformed by the transformer
+ assert_array_equal(X_val[0], np.array([[2, 3]]))
+ assert_array_equal(y_val[0], np.array([0, 1]))
+ assert_array_equal(X_val[1], np.array([[11, 12]]))
+ assert_array_equal(y_val[1], np.array([1, 2]))
+ self.fitted_ = True
+ return self
+
+ class Transformer(TransformerMixin, BaseEstimator):
+ def fit(self, X, y):
+ self.fitted_ = True
+ return self
+
+ def transform(self, X):
+ return X + 1
+
+ X = np.array([[1, 2]])
+ y = np.array([0, 1])
+ X_val0 = np.array([[1, 2]])
+ y_val0 = np.array([0, 1])
+ X_val1 = np.array([[10, 11]])
+ y_val1 = np.array([1, 2])
+ pipe = Pipeline(
+ [
+ ("transformer", Transformer()),
+ ("estimator", Estimator().set_fit_request(X_val=True, y_val=True)),
+ ],
+ transform_input=["X_val"],
+ )
+ pipe.fit(X, y, X_val=(X_val0, X_val1), y_val=(y_val0, y_val1))
+
+
+# end of transform_input tests
+# =============================
+
+
# TODO(1.8): change warning to checking for NotFittedError
@pytest.mark.parametrize(
"method",
diff --git a/sklearn/utils/tests/test_pprint.py b/sklearn/utils/tests/test_pprint.py
index bef5836910787..b3df08732d798 100644
--- a/sklearn/utils/tests/test_pprint.py
+++ b/sklearn/utils/tests/test_pprint.py
@@ -304,7 +304,7 @@ def test_pipeline(print_changed_only_false):
penalty='l2', random_state=None,
solver='warn', tol=0.0001, verbose=0,
warm_start=False))],
- verbose=False)"""
+ transform_input=None, verbose=False)"""
expected = expected[1:] # remove first \n
assert pipeline.__repr__() == expected
| diff --git a/doc/whats_new/upcoming_changes/sklearn.pipeline/28901.major-feature.rst b/doc/whats_new/upcoming_changes/sklearn.pipeline/28901.major-feature.rst
new file mode 100644
index 0000000000000..60703872d3980
--- /dev/null
+++ b/doc/whats_new/upcoming_changes/sklearn.pipeline/28901.major-feature.rst
@@ -0,0 +1,3 @@
+- :class:`pipeline.Pipeline` can now transform metadata up to the step requiring the
+ metadata, which can be set using the `transform_input` parameter.
+ By `Adrin Jalali`_
| [
{
"components": [
{
"doc": "Transform a parameter value using a sub-pipeline and cache the result.\n\nParameters\n----------\nsub_pipeline : Pipeline\n The sub-pipeline to be used for transformation.\ncache : dict\n The cache dictionary to store the transformed values.\nparam_name : str\n ... | [
"sklearn/tests/test_pipeline.py::test_pipeline_raise_set_params_error",
"sklearn/tests/test_pipeline.py::test_set_pipeline_step_passthrough[None]",
"sklearn/tests/test_pipeline.py::test_set_pipeline_step_passthrough[passthrough]",
"sklearn/tests/test_pipeline.py::test_transform_input_pipeline[fit]",
"sklear... | [
"sklearn/tests/test_pipeline.py::test_pipeline_invalid_parameters",
"sklearn/tests/test_pipeline.py::test_pipeline_init_tuple",
"sklearn/tests/test_pipeline.py::test_pipeline_methods_anova",
"sklearn/tests/test_pipeline.py::test_pipeline_fit_params",
"sklearn/tests/test_pipeline.py::test_pipeline_sample_wei... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
FEAT allow metadata to be transformed in a Pipeline
Initial proposal: https://github.com/scikit-learn/scikit-learn/pull/28440#issuecomment-1952386217
xref: https://github.com/scikit-learn/scikit-learn/pull/28440#issuecomment-2018493177
This adds `transform_input` as a constructor argument to `Pipeline`, as:
```
transform_input : list of str, default=None
This enables transforming some input arguments to ``fit`` (other than ``X``)
to be transformed by the steps of the pipeline up to the step which requires
them. Requirement is defined via :ref:`metadata routing <metadata_routing>`.
This can be used to pass a validation set through the pipeline for instance.
See the example TBD for more details.
You can only set this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
```
It simply allows to transform metadata with fitted estimators up to the step which needs the metadata.
How does this look?
cc @lorentzenchr @ogrisel @amueller @betatim
Edit by @lorentzenchr:
The implemented version accepts tuples, for example:
```python
pipe.fit(X, y, X_val=(X_val0, X_val1), y_val=(y_val0, y_val1))
```
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/pipeline.py]
(definition of _cached_transform:)
def _cached_transform( sub_pipeline, *, cache, param_name, param_value, transform_params ):
"""Transform a parameter value using a sub-pipeline and cache the result.
Parameters
----------
sub_pipeline : Pipeline
The sub-pipeline to be used for transformation.
cache : dict
The cache dictionary to store the transformed values.
param_name : str
The name of the parameter to be transformed.
param_value : object
The value of the parameter to be transformed.
transform_params : dict
The metadata to be used for transformation. This passed to the
`transform` method of the sub-pipeline.
Returns
-------
transformed_value : object
The transformed value of the parameter."""
(definition of Pipeline._get_metadata_for_step:)
def _get_metadata_for_step(self, *, step_idx, step_params, all_params):
"""Get params (metadata) for step `name`.
This transforms the metadata up to this step if required, which is
indicated by the `transform_input` parameter.
If a param in `step_params` is included in the `transform_input` list,
it will be transformed.
Parameters
----------
step_idx : int
Index of the step in the pipeline.
step_params : dict
Parameters specific to the step. These are routed parameters, e.g.
`routed_params[name]`. If a parameter name here is included in the
`pipeline.transform_input`, then it will be transformed. Note that
these parameters are *after* routing, so the aliases are already
resolved.
all_params : dict
All parameters passed by the user. Here this is used to call
`transform` on the slice of the pipeline itself.
Returns
-------
dict
Parameters to be passed to the step. The ones which should be
transformed are transformed."""
[end of new definitions in sklearn/pipeline.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 18dc8630a7cbe1b591c12774949058b12157a39a | |
deepset-ai__haystack-7599 | 7,599 | deepset-ai/haystack | null | 8d04e530da24b5e5c8c11af29829714eeea47db2 | 2024-04-25T19:09:50Z | diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py
index adea7cc3ce..033f55a89a 100644
--- a/haystack/components/preprocessors/document_splitter.py
+++ b/haystack/components/preprocessors/document_splitter.py
@@ -1,5 +1,5 @@
from copy import deepcopy
-from typing import List, Literal
+from typing import Dict, List, Literal, Tuple
from more_itertools import windowed
@@ -53,7 +53,7 @@ def run(self, documents: List[Document]):
:returns: A dictionary with the following key:
- `documents`: List of documents with the split texts. A metadata field "source_id" is added to each
- document to keep track of the original document that was split. Other metadata are copied from the original
+ document to keep track of the original document that was split. Another metadata field "page_number" is added to each number to keep track of the page it belonged to in the original document. Other metadata are copied from the original
document.
:raises TypeError: if the input is not a list of Documents.
@@ -70,10 +70,12 @@ def run(self, documents: List[Document]):
f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None."
)
units = self._split_into_units(doc.content, self.split_by)
- text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)
+ text_splits, splits_pages = self._concatenate_units(units, self.split_length, self.split_overlap)
metadata = deepcopy(doc.meta)
metadata["source_id"] = doc.id
- split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
+ split_docs += self._create_docs_from_splits(
+ text_splits=text_splits, splits_pages=splits_pages, meta=metadata
+ )
return {"documents": split_docs}
def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]:
@@ -95,15 +97,40 @@ def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "pa
units[i] += split_at
return units
- def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:
+ def _concatenate_units(
+ self, elements: List[str], split_length: int, split_overlap: int
+ ) -> Tuple[List[str], List[int]]:
"""
- Concatenates the elements into parts of split_length units.
+ Concatenates the elements into parts of split_length units keeping track of the original page number that each element belongs.
"""
text_splits = []
+ splits_pages = []
+ cur_page = 1
segments = windowed(elements, n=split_length, step=split_length - split_overlap)
for seg in segments:
current_units = [unit for unit in seg if unit is not None]
txt = "".join(current_units)
if len(txt) > 0:
text_splits.append(txt)
- return text_splits
+ splits_pages.append(cur_page)
+ processed_units = current_units[: split_length - split_overlap]
+ if self.split_by == "page":
+ num_page_breaks = len(processed_units)
+ else:
+ num_page_breaks = sum(processed_unit.count("\f") for processed_unit in processed_units)
+ cur_page += num_page_breaks
+ return text_splits, splits_pages
+
+ @staticmethod
+ def _create_docs_from_splits(text_splits: List[str], splits_pages: List[int], meta: Dict) -> List[Document]:
+ """
+ Creates Document objects from text splits enriching them with page number and the metadata of the original document.
+ """
+ documents: List[Document] = []
+
+ for i, txt in enumerate(text_splits):
+ meta = deepcopy(meta)
+ doc = Document(content=txt, meta=meta)
+ doc.meta["page_number"] = splits_pages[i]
+ documents.append(doc)
+ return documents
diff --git a/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml b/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml
new file mode 100644
index 0000000000..8c97663cf1
--- /dev/null
+++ b/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml
@@ -0,0 +1,7 @@
+---
+highlights: >
+ Add the "page_number" field to the metadata of all output documents.
+
+enhancements:
+ - |
+ Now the DocumentSplitter adds the "page_number" field to the metadata of all output documents to keep track of the page of the original document it belongs to.
| diff --git a/test/components/preprocessors/test_document_splitter.py b/test/components/preprocessors/test_document_splitter.py
index 479f0d50ce..4874c25be3 100644
--- a/test/components/preprocessors/test_document_splitter.py
+++ b/test/components/preprocessors/test_document_splitter.py
@@ -141,3 +141,98 @@ def test_copy_metadata(self):
for doc, split_doc in zip(documents, result["documents"]):
assert doc.meta.items() <= split_doc.meta.items()
assert split_doc.content == "Text."
+
+ def test_add_page_number_to_metadata_with_no_overlap_word_split(self):
+ splitter = DocumentSplitter(split_by="word", split_length=2)
+ doc1 = Document(content="This is some text.\f This text is on another page.")
+ doc2 = Document(content="This content has two.\f\f page brakes.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 2, 2, 2, 1, 1, 3]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_no_overlap_sentence_split(self):
+ splitter = DocumentSplitter(split_by="sentence", split_length=1)
+ doc1 = Document(content="This is some text.\f This text is on another page.")
+ doc2 = Document(content="This content has two.\f\f page brakes.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 1, 1]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_no_overlap_passage_split(self):
+ splitter = DocumentSplitter(split_by="passage", split_length=1)
+ doc1 = Document(
+ content="This is a text with some words.\f There is a second sentence.\n\nAnd there is a third sentence.\n\nAnd more passages.\n\n\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+
+ expected_pages = [1, 2, 2, 2]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_no_overlap_page_split(self):
+ splitter = DocumentSplitter(split_by="page", split_length=1)
+ doc1 = Document(
+ content="This is a text with some words. There is a second sentence.\f And there is a third sentence.\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+ expected_pages = [1, 2, 3]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ splitter = DocumentSplitter(split_by="page", split_length=2)
+ doc1 = Document(
+ content="This is a text with some words. There is a second sentence.\f And there is a third sentence.\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+ expected_pages = [1, 3]
+
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_word_split(self):
+ splitter = DocumentSplitter(split_by="word", split_length=3, split_overlap=1)
+ doc1 = Document(content="This is some text. And\f this text is on another page.")
+ doc2 = Document(content="This content has two.\f\f page brakes.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 1, 2, 2, 1, 1, 3]
+ for doc, p in zip(result["documents"], expected_pages):
+ print(doc.content, doc.meta, p)
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_sentence_split(self):
+ splitter = DocumentSplitter(split_by="sentence", split_length=2, split_overlap=1)
+ doc1 = Document(content="This is some text. And this is more text.\f This text is on another page. End.")
+ doc2 = Document(content="This content has two.\f\f page brakes. More text.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 1, 2, 1, 1]
+ for doc, p in zip(result["documents"], expected_pages):
+ print(doc.content, doc.meta, p)
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_passage_split(self):
+ splitter = DocumentSplitter(split_by="passage", split_length=2, split_overlap=1)
+ doc1 = Document(
+ content="This is a text with some words.\f There is a second sentence.\n\nAnd there is a third sentence.\n\nAnd more passages.\n\n\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+
+ expected_pages = [1, 2, 2]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_page_split(self):
+ splitter = DocumentSplitter(split_by="page", split_length=2, split_overlap=1)
+ doc1 = Document(
+ content="This is a text with some words. There is a second sentence.\f And there is a third sentence.\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+ expected_pages = [1, 2, 3]
+
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
| diff --git a/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml b/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml
new file mode 100644
index 0000000000..8c97663cf1
--- /dev/null
+++ b/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml
@@ -0,0 +1,7 @@
+---
+highlights: >
+ Add the "page_number" field to the metadata of all output documents.
+
+enhancements:
+ - |
+ Now the DocumentSplitter adds the "page_number" field to the metadata of all output documents to keep track of the page of the original document it belongs to.
| [
{
"components": [
{
"doc": "Creates Document objects from text splits enriching them with page number and the metadata of the original document.",
"lines": [
125,
136
],
"name": "DocumentSplitter._create_docs_from_splits",
"signature": "def _crea... | [
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_no_overlap_word_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_no_overlap_sentence_split",
"test/components/preproc... | [
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_non_text_document",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_single_doc",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_empty_list",
"test/com... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: add page_number to metadata in DocumentSplitter
### Related Issues
- fixes #6705
### Proposed Changes:
<!--- In case of a bug: Describe what caused the issue and how you solved it -->
<!--- In case of a feature: Describe what did you add and how it works -->
I updated the `DocumentSplitter` methods so that it adds the "page_number" field to the metadata of output documents. This field contains the page number where you can find the document on the original document. The implementation is the same as the one on the [v1.25.x](https://github.com/deepset-ai/haystack/tree/v1.25.x).
### How did you test it?
<!-- unit tests, integration tests, manual verification, instructions for manual tests -->
I added some new unit test for testing this behaviour, but testing was mainly functional as it was based on a previously functioning code.
### Notes for the reviewer
<!-- E.g. point out section where the reviewer -->
This is my first contribution!!! The `.gitignore` change is to counter a VSCode extension I have that I am not able to eliminate the commit.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt) ✅
- I have updated the related issue with new insights and changes ✅
- I added unit tests and updated the docstrings ✅
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`. ✅
- I documented my code ✅
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue ✅
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/preprocessors/document_splitter.py]
(definition of DocumentSplitter._create_docs_from_splits:)
def _create_docs_from_splits(text_splits: List[str], splits_pages: List[int], meta: Dict) -> List[Document]:
"""Creates Document objects from text splits enriching them with page number and the metadata of the original document."""
[end of new definitions in haystack/components/preprocessors/document_splitter.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
feat: Add `page_number` to meta of Documents in `DocumentSplitter`
**Is your feature request related to a problem? Please describe.**
In Haystack v1 we had an option in the Preprocessor to add the original `page_number` to a Document's meta data when it was split into a chunk. This feature made down stream applications of visualizing the retrieved text from original files (e.g. PDFs) very easy and straightforward, so I'd like to see it in Haystack v2 as well.
**Describe the solution you'd like**
I would like to add the option to store the `page_number` in the meta info of the Document in the DocumentSplitter component. I believe we can use a similar/same implementation of calculating this like we did for the Preprocessor.
----------
This issue https://github.com/deepset-ai/haystack/issues/6706 is related since we currently do not keep page break information when converting a PDF file to a Haystack Document.
Hi @sjrl :)
This is my first issue. I'm trying to understand the requisites better. It seems to me that to keep the page number and the associated text, I suppose we have to keep the chunks in the metadata, e.g.:
```
units = self._split_into_units(doc.content, self.split_by)
text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)
metadata = deepcopy(doc.meta)
metadata["source_id"] = doc.id
metadata["page_number"] = units
split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
```
This has a few drawbacks:
- duplicated text, `doc.content` and `doc.metadata['page_number']` now have the same information, a possible solution would be to have `self._concatenate_units()` being triggered only when `doc.content` is called/needed
- the `metadata["page_number"]` has the page number 0 - but this can be easily fixed
Hi @davidsbatista!
Thanks for taking on this issue :)
I don't think we need to keep the associated text for the use case I am imagining. Basically what we are interested in Haystack terms would like this
1. Load a PDF File
2. Convert a PDF file to a single Document object --> PyPDFToDocument
3. Split the single Document into Chunked Documents (so Document to List of Documents) --> Document Splitter
- In this final step I would like to insert a `page_number` into the each Doc's metadata in the List of Documents that would tell me which page the chunked doc came from based on the original single Document. This tracking of `page_number` was done in Haystack v1 by counting and keeping track of page breaks (`\f`)
Does this make more sense?
Yes, that helps! So essentially, the `DocumentSplitter` should return a `List[Document]`.
But since it has as input:
`def run(self, documents: List[Document]):`
It should return a `List[List[Documents]]`
do you agree?
Hmm I'm not entirely sure. Initially I would say that it makes sense to return `List[List[Documents]]`, but often we want a flattened list to be returned since we will often directly write these documents to a document store which I believe expects `List[Document]` as input.
So I think to keep that workflow working we should return `List[Document]` or have some way of flattening the list. What do you think?
Hi Sebastian, I can pick up this again after finishing some high-priority issues I need to handle - maybe by the end of the week. Just to let you know, I haven't forgot it
No problem! Thanks for the update
Would be interested by a follow up about this 👀 If something I could do ?
@lambda-science there hasn't been any follow up, feel free to start working on it if you feel like it
This is my first issue, I will try add this feature, but don't know if I will manage to. :)
@CarlosFerLo let me know if you need help with this
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
deepset-ai__haystack-7574 | 7,574 | deepset-ai/haystack | null | 081757c6b9b1cadae5f783b3e1f872a9ecf87dc2 | 2024-04-22T16:30:03Z | diff --git a/haystack/dataclasses/sparse_embedding.py b/haystack/dataclasses/sparse_embedding.py
index 191f98dbcf..0d3ebfd614 100644
--- a/haystack/dataclasses/sparse_embedding.py
+++ b/haystack/dataclasses/sparse_embedding.py
@@ -20,6 +20,9 @@ def __init__(self, indices: List[int], values: List[float]):
self.indices = indices
self.values = values
+ def __eq__(self, other):
+ return self.indices == other.indices and self.values == other.values
+
def to_dict(self):
"""
Convert the sparse embedding to a dictionary.
diff --git a/releasenotes/notes/sparse-emb-eq-773ef04ae3ed83ea.yaml b/releasenotes/notes/sparse-emb-eq-773ef04ae3ed83ea.yaml
new file mode 100644
index 0000000000..4074680bcf
--- /dev/null
+++ b/releasenotes/notes/sparse-emb-eq-773ef04ae3ed83ea.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Add an `__eq__` method to `SparseEmbedding` class to compare two `SparseEmbedding` objects.
| diff --git a/test/dataclasses/test_sparse_embedding.py b/test/dataclasses/test_sparse_embedding.py
index f3fc889aa5..0617610189 100644
--- a/test/dataclasses/test_sparse_embedding.py
+++ b/test/dataclasses/test_sparse_embedding.py
@@ -21,3 +21,11 @@ def test_from_dict(self):
se = SparseEmbedding.from_dict({"indices": [0, 2, 4], "values": [0.1, 0.2, 0.3]})
assert se.indices == [0, 2, 4]
assert se.values == [0.1, 0.2, 0.3]
+
+ def test_eq(self):
+ se1 = SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3])
+ se2 = SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3])
+ assert se1 == se2
+
+ se3 = SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.4])
+ assert se1 != se3
| diff --git a/releasenotes/notes/sparse-emb-eq-773ef04ae3ed83ea.yaml b/releasenotes/notes/sparse-emb-eq-773ef04ae3ed83ea.yaml
new file mode 100644
index 0000000000..4074680bcf
--- /dev/null
+++ b/releasenotes/notes/sparse-emb-eq-773ef04ae3ed83ea.yaml
@@ -0,0 +1,4 @@
+---
+enhancements:
+ - |
+ Add an `__eq__` method to `SparseEmbedding` class to compare two `SparseEmbedding` objects.
| [
{
"components": [
{
"doc": "",
"lines": [
23,
24
],
"name": "SparseEmbedding.__eq__",
"signature": "def __eq__(self, other):",
"type": "function"
}
],
"file": "haystack/dataclasses/sparse_embedding.py"
}
] | [
"test/dataclasses/test_sparse_embedding.py::TestSparseEmbedding::test_eq"
] | [
"test/dataclasses/test_sparse_embedding.py::TestSparseEmbedding::test_init",
"test/dataclasses/test_sparse_embedding.py::TestSparseEmbedding::test_init_with_wrong_parameters",
"test/dataclasses/test_sparse_embedding.py::TestSparseEmbedding::test_to_dict",
"test/dataclasses/test_sparse_embedding.py::TestSparse... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
add `__eq__` method to `SparseEmbedding`
### Related Issues
While working on hybrid retrieval, I noticed we lack a proper way to compare `SparseEmbedding` objects.
### Proposed Changes:
Add an `__eq__` method to `SparseEmbedding` class to compare two `SparseEmbedding` objects.
### How did you test it?
CI, new test
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/dataclasses/sparse_embedding.py]
(definition of SparseEmbedding.__eq__:)
def __eq__(self, other):
[end of new definitions in haystack/dataclasses/sparse_embedding.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
embeddings-benchmark__mteb-469 | 469 | embeddings-benchmark/mteb | null | 21efc3b1c29ef8e4b47d3ad2d13ea3c04935d03e | 2024-04-20T18:14:12Z | diff --git a/README.md b/README.md
index ecd21c23ea..de204be7c5 100644
--- a/README.md
+++ b/README.md
@@ -133,15 +133,18 @@ Models should implement the following interface, implementing an `encode` functi
```python
class MyModel():
- def encode(self, sentences: list[str], **kwargs) -> list[np.ndarray] | list[torch.Tensor]:
- """
- Returns a list of embeddings for the given sentences.
-
+ def encode(
+ self, sentences: list[str], prompt: str, **kwargs: Any
+ ) -> torch.Tensor | np.ndarray:
+ """Encodes the given sentences using the encoder.
+
Args:
- sentences: List of sentences to encode
+ sentences: The sentences to encode.
+ prompt: The prompt to use. Useful for prompt-based models.
+ **kwargs: Additional arguments to pass to the encoder.
Returns:
- List of embeddings for the given sentences
+ The encoded sentences.
"""
pass
diff --git a/docs/mmteb/points/467.jsonl b/docs/mmteb/points/467.jsonl
new file mode 100644
index 0000000000..d69c949b28
--- /dev/null
+++ b/docs/mmteb/points/467.jsonl
@@ -0,0 +1,2 @@
+{"GitHub": "KennethEnevoldsen", "Coordination": 2}
+{"GitHub": "isaac-chung", "Review PR": 2}
\ No newline at end of file
diff --git a/mteb/encoder_interface.py b/mteb/encoder_interface.py
new file mode 100644
index 0000000000..e806524091
--- /dev/null
+++ b/mteb/encoder_interface.py
@@ -0,0 +1,60 @@
+from __future__ import annotations
+
+from typing import Any, Protocol, runtime_checkable
+
+import numpy as np
+import torch
+
+
+@runtime_checkable
+class Encoder(Protocol):
+ """The interface for an encoder in MTEB."""
+
+ def encode(
+ self, sentences: list[str], prompt: str, **kwargs: Any
+ ) -> torch.Tensor | np.ndarray:
+ """Encodes the given sentences using the encoder.
+
+ Args:
+ sentences: The sentences to encode.
+ prompt: The prompt to use. Useful for prompt-based models.
+ **kwargs: Additional arguments to pass to the encoder.
+
+ Returns:
+ The encoded sentences.
+ """
+ ...
+
+
+class EncoderWithQueryCorpusEncode(Encoder, Protocol):
+ """The interface for an encoder that supports encoding queries and a corpus."""
+
+ def encode_queries(
+ self, queries: list[str], prompt: str, **kwargs: Any
+ ) -> torch.Tensor | np.ndarray:
+ """Encodes the given queries using the encoder.
+
+ Args:
+ queries: The queries to encode.
+ prompt: The prompt to use. Useful for prompt-based models.
+ **kwargs: Additional arguments to pass to the encoder.
+
+ Returns:
+ The encoded queries.
+ """
+ ...
+
+ def encode_corpus(
+ self, corpus: list[str], prompt: str, **kwargs: Any
+ ) -> torch.Tensor | np.ndarray:
+ """Encodes the given corpus using the encoder.
+
+ Args:
+ corpus: The corpus to encode.
+ prompt: The prompt to use. Useful for prompt-based models.
+ **kwargs: Additional arguments to pass to the encoder.
+
+ Returns:
+ The encoded corpus.
+ """
+ ...
diff --git a/mteb/evaluation/evaluators/RetrievalEvaluator.py b/mteb/evaluation/evaluators/RetrievalEvaluator.py
index 28200137f5..40053d955d 100644
--- a/mteb/evaluation/evaluators/RetrievalEvaluator.py
+++ b/mteb/evaluation/evaluators/RetrievalEvaluator.py
@@ -211,6 +211,9 @@ def encode_corpus(self, corpus: List[Dict[str, str]], batch_size: int, **kwargs)
self.corpus_embeddings[kwargs["qid"]] = corpus_embeddings
return corpus_embeddings
+ def encode(self, sentences: List[str], **kwargs):
+ return self.model.encode(sentences, **kwargs)
+
def is_dres_compatible(model):
for method in ["encode_queries", "encode_corpus"]:
| diff --git a/tests/test_encoder_interfaces.py b/tests/test_encoder_interfaces.py
new file mode 100644
index 0000000000..42a308435f
--- /dev/null
+++ b/tests/test_encoder_interfaces.py
@@ -0,0 +1,17 @@
+from sentence_transformers import SentenceTransformer
+
+from mteb.encoder_interface import Encoder, EncoderWithQueryCorpusEncode
+from mteb.evaluation.evaluators.RetrievalEvaluator import DRESModel
+
+
+def test_sentence_is_encoder():
+ model = SentenceTransformer("average_word_embeddings_komninos")
+ assert isinstance(model, Encoder)
+
+
+def test_wrapped_sentence_is_encoder_with_query_corpus_encode():
+ model = SentenceTransformer("average_word_embeddings_komninos")
+ model = DRESModel(model)
+
+ assert isinstance(model, Encoder)
+ assert isinstance(model, EncoderWithQueryCorpusEncode)
| diff --git a/README.md b/README.md
index ecd21c23ea..de204be7c5 100644
--- a/README.md
+++ b/README.md
@@ -133,15 +133,18 @@ Models should implement the following interface, implementing an `encode` functi
```python
class MyModel():
- def encode(self, sentences: list[str], **kwargs) -> list[np.ndarray] | list[torch.Tensor]:
- """
- Returns a list of embeddings for the given sentences.
-
+ def encode(
+ self, sentences: list[str], prompt: str, **kwargs: Any
+ ) -> torch.Tensor | np.ndarray:
+ """Encodes the given sentences using the encoder.
+
Args:
- sentences: List of sentences to encode
+ sentences: The sentences to encode.
+ prompt: The prompt to use. Useful for prompt-based models.
+ **kwargs: Additional arguments to pass to the encoder.
Returns:
- List of embeddings for the given sentences
+ The encoded sentences.
"""
pass
diff --git a/docs/mmteb/points/467.jsonl b/docs/mmteb/points/467.jsonl
new file mode 100644
index 0000000000..d69c949b28
--- /dev/null
+++ b/docs/mmteb/points/467.jsonl
@@ -0,0 +1,2 @@
+{"GitHub": "KennethEnevoldsen", "Coordination": 2}
+{"GitHub": "isaac-chung", "Review PR": 2}
\ No newline at end of file
| [
{
"components": [
{
"doc": "The interface for an encoder in MTEB.",
"lines": [
10,
26
],
"name": "Encoder",
"signature": "class Encoder(Protocol):",
"type": "class"
},
{
"doc": "Encodes the given sentences using the en... | [
"tests/test_encoder_interfaces.py::test_sentence_is_encoder",
"tests/test_encoder_interfaces.py::test_wrapped_sentence_is_encoder_with_query_corpus_encode"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
fix: Added encoder interfaces
Added an outline for interfaces for MTEB.
Hope this can start a discussion on how we want the standard interface to look.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in mteb/encoder_interface.py]
(definition of Encoder:)
class Encoder(Protocol):
"""The interface for an encoder in MTEB."""
(definition of Encoder.encode:)
def encode( self, sentences: list[str], prompt: str, **kwargs: Any ) -> torch.Tensor | np.ndarray:
"""Encodes the given sentences using the encoder.
Args:
sentences: The sentences to encode.
prompt: The prompt to use. Useful for prompt-based models.
**kwargs: Additional arguments to pass to the encoder.
Returns:
The encoded sentences."""
(definition of EncoderWithQueryCorpusEncode:)
class EncoderWithQueryCorpusEncode(Encoder, Protocol):
"""The interface for an encoder that supports encoding queries and a corpus."""
(definition of EncoderWithQueryCorpusEncode.encode_queries:)
def encode_queries( self, queries: list[str], prompt: str, **kwargs: Any ) -> torch.Tensor | np.ndarray:
"""Encodes the given queries using the encoder.
Args:
queries: The queries to encode.
prompt: The prompt to use. Useful for prompt-based models.
**kwargs: Additional arguments to pass to the encoder.
Returns:
The encoded queries."""
(definition of EncoderWithQueryCorpusEncode.encode_corpus:)
def encode_corpus( self, corpus: list[str], prompt: str, **kwargs: Any ) -> torch.Tensor | np.ndarray:
"""Encodes the given corpus using the encoder.
Args:
corpus: The corpus to encode.
prompt: The prompt to use. Useful for prompt-based models.
**kwargs: Additional arguments to pass to the encoder.
Returns:
The encoded corpus."""
[end of new definitions in mteb/encoder_interface.py]
[start of new definitions in mteb/evaluation/evaluators/RetrievalEvaluator.py]
(definition of DRESModel.encode:)
def encode(self, sentences: List[str], **kwargs):
[end of new definitions in mteb/evaluation/evaluators/RetrievalEvaluator.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b580b95fc91a7e7e675d27c3ae9a9df64ddad169 | |
embeddings-benchmark__mteb-457 | 457 | embeddings-benchmark/mteb | null | 2fef0418d3baf443350d588246867b16184f6af5 | 2024-04-19T22:44:45Z | diff --git a/docs/mmteb/points/457.jsonl b/docs/mmteb/points/457.jsonl
new file mode 100644
index 0000000000..47007bf9d9
--- /dev/null
+++ b/docs/mmteb/points/457.jsonl
@@ -0,0 +1,4 @@
+{"GitHub": "orionw", "Bug fixes": 6}
+{"GitHub": "KennethEnevoldsen", "Review PR": 2}
+{"GitHub": "Muennighoff", "Review PR": 2}
+{"GitHub": "tomaarsen", "Review PR": 2}
\ No newline at end of file
diff --git a/mteb/abstasks/AbsTaskInstructionRetrieval.py b/mteb/abstasks/AbsTaskInstructionRetrieval.py
index 7a2c3d2336..ecb4989c5a 100644
--- a/mteb/abstasks/AbsTaskInstructionRetrieval.py
+++ b/mteb/abstasks/AbsTaskInstructionRetrieval.py
@@ -536,7 +536,7 @@ def _evaluate_monolingual(
# do the results by query and relevant docs only
all_results = []
- for query_id in tqdm.tqdm(list(queries.keys()), leave=True):
+ for query_id in tqdm.tqdm(list(queries.keys()), leave=False, desc="Retrieving"):
cur_queries = {query_id: queries[query_id]}
cur_instructions = {queries[query_id]: instructions[queries[query_id]]}
cur_docs = {
@@ -559,7 +559,7 @@ def _evaluate_monolingual(
"Time taken to retrieve: {:.2f} seconds".format(end_time - start_time)
)
- if kwargs.get("save_qrels", False):
+ if kwargs.get("save_predictions", False):
output_folder = kwargs.get("output_folder", "results")
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
@@ -576,12 +576,10 @@ def _evaluate_monolingual(
}
if lang is None:
qrels_save_path = (
- f"{output_folder}/{self.metadata_dict['name']}_qrels.json"
+ f"{output_folder}/{self.metadata_dict['name']}_predictions.json"
)
else:
- qrels_save_path = (
- f"{output_folder}/{self.metadata_dict['name']}_{lang}_qrels.json"
- )
+ qrels_save_path = f"{output_folder}/{self.metadata_dict['name']}_{lang}_predictions.json"
with open(qrels_save_path, "w") as f:
json.dump(results, f)
diff --git a/mteb/abstasks/AbsTaskRetrieval.py b/mteb/abstasks/AbsTaskRetrieval.py
index 586aa1e5b3..feca88cd48 100644
--- a/mteb/abstasks/AbsTaskRetrieval.py
+++ b/mteb/abstasks/AbsTaskRetrieval.py
@@ -278,7 +278,7 @@ def _evaluate_monolingual(
"Time taken to retrieve: {:.2f} seconds".format(end_time - start_time)
)
- if kwargs.get("save_qrels", False):
+ if kwargs.get("save_predictions", False):
output_folder = kwargs.get("output_folder", "results")
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
@@ -295,12 +295,10 @@ def _evaluate_monolingual(
}
if lang is None:
qrels_save_path = (
- f"{output_folder}/{self.metadata_dict['name']}_qrels.json"
+ f"{output_folder}/{self.metadata_dict['name']}_predictions.json"
)
else:
- qrels_save_path = (
- f"{output_folder}/{self.metadata_dict['name']}_{lang}_qrels.json"
- )
+ qrels_save_path = f"{output_folder}/{self.metadata_dict['name']}_{lang}_predictions.json"
with open(qrels_save_path, "w") as f:
json.dump(results, f)
diff --git a/mteb/evaluation/MTEB.py b/mteb/evaluation/MTEB.py
index 580931d781..643c859fa7 100644
--- a/mteb/evaluation/MTEB.py
+++ b/mteb/evaluation/MTEB.py
@@ -257,6 +257,9 @@ def run(
logger.info(f"\n\n## Evaluating {len(self.tasks)} tasks:")
self.print_selected_tasks()
evaluation_results = {}
+ original_tasks = (
+ self.tasks.copy()
+ ) # save them in case we re-use the object (e.g. for reranking)
while len(self.tasks) > 0:
task = self.tasks[0]
logger.info(
@@ -333,4 +336,6 @@ def run(
# empty memory
del self.tasks[0]
+ # restore original tasks
+ self.tasks = original_tasks
return evaluation_results
diff --git a/mteb/evaluation/evaluators/InstructionRetrievalEvaluator.py b/mteb/evaluation/evaluators/InstructionRetrievalEvaluator.py
index 55466de814..a1b1d1e582 100644
--- a/mteb/evaluation/evaluators/InstructionRetrievalEvaluator.py
+++ b/mteb/evaluation/evaluators/InstructionRetrievalEvaluator.py
@@ -19,11 +19,17 @@ def __call__(
) -> Dict[str, Dict[str, float]]:
if not self.retriever:
raise ValueError("Model/Technique has not been provided!")
- return self.retriever.search(
- corpus,
- queries,
- self.top_k,
- self.score_function,
- instructions=instructions,
- **kwargs,
- )
+
+ if self.is_cross_encoder:
+ return self.retriever.search_cross_encoder(
+ corpus, queries, self.top_k, instructions=instructions, **kwargs
+ )
+ else:
+ return self.retriever.search(
+ corpus,
+ queries,
+ self.top_k,
+ self.score_function,
+ instructions=instructions,
+ **kwargs,
+ )
diff --git a/mteb/evaluation/evaluators/RetrievalEvaluator.py b/mteb/evaluation/evaluators/RetrievalEvaluator.py
index 28200137f5..c910b02d1e 100644
--- a/mteb/evaluation/evaluators/RetrievalEvaluator.py
+++ b/mteb/evaluation/evaluators/RetrievalEvaluator.py
@@ -1,13 +1,15 @@
from __future__ import annotations
import heapq
+import json
import logging
from collections import defaultdict
from typing import Dict, List, Tuple
import pytrec_eval
import torch
-from sentence_transformers import SentenceTransformer
+import tqdm
+from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Transformer, WordEmbeddings
from .Evaluator import Evaluator
@@ -19,7 +21,12 @@
# Adapted from https://github.com/beir-cellar/beir/blob/f062f038c4bfd19a8ca942a9910b1e0d218759d4/beir/retrieval/search/dense/exact_search.py#L12
class DenseRetrievalExactSearch:
def __init__(
- self, model, batch_size: int = 128, corpus_chunk_size: int = 50000, **kwargs
+ self,
+ model,
+ batch_size: int = 128,
+ corpus_chunk_size: int = 50000,
+ previous_results: str = None,
+ **kwargs,
):
# Model is class that provides encode_corpus() and encode_queries()
self.model = model
@@ -30,12 +37,21 @@ def __init__(
"dot": "Dot Product",
}
self.corpus_chunk_size = corpus_chunk_size
+ self.previous_results = previous_results
self.show_progress_bar = kwargs.get("show_progress_bar", True)
self.convert_to_tensor = kwargs.get("convert_to_tensor", True)
self.save_corpus_embeddings = kwargs.get("save_corpus_embeddings", False)
self.corpus_embeddings = defaultdict(list)
self.results = {}
+ if self.previous_results is not None:
+ self.previous_results = self.load_results_file()
+
+ if isinstance(self.model, CrossEncoder):
+ # load the predict instance from the CrossEncoder
+ # custom functions can be used by extending the DenseRetrievalExactSearch class
+ self.predict = self.model.predict
+
def search(
self,
corpus: dict[str, dict[str, str]],
@@ -68,7 +84,6 @@ def search(
)
logger.info("Sorting Corpus by document length (Longest first)...")
-
corpus_ids = sorted(
corpus,
key=lambda k: len(corpus[k].get("title", "") + corpus[k].get("text", "")),
@@ -154,15 +169,105 @@ def search(
return self.results
+ def load_results_file(self):
+ # load the first stage results from file in format {qid: {doc_id: score}}
+ with open(self.previous_results, "r") as f:
+ previous_results = json.load(f)
+ assert isinstance(previous_results, dict)
+ assert isinstance(previous_results[list(previous_results.keys())[0]], dict)
+ return previous_results
+
+ def search_cross_encoder(
+ self,
+ corpus: Dict[str, Dict[str, str]],
+ queries: Dict[str, str],
+ top_k: int,
+ instructions: Dict[str, str] | None = None,
+ **kwargs,
+ ) -> Dict[str, Dict[str, float]]:
+ """This function provides support for reranker (or cross-encoder) models that encoder query and document at the same time (typically with attention).
+ Some notable examples include MonoBERT, MonoT5, RankLlama, etc.
+ Note: you must provide the path to the results to rerank to the __init__ function as `previous_results`
+ """
+ pairs = [] # create the pairs for reranking
+ for qid in queries.keys():
+ q_results = self.previous_results[qid]
+ # take the top-k only
+ q_results_sorted = {
+ k: v
+ for k, v in sorted(
+ q_results.items(), key=lambda item: item[1], reverse=True
+ )
+ }
+ top_n = [k for k, v in list(q_results_sorted.items())[:top_k]]
+ query = queries[qid]
+ for doc_id in top_n:
+ corpus_item = (
+ corpus[doc_id].get("title", "") + " " + corpus[doc_id]["text"]
+ ).strip()
+ pairs.append(
+ (
+ query,
+ corpus_item,
+ instructions[query] if instructions is not None else None,
+ qid,
+ doc_id,
+ )
+ )
+
+ logger.info(f"Reranking the top {top_k} in batches... This might take a while!")
+ itr = range(0, len(pairs), self.batch_size)
+
+ results = {qid: {} for qid in queries.keys()}
+ for batch_num, corpus_start_idx in enumerate(
+ tqdm.tqdm(itr, leave=False, disable=not self.show_progress_bar)
+ ):
+ corpus_end_idx = min(corpus_start_idx + self.batch_size, len(pairs))
+ cur_batch = pairs[corpus_start_idx:corpus_end_idx]
+
+ (
+ queries_in_pair,
+ corpus_in_pair,
+ instructions_in_pair,
+ query_ids,
+ corpus_ids,
+ ) = zip(*cur_batch)
+
+ assert (
+ len(queries_in_pair) == len(corpus_in_pair) == len(instructions_in_pair)
+ )
+
+ if isinstance(self.model, CrossEncoder):
+ # can't take instructions, so add them here
+ queries_in_pair = [
+ f"{q} {i}".strip()
+ for i, q in zip(instructions_in_pair, queries_in_pair)
+ ]
+ scores = self.model.predict(list(zip(queries_in_pair, corpus_in_pair)))
+ else:
+ # may use the instructions in a unique way, so give them also
+ scores = self.model.predict(
+ list(zip(queries_in_pair, corpus_in_pair, instructions_in_pair))
+ )
+
+ for i, score in enumerate(scores):
+ results[query_ids[i]][corpus_ids[i]] = float(score)
+
+ return results
+
+ def predict(self, queries, passages, **kwargs):
+ raise NotImplementedError(
+ "You must implement a predict method for your reranker model"
+ )
+
class DRESModel:
"""Dense Retrieval Exact Search (DRES) requires an encode_queries & encode_corpus method.
This class converts a model with just an .encode method into DRES format.
"""
- def __init__(self, model, sep=" ", **kwargs):
+ def __init__(self, model, **kwargs):
self.model = model
- self.sep = sep
self.use_sbert_model = isinstance(model, SentenceTransformer)
self.save_corpus_embeddings = kwargs.get("save_corpus_embeddings", False)
self.corpus_embeddings = {}
@@ -177,7 +282,20 @@ def encode_queries(self, queries: List[str], batch_size: int, **kwargs):
logger.warning(
"Queries will not be truncated. This could lead to memory issues. In that case please lower the batch_size."
)
- return self.model.encode(queries, batch_size=batch_size, **kwargs)
+ if "instructions" in kwargs:
+ if kwargs["instructions"] is not None:
+ queries = [
+ (query + " " + instruction).strip()
+ for query, instruction in zip(queries, kwargs["instructions"])
+ ]
+ new_kwargs = {
+ k: v for k, v in kwargs.items() if k not in ["instructions", "qid"]
+ }
+ else:
+ # can't just delete, cuz assign by reference on kwargs
+ new_kwargs = kwargs
+
+ return self.model.encode(queries, batch_size=batch_size, **new_kwargs)
def encode_corpus(self, corpus: List[Dict[str, str]], batch_size: int, **kwargs):
if (
@@ -189,24 +307,32 @@ def encode_corpus(self, corpus: List[Dict[str, str]], batch_size: int, **kwargs)
if isinstance(corpus, dict):
sentences = [
- (corpus["title"][i] + self.sep + corpus["text"][i]).strip()
+ (corpus["title"][i] + " " + corpus["text"][i]).strip()
if "title" in corpus
else corpus["text"][i].strip()
for i in range(len(corpus["text"]))
]
else:
sentences = [
- (doc["title"] + self.sep + doc["text"]).strip()
+ (doc["title"] + " " + doc["text"]).strip()
if "title" in doc
else doc["text"].strip()
for doc in corpus
]
+ if "instructions" in kwargs: # not used on the doc side
+ new_kwargs = {
+ k: v for k, v in kwargs.items() if k not in ["instructions", "qid"]
+ }
+ else:
+ # can't just delete, cuz assign by reference on kwargs
+ new_kwargs = kwargs
+
corpus_embeddings = self.model.encode(
- sentences, batch_size=batch_size, **kwargs
+ sentences, batch_size=batch_size, **new_kwargs
)
if self.save_corpus_embeddings and "qid" in kwargs:
- if type(corpus_embeddings) == torch.tensor:
+ if isinstance(corpus_embeddings, torch.tensor):
corpus_embeddings = corpus_embeddings.cpu().detach()
self.corpus_embeddings[kwargs["qid"]] = corpus_embeddings
return corpus_embeddings
@@ -220,6 +346,13 @@ def is_dres_compatible(model):
return True
+def is_cross_encoder_compatible(model):
+ op = getattr(model, "predict", None)
+ if not (callable(op)):
+ return False
+ return True
+
+
# Adapted from https://github.com/beir-cellar/beir/blob/f062f038c4bfd19a8ca942a9910b1e0d218759d4/beir/retrieval/evaluation.py#L9
class RetrievalEvaluator(Evaluator):
def __init__(
@@ -230,7 +363,14 @@ def __init__(
**kwargs,
):
super().__init__(**kwargs)
- if is_dres_compatible(retriever):
+ self.is_cross_encoder = False
+ if is_cross_encoder_compatible(retriever):
+ logger.info(
+ "The custom predict function of the model will be used if not a SentenceTransformer CrossEncoder"
+ )
+ self.retriever = DenseRetrievalExactSearch(retriever, **kwargs)
+ self.is_cross_encoder = True
+ elif is_dres_compatible(retriever):
logger.info(
"The custom encode_queries and encode_corpus functions of the model will be used"
)
@@ -238,38 +378,23 @@ def __init__(
else:
self.retriever = DenseRetrievalExactSearch(DRESModel(retriever), **kwargs)
self.k_values = k_values
- self.top_k = max(k_values)
+ self.top_k = (
+ max(k_values) if "top_k" not in kwargs else kwargs["top_k"]
+ ) # can lower it if reranking
self.score_function = score_function
def __call__(
- self, corpus: dict[str, dict[str, str]], queries: dict[str, str], **kwargs
+ self, corpus: dict[str, dict[str, str]], queries: dict[str, str]
) -> dict[str, dict[str, float]]:
if not self.retriever:
raise ValueError("Model/Technique has not been provided!")
- return self.retriever.search(
- corpus, queries, self.top_k, self.score_function, **kwargs
- )
- def rerank(
- self,
- corpus: dict[str, dict[str, str]],
- queries: dict[str, str],
- results: dict[str, dict[str, float]],
- top_k: int,
- ) -> dict[str, dict[str, float]]:
- new_corpus = {}
-
- for query_id in results:
- if len(results[query_id]) > top_k:
- for doc_id, _ in sorted(
- results[query_id].items(), key=lambda item: item[1], reverse=True
- )[:top_k]:
- new_corpus[doc_id] = corpus[doc_id]
- else:
- for doc_id in results[query_id]:
- new_corpus[doc_id] = corpus[doc_id]
-
- return self.retriever.search(new_corpus, queries, top_k, self.score_function)
+ if self.is_cross_encoder:
+ return self.retriever.search_cross_encoder(corpus, queries, self.top_k)
+ else:
+ return self.retriever.search(
+ corpus, queries, self.top_k, self.score_function
+ )
@staticmethod
def evaluate(
diff --git a/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json b/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json
new file mode 100644
index 0000000000..8162cfdef7
--- /dev/null
+++ b/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json
@@ -0,0 +1,43 @@
+{
+ "dataset_revision": "ec0fa4fe99da2ff19ca1214b7966684033a58814",
+ "mteb_dataset_name": "NFCorpus",
+ "mteb_version": "1.6.34",
+ "test": {
+ "evaluation_time": 0.94,
+ "map_at_1": 0.05123,
+ "map_at_10": 0.09931,
+ "map_at_100": 0.09931,
+ "map_at_1000": 0.09931,
+ "map_at_20": 0.09931,
+ "map_at_3": 0.08578,
+ "map_at_5": 0.09931,
+ "mrr_at_1": 0.44272,
+ "mrr_at_10": 0.52394,
+ "mrr_at_100": 0.52394,
+ "mrr_at_1000": 0.52394,
+ "mrr_at_20": 0.52394,
+ "mrr_at_3": 0.51187,
+ "mrr_at_5": 0.52394,
+ "ndcg_at_1": 0.41641,
+ "ndcg_at_10": 0.25973,
+ "ndcg_at_100": 0.17476,
+ "ndcg_at_1000": 0.17186,
+ "ndcg_at_20": 0.21087,
+ "ndcg_at_3": 0.3807,
+ "ndcg_at_5": 0.35035,
+ "precision_at_1": 0.43653,
+ "precision_at_10": 0.1517,
+ "precision_at_100": 0.01517,
+ "precision_at_1000": 0.00152,
+ "precision_at_20": 0.07585,
+ "precision_at_3": 0.36326,
+ "precision_at_5": 0.30341,
+ "recall_at_1": 0.05123,
+ "recall_at_10": 0.11965,
+ "recall_at_100": 0.11965,
+ "recall_at_1000": 0.11965,
+ "recall_at_20": 0.11965,
+ "recall_at_3": 0.09663,
+ "recall_at_5": 0.11965
+ }
+}
\ No newline at end of file
| diff --git a/tests/test_mteb_rerank.py b/tests/test_mteb_rerank.py
new file mode 100644
index 0000000000..3c664efdfa
--- /dev/null
+++ b/tests/test_mteb_rerank.py
@@ -0,0 +1,391 @@
+from __future__ import annotations
+
+import json
+import logging
+import os
+
+from sentence_transformers import CrossEncoder, SentenceTransformer
+
+from mteb import MTEB
+
+logging.basicConfig(level=logging.INFO)
+
+
+def test_mteb_rerank():
+ # Test that reranking works
+ # unfortunately, we need all the query ids to pretend to have this
+ scifact_keys = [
+ "1",
+ "3",
+ "5",
+ "13",
+ "36",
+ "42",
+ "48",
+ "49",
+ "50",
+ "51",
+ "53",
+ "54",
+ "56",
+ "57",
+ "70",
+ "72",
+ "75",
+ "94",
+ "99",
+ "100",
+ "113",
+ "115",
+ "118",
+ "124",
+ "127",
+ "128",
+ "129",
+ "130",
+ "132",
+ "133",
+ "137",
+ "141",
+ "142",
+ "143",
+ "146",
+ "148",
+ "163",
+ "171",
+ "179",
+ "180",
+ "183",
+ "185",
+ "198",
+ "208",
+ "212",
+ "213",
+ "216",
+ "217",
+ "218",
+ "219",
+ "230",
+ "232",
+ "233",
+ "236",
+ "237",
+ "238",
+ "239",
+ "248",
+ "249",
+ "261",
+ "268",
+ "269",
+ "274",
+ "275",
+ "279",
+ "294",
+ "295",
+ "298",
+ "300",
+ "303",
+ "312",
+ "314",
+ "324",
+ "327",
+ "338",
+ "343",
+ "350",
+ "354",
+ "362",
+ "380",
+ "384",
+ "385",
+ "386",
+ "388",
+ "399",
+ "410",
+ "411",
+ "415",
+ "421",
+ "431",
+ "436",
+ "437",
+ "439",
+ "440",
+ "443",
+ "452",
+ "475",
+ "478",
+ "491",
+ "501",
+ "502",
+ "507",
+ "508",
+ "513",
+ "514",
+ "516",
+ "517",
+ "521",
+ "525",
+ "527",
+ "528",
+ "532",
+ "533",
+ "535",
+ "536",
+ "539",
+ "540",
+ "544",
+ "549",
+ "551",
+ "552",
+ "554",
+ "560",
+ "569",
+ "575",
+ "577",
+ "578",
+ "587",
+ "589",
+ "593",
+ "597",
+ "598",
+ "613",
+ "619",
+ "623",
+ "628",
+ "636",
+ "637",
+ "641",
+ "644",
+ "649",
+ "659",
+ "660",
+ "674",
+ "684",
+ "690",
+ "691",
+ "692",
+ "693",
+ "700",
+ "702",
+ "715",
+ "716",
+ "718",
+ "721",
+ "723",
+ "727",
+ "728",
+ "729",
+ "742",
+ "743",
+ "744",
+ "756",
+ "759",
+ "768",
+ "770",
+ "775",
+ "781",
+ "783",
+ "784",
+ "785",
+ "793",
+ "800",
+ "805",
+ "808",
+ "811",
+ "814",
+ "820",
+ "821",
+ "823",
+ "830",
+ "831",
+ "832",
+ "834",
+ "837",
+ "839",
+ "845",
+ "847",
+ "852",
+ "859",
+ "870",
+ "873",
+ "879",
+ "880",
+ "882",
+ "887",
+ "903",
+ "904",
+ "907",
+ "911",
+ "913",
+ "914",
+ "921",
+ "922",
+ "936",
+ "956",
+ "957",
+ "960",
+ "967",
+ "971",
+ "975",
+ "982",
+ "985",
+ "993",
+ "1012",
+ "1014",
+ "1019",
+ "1020",
+ "1021",
+ "1024",
+ "1029",
+ "1041",
+ "1049",
+ "1062",
+ "1086",
+ "1088",
+ "1089",
+ "1099",
+ "1100",
+ "1104",
+ "1107",
+ "1110",
+ "1121",
+ "1130",
+ "1132",
+ "1137",
+ "1140",
+ "1144",
+ "1146",
+ "1150",
+ "1163",
+ "1175",
+ "1179",
+ "1180",
+ "1185",
+ "1187",
+ "1191",
+ "1194",
+ "1196",
+ "1197",
+ "1199",
+ "1200",
+ "1202",
+ "1204",
+ "1207",
+ "1213",
+ "1216",
+ "1221",
+ "1225",
+ "1226",
+ "1232",
+ "1241",
+ "1245",
+ "1259",
+ "1262",
+ "1266",
+ "1270",
+ "1271",
+ "1272",
+ "1273",
+ "1274",
+ "1278",
+ "1279",
+ "1280",
+ "1281",
+ "1282",
+ "1290",
+ "1292",
+ "1298",
+ "1303",
+ "1316",
+ "1319",
+ "1320",
+ "1332",
+ "1335",
+ "1336",
+ "1337",
+ "1339",
+ "1344",
+ "1352",
+ "1359",
+ "1362",
+ "1363",
+ "1368",
+ "1370",
+ "1379",
+ "1382",
+ "1385",
+ "1389",
+ "1395",
+ ]
+ model = CrossEncoder("cross-encoder/ms-marco-TinyBERT-L-2-v2")
+ eval = MTEB(
+ tasks=[
+ "SciFact",
+ ]
+ )
+ # create fake first stage results
+ with open("tmp.json", "w") as f:
+ f.write(
+ json.dumps(
+ {
+ i: {
+ # just two random documents so we can see it works
+ "4983": 0.1,
+ "18670": 0.9,
+ "19238": 0.01,
+ }
+ for i in scifact_keys
+ }
+ )
+ )
+ eval.run(
+ model,
+ output_folder="tests/results",
+ overwrite_results=True,
+ eval_splits=["test"],
+ top_k=2,
+ previous_results="tmp.json",
+ save_predictions=True,
+ )
+ os.remove("tmp.json")
+
+ # read in the results
+ with open("tests/results/SciFact_predictions.json") as f:
+ results = json.load(f)
+
+ # check that only the top two results are re-orderd
+ assert "19238" not in results["1"]
+ assert "4983" in results["1"]
+ assert "18670" in results["1"]
+
+
+def test_reranker_same_ndcg1():
+ de = SentenceTransformer("average_word_embeddings_komninos")
+ ce = CrossEncoder("cross-encoder/ms-marco-TinyBERT-L-2-v2")
+ eval = MTEB(tasks=["SciFact"])
+ eval.run(
+ de,
+ output_folder="tests/results/stage1",
+ overwrite_results=True,
+ save_predictions=True,
+ eval_splits=["test"],
+ )
+ eval.run(
+ ce,
+ output_folder="tests/results/stage2",
+ overwrite_results=True,
+ previous_results="tests/results/stage1/SciFact_predictions.json",
+ save_predictions=False,
+ eval_splits=["test"],
+ top_k=1, # don't allow it to rerank more than 1 so we can check for top_1 being the same
+ )
+
+ # read in stage 1 and stage two and check ndcg@1 is the same
+ with open("tests/results/stage1/SciFact.json") as f:
+ stage1 = json.load(f)
+
+ with open("tests/results/stage2/SciFact.json") as f:
+ stage2 = json.load(f)
+
+ assert (
+ stage1["test"]["ndcg_at_1"] == stage2["test"]["ndcg_at_1"]
+ ), f"{stage1['test']['ndcg_at_1']} != {stage2['test']['ndcg_at_1']}"
| diff --git a/docs/mmteb/points/457.jsonl b/docs/mmteb/points/457.jsonl
new file mode 100644
index 0000000000..47007bf9d9
--- /dev/null
+++ b/docs/mmteb/points/457.jsonl
@@ -0,0 +1,4 @@
+{"GitHub": "orionw", "Bug fixes": 6}
+{"GitHub": "KennethEnevoldsen", "Review PR": 2}
+{"GitHub": "Muennighoff", "Review PR": 2}
+{"GitHub": "tomaarsen", "Review PR": 2}
\ No newline at end of file
diff --git a/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json b/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json
new file mode 100644
index 0000000000..8162cfdef7
--- /dev/null
+++ b/results/cross_encoder__ms-marco-TinyBERT-L-2-v2/NFCorpus.json
@@ -0,0 +1,43 @@
+{
+ "dataset_revision": "ec0fa4fe99da2ff19ca1214b7966684033a58814",
+ "mteb_dataset_name": "NFCorpus",
+ "mteb_version": "1.6.34",
+ "test": {
+ "evaluation_time": 0.94,
+ "map_at_1": 0.05123,
+ "map_at_10": 0.09931,
+ "map_at_100": 0.09931,
+ "map_at_1000": 0.09931,
+ "map_at_20": 0.09931,
+ "map_at_3": 0.08578,
+ "map_at_5": 0.09931,
+ "mrr_at_1": 0.44272,
+ "mrr_at_10": 0.52394,
+ "mrr_at_100": 0.52394,
+ "mrr_at_1000": 0.52394,
+ "mrr_at_20": 0.52394,
+ "mrr_at_3": 0.51187,
+ "mrr_at_5": 0.52394,
+ "ndcg_at_1": 0.41641,
+ "ndcg_at_10": 0.25973,
+ "ndcg_at_100": 0.17476,
+ "ndcg_at_1000": 0.17186,
+ "ndcg_at_20": 0.21087,
+ "ndcg_at_3": 0.3807,
+ "ndcg_at_5": 0.35035,
+ "precision_at_1": 0.43653,
+ "precision_at_10": 0.1517,
+ "precision_at_100": 0.01517,
+ "precision_at_1000": 0.00152,
+ "precision_at_20": 0.07585,
+ "precision_at_3": 0.36326,
+ "precision_at_5": 0.30341,
+ "recall_at_1": 0.05123,
+ "recall_at_10": 0.11965,
+ "recall_at_100": 0.11965,
+ "recall_at_1000": 0.11965,
+ "recall_at_20": 0.11965,
+ "recall_at_3": 0.09663,
+ "recall_at_5": 0.11965
+ }
+}
\ No newline at end of file
| [
{
"components": [
{
"doc": "",
"lines": [
172,
178
],
"name": "DenseRetrievalExactSearch.load_results_file",
"signature": "def load_results_file(self):",
"type": "function"
},
{
"doc": "This function provides support f... | [
"tests/test_mteb_rerank.py::test_mteb_rerank",
"tests/test_mteb_rerank.py::test_reranker_same_ndcg1"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add rerankers to mteb, following `CrossEncoder`
See #450 for discussion that led to this (thanks everyone!)
This PR:
- Adds CrossEncoders/Rerankers support to mteb for Retrieval and InstructionRetrieval tasks
- Adds tests for rerankers
- Works with the `CrossEncoders` class from `SentenceTransformers`
Example usage:
```python
from mteb import MTEB
from sentence_transformers import CrossEncoder, SentenceTransformer
model = CrossEncoder("cross-encoder/ms-marco-TinyBERT-L-2-v2")
first_stage_model = SentenceTransformer("all-MiniLM-L6-v2")
for task in ["NFCorpus", "News21InstructionRetrieval"]:
eval_splits = ["dev"] if task == "MSMARCO" else ["test"]
evaluation = MTEB(
tasks=[task], task_langs=["en"]
)
evaluation.run(
model, eval_splits=eval_splits, top_k=10, first_stage=first_stage_model
)
```
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in mteb/evaluation/evaluators/RetrievalEvaluator.py]
(definition of DenseRetrievalExactSearch.load_results_file:)
def load_results_file(self):
(definition of DenseRetrievalExactSearch.search_cross_encoder:)
def search_cross_encoder( self, corpus: Dict[str, Dict[str, str]], queries: Dict[str, str], top_k: int, instructions: Dict[str, str] | None = None, **kwargs, ) -> Dict[str, Dict[str, float]]:
"""This function provides support for reranker (or cross-encoder) models that encoder query and document at the same time (typically with attention).
Some notable examples include MonoBERT, MonoT5, RankLlama, etc.
Note: you must provide the path to the results to rerank to the __init__ function as `previous_results`"""
(definition of DenseRetrievalExactSearch.predict:)
def predict(self, queries, passages, **kwargs):
(definition of is_cross_encoder_compatible:)
def is_cross_encoder_compatible(model):
[end of new definitions in mteb/evaluation/evaluators/RetrievalEvaluator.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b580b95fc91a7e7e675d27c3ae9a9df64ddad169 | |
tobymao__sqlglot-3331 | 3,331 | tobymao/sqlglot | null | 7f9cb2d2fe2c09e94f9dbaafcc0a808428b5b21c | 2024-04-16T20:07:46Z | diff --git a/sqlglot/dialects/prql.py b/sqlglot/dialects/prql.py
index 3ee91a822c..84475b2011 100644
--- a/sqlglot/dialects/prql.py
+++ b/sqlglot/dialects/prql.py
@@ -55,6 +55,20 @@ class Parser(parser.Parser):
"SORT": lambda self, query: self._parse_order_by(query),
}
+ def _parse_equality(self) -> t.Optional[exp.Expression]:
+ eq = self._parse_tokens(self._parse_comparison, self.EQUALITY)
+ if not isinstance(eq, (exp.EQ, exp.NEQ)):
+ return eq
+
+ # https://prql-lang.org/book/reference/spec/null.html
+ if isinstance(eq.expression, exp.Null):
+ is_exp = exp.Is(this=eq.this, expression=eq.expression)
+ return is_exp if isinstance(eq, exp.EQ) else exp.Not(this=is_exp)
+ if isinstance(eq.this, exp.Null):
+ is_exp = exp.Is(this=eq.expression, expression=eq.this)
+ return is_exp if isinstance(eq, exp.EQ) else exp.Not(this=is_exp)
+ return eq
+
def _parse_statement(self) -> t.Optional[exp.Expression]:
expression = self._parse_expression()
expression = expression if expression else self._parse_query()
| diff --git a/tests/dialects/test_prql.py b/tests/dialects/test_prql.py
index 69e2e287fb..1a0eec25c0 100644
--- a/tests/dialects/test_prql.py
+++ b/tests/dialects/test_prql.py
@@ -58,3 +58,11 @@ def test_prql(self):
self.validate_identity(
"from x intersect y", "SELECT * FROM x INTERSECT ALL SELECT * FROM y"
)
+ self.validate_identity(
+ "from x filter a == null filter null != b",
+ "SELECT * FROM x WHERE a IS NULL AND NOT b IS NULL",
+ )
+ self.validate_identity(
+ "from x filter (a > 1 || null != b || c != null)",
+ "SELECT * FROM x WHERE (a > 1 OR NOT b IS NULL OR NOT c IS NULL)",
+ )
| [] | [
"tests/dialects/test_prql.py::TestPRQL::test_prql"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat(prql): handle NULL
https://prql-lang.org/book/reference/spec/null.html
Both `NULL == x` or `x == NULL` is allowed in PRQL.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
deepset-ai__haystack-7549 | 7,549 | deepset-ai/haystack | null | 48c7c6ad264778aab693bc271061c65e9619b65d | 2024-04-12T23:01:43Z | diff --git a/haystack/document_stores/in_memory/document_store.py b/haystack/document_stores/in_memory/document_store.py
index 3575b3c93a..31a516df7d 100644
--- a/haystack/document_stores/in_memory/document_store.py
+++ b/haystack/document_stores/in_memory/document_store.py
@@ -1,9 +1,10 @@
+import math
import re
-from typing import Any, Dict, Iterable, List, Literal, Optional
+from collections import Counter
+from dataclasses import dataclass
+from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple
import numpy as np
-from haystack_bm25 import rank_bm25
-from tqdm.auto import tqdm
from haystack import default_from_dict, default_to_dict, logging
from haystack.dataclasses import Document
@@ -24,6 +25,19 @@
DOT_PRODUCT_SCALING_FACTOR = 100
+@dataclass
+class BM25DocumentStats:
+ """
+ A dataclass for managing document statistics for BM25 retrieval.
+
+ :param freq_token: A Counter of token frequencies in the document.
+ :param doc_len: Number of tokens in the document.
+ """
+
+ freq_token: Dict[str, int]
+ doc_len: int
+
+
class InMemoryDocumentStore:
"""
Stores data in-memory. It's ephemeral and cannot be saved to disk.
@@ -50,15 +64,206 @@ def __init__(
To choose the most appropriate function, look for information about your embedding model.
"""
self.storage: Dict[str, Document] = {}
- self._bm25_tokenization_regex = bm25_tokenization_regex
+ self.bm25_tokenization_regex = bm25_tokenization_regex
self.tokenizer = re.compile(bm25_tokenization_regex).findall
- algorithm_class = getattr(rank_bm25, bm25_algorithm)
- if algorithm_class is None:
- raise ValueError(f"BM25 algorithm '{bm25_algorithm}' not found.")
- self.bm25_algorithm = algorithm_class
+
+ self.bm25_algorithm = bm25_algorithm
+ self.bm25_algorithm_inst = self._dispatch_bm25()
self.bm25_parameters = bm25_parameters or {}
self.embedding_similarity_function = embedding_similarity_function
+ # Global BM25 statistics
+ self._avg_doc_len: float = 0.0
+ self._freq_vocab_for_idf: Counter = Counter()
+
+ # Per-document statistics
+ self._bm25_attr: Dict[str, BM25DocumentStats] = {}
+
+ def _dispatch_bm25(self):
+ """
+ Select the correct BM25 algorithm based on user specification.
+
+ :returns:
+ The BM25 algorithm method.
+ """
+ table = {"BM25Okapi": self._score_bm25okapi, "BM25L": self._score_bm25l, "BM25Plus": self._score_bm25plus}
+
+ if self.bm25_algorithm not in table:
+ raise ValueError(f"BM25 algorithm '{self.bm25_algorithm}' is not supported.")
+ return table[self.bm25_algorithm]
+
+ def _tokenize_bm25(self, text: str) -> List[str]:
+ """
+ Tokenize text using the BM25 tokenization regex.
+
+ Here we explicitly create a tokenization method to encapsulate
+ all pre-processing logic used to create BM25 tokens, such as
+ lowercasing. This helps track the exact tokenization process
+ used for BM25 scoring at any given time.
+
+ :param text:
+ The text to tokenize.
+ :returns:
+ A list of tokens.
+ """
+ text = text.lower()
+ return self.tokenizer(text)
+
+ def _score_bm25l(self, query: str, documents: List[Document]) -> List[Tuple[Document, float]]:
+ """
+ Calculate BM25L scores for the given query and filtered documents.
+
+ :param query:
+ The query string.
+ :param documents:
+ The list of documents to score, should be produced by
+ the filter_documents method; may be an empty list.
+ :returns:
+ A list of tuples, each containing a Document and its BM25L score.
+ """
+ k = self.bm25_parameters.get("k1", 1.5)
+ b = self.bm25_parameters.get("b", 0.75)
+ delta = self.bm25_parameters.get("delta", 0.5)
+
+ def _compute_idf(tokens: List[str]) -> Dict[str, float]:
+ """Per-token IDF computation for all tokens."""
+ idf = {}
+ n_corpus = len(self._bm25_attr)
+ for tok in tokens:
+ n = self._freq_vocab_for_idf.get(tok, 0)
+ idf[tok] = math.log((n_corpus + 1.0) / (n + 0.5)) * int(n != 0)
+ return idf
+
+ def _compute_tf(token: str, freq: Dict[str, int], doc_len: int) -> float:
+ """Per-token BM25L computation."""
+ freq_term = freq.get(token, 0.0)
+ ctd = freq_term / (1 - b + b * doc_len / self._avg_doc_len)
+ return (1.0 + k) * (ctd + delta) / (k + ctd + delta)
+
+ idf = _compute_idf(self._tokenize_bm25(query))
+ bm25_attr = {doc.id: self._bm25_attr[doc.id] for doc in documents}
+
+ ret = []
+ for doc in documents:
+ doc_stats = bm25_attr[doc.id]
+ freq = doc_stats.freq_token
+ doc_len = doc_stats.doc_len
+
+ score = 0.0
+ for tok in idf.keys(): # pylint: disable=consider-using-dict-items
+ score += idf[tok] * _compute_tf(tok, freq, doc_len)
+ ret.append((doc, score))
+
+ return ret
+
+ def _score_bm25okapi(self, query: str, documents: List[Document]) -> List[Tuple[Document, float]]:
+ """
+ Calculate BM25Okapi scores for the given query and filtered documents.
+
+ :param query:
+ The query string.
+ :param documents:
+ The list of documents to score, should be produced by
+ the filter_documents method; may be an empty list.
+ :returns:
+ A list of tuples, each containing a Document and its BM25L score.
+ """
+ k = self.bm25_parameters.get("k1", 1.5)
+ b = self.bm25_parameters.get("b", 0.75)
+ epsilon = self.bm25_parameters.get("epsilon", 0.25)
+
+ def _compute_idf(tokens: List[str]) -> Dict[str, float]:
+ """Per-token IDF computation for all tokens."""
+ sum_idf = 0.0
+ neg_idf_tokens = []
+
+ # Although this is a global statistic, we compute it here
+ # to make the computation more self-contained. And the
+ # complexity is O(vocab_size), which is acceptable.
+ idf = {}
+ for tok, n in self._freq_vocab_for_idf.items():
+ idf[tok] = math.log((len(self._bm25_attr) - n + 0.5) / (n + 0.5))
+ sum_idf += idf[tok]
+ if idf[tok] < 0:
+ neg_idf_tokens.append(tok)
+
+ eps = epsilon * sum_idf / len(self._freq_vocab_for_idf)
+ for tok in neg_idf_tokens:
+ idf[tok] = eps
+ return {tok: idf.get(tok, 0.0) for tok in tokens}
+
+ def _compute_tf(token: str, freq: Dict[str, int], doc_len: int) -> float:
+ """Per-token BM25L computation."""
+ freq_term = freq.get(token, 0.0)
+ freq_norm = freq_term + k * (1 - b + b * doc_len / self._avg_doc_len)
+ return freq_term * (1.0 + k) / freq_norm
+
+ idf = _compute_idf(self._tokenize_bm25(query))
+ bm25_attr = {doc.id: self._bm25_attr[doc.id] for doc in documents}
+
+ ret = []
+ for doc in documents:
+ doc_stats = bm25_attr[doc.id]
+ freq = doc_stats.freq_token
+ doc_len = doc_stats.doc_len
+
+ score = 0.0
+ for tok in idf.keys():
+ score += idf[tok] * _compute_tf(tok, freq, doc_len)
+ ret.append((doc, score))
+
+ return ret
+
+ def _score_bm25plus(self, query: str, documents: List[Document]) -> List[Tuple[Document, float]]:
+ """
+ Calculate BM25+ scores for the given query and filtered documents.
+
+ This implementation follows the document on BM25 Wikipedia page,
+ which add 1 (smoothing factor) to document frequency when computing IDF.
+
+ :param query:
+ The query string.
+ :param documents:
+ The list of documents to score, should be produced by
+ the filter_documents method; may be an empty list.
+ :returns:
+ A list of tuples, each containing a Document and its BM25+ score.
+ """
+ k = self.bm25_parameters.get("k1", 1.5)
+ b = self.bm25_parameters.get("b", 0.75)
+ delta = self.bm25_parameters.get("delta", 1.0)
+
+ def _compute_idf(tokens: List[str]) -> Dict[str, float]:
+ """Per-token IDF computation."""
+ idf = {}
+ n_corpus = len(self._bm25_attr)
+ for tok in tokens:
+ n = self._freq_vocab_for_idf.get(tok, 0)
+ idf[tok] = math.log(1 + (n_corpus - n + 0.5) / (n + 0.5)) * int(n != 0)
+ return idf
+
+ def _compute_tf(token: str, freq: Dict[str, int], doc_len: float) -> float:
+ """Per-token normalized term frequency."""
+ freq_term = freq.get(token, 0.0)
+ freq_damp = k * (1 - b + b * doc_len / self._avg_doc_len)
+ return freq_term * (1.0 + k) / (freq_term + freq_damp) + delta
+
+ idf = _compute_idf(self._tokenize_bm25(query))
+ bm25_attr = {doc.id: self._bm25_attr[doc.id] for doc in documents}
+
+ ret = []
+ for doc in documents:
+ doc_stats = bm25_attr[doc.id]
+ freq = doc_stats.freq_token
+ doc_len = doc_stats.doc_len
+
+ score = 0.0
+ for tok in idf.keys(): # pylint: disable=consider-using-dict-items
+ score += idf[tok] * _compute_tf(tok, freq, doc_len)
+ ret.append((doc, score))
+
+ return ret
+
def to_dict(self) -> Dict[str, Any]:
"""
Serializes the component to a dictionary.
@@ -68,8 +273,8 @@ def to_dict(self) -> Dict[str, Any]:
"""
return default_to_dict(
self,
- bm25_tokenization_regex=self._bm25_tokenization_regex,
- bm25_algorithm=self.bm25_algorithm.__name__,
+ bm25_tokenization_regex=self.bm25_tokenization_regex,
+ bm25_algorithm=self.bm25_algorithm,
bm25_parameters=self.bm25_parameters,
embedding_similarity_function=self.embedding_similarity_function,
)
@@ -132,7 +337,36 @@ def write_documents(self, documents: List[Document], policy: DuplicatePolicy = D
logger.warning("ID '{document_id}' already exists", document_id=document.id)
written_documents -= 1
continue
+
+ # Since the statistics are updated in an incremental manner,
+ # we need to explicitly remove the existing document to revert
+ # the statistics before updating them with the new document.
+ if document.id in self.storage.keys():
+ self.delete_documents([document.id])
+
+ # This processing logic is extracted from the original bm25_retrieval method.
+ # Since we are creating index incrementally before the first retrieval,
+ # we need to determine what content to use for indexing here, not at query time.
+ if document.content is not None:
+ if document.dataframe is not None:
+ logger.warning(
+ "Document '{document_id}' has both text and dataframe content. "
+ "Using text content for retrieval and skipping dataframe content.",
+ document_id=document.id,
+ )
+ tokens = self._tokenize_bm25(document.content)
+ elif document.dataframe is not None:
+ str_content = document.dataframe.astype(str)
+ csv_content = str_content.to_csv(index=False)
+ tokens = self._tokenize_bm25(csv_content)
+ else:
+ tokens = []
+
self.storage[document.id] = document
+
+ self._bm25_attr[document.id] = BM25DocumentStats(Counter(tokens), len(tokens))
+ self._freq_vocab_for_idf.update(set(tokens))
+ self._avg_doc_len = (len(tokens) + self._avg_doc_len * len(self._bm25_attr)) / (len(self._bm25_attr) + 1)
return written_documents
def delete_documents(self, document_ids: List[str]) -> None:
@@ -146,6 +380,17 @@ def delete_documents(self, document_ids: List[str]) -> None:
continue
del self.storage[doc_id]
+ # Update statistics accordingly
+ doc_stats = self._bm25_attr.pop(doc_id)
+ freq = doc_stats.freq_token
+ doc_len = doc_stats.doc_len
+
+ self._freq_vocab_for_idf.subtract(Counter(freq.keys()))
+ try:
+ self._avg_doc_len = (self._avg_doc_len * (len(self._bm25_attr) + 1) - doc_len) / len(self._bm25_attr)
+ except ZeroDivisionError:
+ self._avg_doc_len = 0
+
def bm25_retrieval(
self, query: str, filters: Optional[Dict[str, Any]] = None, top_k: int = 10, scale_score: bool = False
) -> List[Document]:
@@ -174,65 +419,33 @@ def bm25_retrieval(
filters = {"operator": "AND", "conditions": [content_type_filter, filters]}
else:
filters = content_type_filter
- all_documents = self.filter_documents(filters=filters)
- # Lowercase all documents
- lower_case_documents = []
- for doc in all_documents:
- if doc.content is None and doc.dataframe is None:
- logger.info(
- "Document '{document_id}' has no text or dataframe content. Skipping it.", document_id=doc.id
- )
- else:
- if doc.content is not None:
- lower_case_documents.append(doc.content.lower())
- if doc.dataframe is not None:
- logger.warning(
- "Document '{document_id}' has both text and dataframe content. "
- "Using text content and skipping dataframe content.",
- document_id=doc.id,
- )
- continue
- if doc.dataframe is not None:
- str_content = doc.dataframe.astype(str)
- csv_content = str_content.to_csv(index=False)
- lower_case_documents.append(csv_content.lower())
-
- # Tokenize the entire content of the DocumentStore
- tokenized_corpus = [
- self.tokenizer(doc) for doc in tqdm(lower_case_documents, unit=" docs", desc="Ranking by BM25...")
- ]
- if len(tokenized_corpus) == 0:
+ all_documents = self.filter_documents(filters=filters)
+ if len(all_documents) == 0:
logger.info("No documents found for BM25 retrieval. Returning empty list.")
return []
- # initialize BM25
- bm25_scorer = self.bm25_algorithm(tokenized_corpus, **self.bm25_parameters)
- # tokenize query
- tokenized_query = self.tokenizer(query.lower())
- # get scores for the query against the corpus
- docs_scores = bm25_scorer.get_scores(tokenized_query)
- if scale_score:
- docs_scores = [expit(float(score / BM25_SCALING_FACTOR)) for score in docs_scores]
- # get the last top_k indexes and reverse them
- top_docs_positions = np.argsort(docs_scores)[-top_k:][::-1]
+ results = sorted(self.bm25_algorithm_inst(query, all_documents), key=lambda x: x[1], reverse=True)[:top_k]
# BM25Okapi can return meaningful negative values, so they should not be filtered out when scale_score is False.
# It's the only algorithm supported by rank_bm25 at the time of writing (2024) that can return negative scores.
# see https://github.com/deepset-ai/haystack/pull/6889 for more context.
- negatives_are_valid = self.bm25_algorithm is rank_bm25.BM25Okapi and not scale_score
+ negatives_are_valid = self.bm25_algorithm == "BM25Okapi" and not scale_score
# Create documents with the BM25 score to return them
return_documents = []
- for i in top_docs_positions:
- doc = all_documents[i]
- score = docs_scores[i]
+ for doc, score in results:
+ if scale_score:
+ score = expit(score / BM25_SCALING_FACTOR)
+
if not negatives_are_valid and score <= 0.0:
continue
+
doc_fields = doc.to_dict()
doc_fields["score"] = score
return_document = Document.from_dict(doc_fields)
return_documents.append(return_document)
+
return return_documents
def embedding_retrieval(
diff --git a/pyproject.toml b/pyproject.toml
index 50e0f5a6ed..e8b65ac534 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,7 +47,6 @@ classifiers = [
]
dependencies = [
"pandas",
- "haystack-bm25",
"tqdm",
"tenacity",
"lazy-imports",
diff --git a/releasenotes/notes/enhance-inmemorydocumentstore-bm25-incremental-indexing-ebaf43b7163f3a24.yaml b/releasenotes/notes/enhance-inmemorydocumentstore-bm25-incremental-indexing-ebaf43b7163f3a24.yaml
new file mode 100644
index 0000000000..1c8d3f3800
--- /dev/null
+++ b/releasenotes/notes/enhance-inmemorydocumentstore-bm25-incremental-indexing-ebaf43b7163f3a24.yaml
@@ -0,0 +1,7 @@
+---
+enhancements:
+ - |
+ Re-implement `InMemoryDocumentStore` BM25 search with incremental indexing by avoiding re-creating
+ the entire inverse index for every new query. This change also removes the dependency on
+ `haystack_bm25`. Please refer to [PR #7549](https://github.com/deepset-ai/haystack/pull/7549)
+ for the full context.
| diff --git a/test/document_stores/test_in_memory.py b/test/document_stores/test_in_memory.py
index 3b31c13db3..1d633b98f1 100644
--- a/test/document_stores/test_in_memory.py
+++ b/test/document_stores/test_in_memory.py
@@ -3,7 +3,6 @@
import pandas as pd
import pytest
-from haystack_bm25 import rank_bm25
from haystack import Document
from haystack.document_stores.errors import DocumentStoreError, DuplicateDocumentError
@@ -64,9 +63,13 @@ def test_from_dict(self, mock_regex):
store = InMemoryDocumentStore.from_dict(data)
mock_regex.compile.assert_called_with("custom_regex")
assert store.tokenizer
- assert store.bm25_algorithm.__name__ == "BM25Plus"
+ assert store.bm25_algorithm == "BM25Plus"
assert store.bm25_parameters == {"key": "value"}
+ def test_invalid_bm25_algorithm(self):
+ with pytest.raises(ValueError, match="BM25 algorithm 'invalid' is not supported"):
+ InMemoryDocumentStore(bm25_algorithm="invalid")
+
def test_write_documents(self, document_store):
docs = [Document(id="1")]
assert document_store.write_documents(docs) == 1
@@ -113,7 +116,18 @@ def test_bm25_retrieval_with_different_top_k(self, document_store: InMemoryDocum
results = document_store.bm25_retrieval(query="languages", top_k=3)
assert len(results) == 3
- # Test two queries and make sure the results are different
+ def test_bm25_plus_retrieval(self):
+ doc_store = InMemoryDocumentStore(bm25_algorithm="BM25Plus")
+ docs = [
+ Document(content="Hello world"),
+ Document(content="Haystack supports multiple languages"),
+ Document(content="Python is a popular programming language"),
+ ]
+ doc_store.write_documents(docs)
+
+ results = doc_store.bm25_retrieval(query="language", top_k=1)
+ assert len(results) == 1
+ assert results[0].content == "Python is a popular programming language"
def test_bm25_retrieval_with_two_queries(self, document_store: InMemoryDocumentStore):
# Tests if the bm25_retrieval method returns different documents for different queries.
@@ -166,7 +180,7 @@ def test_bm25_retrieval_with_scale_score(self, document_store: InMemoryDocumentS
results = document_store.bm25_retrieval(query="Python", top_k=1, scale_score=False)
assert results[0].score != results1[0].score
- def test_bm25_retrieval_with_non_scaled_BM25Okapi(self, document_store: InMemoryDocumentStore):
+ def test_bm25_retrieval_with_non_scaled_BM25Okapi(self):
# Highly repetitive documents make BM25Okapi return negative scores, which should not be filtered if the
# scores are not scaled
docs = [
@@ -188,9 +202,9 @@ def test_bm25_retrieval_with_non_scaled_BM25Okapi(self, document_store: InMemory
to try the new features as soon as they are merged."""
),
]
+ document_store = InMemoryDocumentStore(bm25_algorithm="BM25Okapi")
document_store.write_documents(docs)
- document_store.bm25_algorithm = rank_bm25.BM25Okapi
results1 = document_store.bm25_retrieval(query="Haystack installation", top_k=10, scale_score=False)
assert len(results1) == 3
assert all(res.score < 0.0 for res in results1)
@@ -215,11 +229,11 @@ def test_bm25_retrieval_with_text_and_table_content(self, document_store: InMemo
table_content = pd.DataFrame({"language": ["Python", "Java"], "use": ["Data Science", "Web Development"]})
document = Document(content="Gardening", dataframe=table_content)
docs = [
- document,
Document(content="Python"),
Document(content="Bird Watching"),
Document(content="Gardening"),
Document(content="Java"),
+ document,
]
document_store.write_documents(docs)
results = document_store.bm25_retrieval(query="Gardening", top_k=2)
| diff --git a/pyproject.toml b/pyproject.toml
index 50e0f5a6ed..e8b65ac534 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,7 +47,6 @@ classifiers = [
]
dependencies = [
"pandas",
- "haystack-bm25",
"tqdm",
"tenacity",
"lazy-imports",
diff --git a/releasenotes/notes/enhance-inmemorydocumentstore-bm25-incremental-indexing-ebaf43b7163f3a24.yaml b/releasenotes/notes/enhance-inmemorydocumentstore-bm25-incremental-indexing-ebaf43b7163f3a24.yaml
new file mode 100644
index 0000000000..1c8d3f3800
--- /dev/null
+++ b/releasenotes/notes/enhance-inmemorydocumentstore-bm25-incremental-indexing-ebaf43b7163f3a24.yaml
@@ -0,0 +1,7 @@
+---
+enhancements:
+ - |
+ Re-implement `InMemoryDocumentStore` BM25 search with incremental indexing by avoiding re-creating
+ the entire inverse index for every new query. This change also removes the dependency on
+ `haystack_bm25`. Please refer to [PR #7549](https://github.com/deepset-ai/haystack/pull/7549)
+ for the full context.
| [
{
"components": [
{
"doc": "A dataclass for managing document statistics for BM25 retrieval.\n\n:param freq_token: A Counter of token frequencies in the document.\n:param doc_len: Number of tokens in the document.",
"lines": [
29,
38
],
"name": "BM25Docu... | [
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_from_dict",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_invalid_bm25_algorithm",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_text_and_table_content"
] | [
"[",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_no_filters",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_equal",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_equal_with_dataframe",
"test/document_stores/te... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
perf: enhanced `InMemoryDocumentStore` BM25 query efficiency with incremental indexing
### Related Issues
This proposal was first made as a stand-alone Haystack document store integration, which is linked to [issue number 218](https://github.com/deepset-ai/haystack-integrations/issues/218) in [haystack-integrations repo](https://github.com/deepset-ai/haystack-integrations).
### Proposed Changes:
Instead of reindexing with every new query, I choose to perform incremental indexing on document changes. This results in modifications primarily to `write_documents`, `delet_documents`, and `bm25_retrieval`.
<!--- In case of a bug: Describe what caused the issue and how you solved it -->
<!--- In case of a feature: Describe what did you add and how it works -->
### How did you test it?
As suggested by @julian-risch, the change should be non-breaking. Therefore, the test was performed with test cases implemented in `test/document_stores/test_in_memory.py`. 81 test cases passed and 3 cases failed with explainable causes:
- `TestMemoryDocumentStore::test_from_dict`: `self.bm25_algorithm` now points to the string literal of the algorithm name, instead of a `BM25` object. So, it does not have the `.__name__` attribute.
- `TestMemoryDocumentStore::test_bm25_retrieval_with_non_scaled_BM25Okapi`: this is caused by the pytest fixture initializing a BM25L document store and the test case modified the underlying algorithm not from initializer, making the underlying algorithm being BM25L instead of Okapi BM25. Changing the initialized algorithm will result in a pass.
- `TestMemoryDocumentStore::test_bm25_retrieval_with_text_and_table_content`: the non-matching documents have tied scores. The test case got a "lucky pass" because NumPy quick-sort alters the document orders even when the scores are the same.
<!-- unit tests, integration tests, manual verification, instructions for manual tests -->
### Notes for the reviewer
Any suggestion is appreciated `:)`
<!-- E.g. point out section where the reviewer -->
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/document_stores/in_memory/document_store.py]
(definition of BM25DocumentStats:)
class BM25DocumentStats:
"""A dataclass for managing document statistics for BM25 retrieval.
:param freq_token: A Counter of token frequencies in the document.
:param doc_len: Number of tokens in the document."""
(definition of InMemoryDocumentStore._dispatch_bm25:)
def _dispatch_bm25(self):
"""Select the correct BM25 algorithm based on user specification.
:returns:
The BM25 algorithm method."""
(definition of InMemoryDocumentStore._tokenize_bm25:)
def _tokenize_bm25(self, text: str) -> List[str]:
"""Tokenize text using the BM25 tokenization regex.
Here we explicitly create a tokenization method to encapsulate
all pre-processing logic used to create BM25 tokens, such as
lowercasing. This helps track the exact tokenization process
used for BM25 scoring at any given time.
:param text:
The text to tokenize.
:returns:
A list of tokens."""
(definition of InMemoryDocumentStore._score_bm25l:)
def _score_bm25l(self, query: str, documents: List[Document]) -> List[Tuple[Document, float]]:
"""Calculate BM25L scores for the given query and filtered documents.
:param query:
The query string.
:param documents:
The list of documents to score, should be produced by
the filter_documents method; may be an empty list.
:returns:
A list of tuples, each containing a Document and its BM25L score."""
(definition of InMemoryDocumentStore._score_bm25l._compute_idf:)
def _compute_idf(tokens: List[str]) -> Dict[str, float]:
"""Per-token IDF computation for all tokens."""
(definition of InMemoryDocumentStore._score_bm25l._compute_tf:)
def _compute_tf(token: str, freq: Dict[str, int], doc_len: int) -> float:
"""Per-token BM25L computation."""
(definition of InMemoryDocumentStore._score_bm25okapi:)
def _score_bm25okapi(self, query: str, documents: List[Document]) -> List[Tuple[Document, float]]:
"""Calculate BM25Okapi scores for the given query and filtered documents.
:param query:
The query string.
:param documents:
The list of documents to score, should be produced by
the filter_documents method; may be an empty list.
:returns:
A list of tuples, each containing a Document and its BM25L score."""
(definition of InMemoryDocumentStore._score_bm25okapi._compute_idf:)
def _compute_idf(tokens: List[str]) -> Dict[str, float]:
"""Per-token IDF computation for all tokens."""
(definition of InMemoryDocumentStore._score_bm25okapi._compute_tf:)
def _compute_tf(token: str, freq: Dict[str, int], doc_len: int) -> float:
"""Per-token BM25L computation."""
(definition of InMemoryDocumentStore._score_bm25plus:)
def _score_bm25plus(self, query: str, documents: List[Document]) -> List[Tuple[Document, float]]:
"""Calculate BM25+ scores for the given query and filtered documents.
This implementation follows the document on BM25 Wikipedia page,
which add 1 (smoothing factor) to document frequency when computing IDF.
:param query:
The query string.
:param documents:
The list of documents to score, should be produced by
the filter_documents method; may be an empty list.
:returns:
A list of tuples, each containing a Document and its BM25+ score."""
(definition of InMemoryDocumentStore._score_bm25plus._compute_idf:)
def _compute_idf(tokens: List[str]) -> Dict[str, float]:
"""Per-token IDF computation."""
(definition of InMemoryDocumentStore._score_bm25plus._compute_tf:)
def _compute_tf(token: str, freq: Dict[str, int], doc_len: float) -> float:
"""Per-token normalized term frequency."""
[end of new definitions in haystack/document_stores/in_memory/document_store.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
tobymao__sqlglot-3303 | 3,303 | tobymao/sqlglot | null | 3c97d3437ea573fd3764eab05ed619353fced580 | 2024-04-11T16:46:48Z | diff --git a/sqlglot/dataframe/sql/functions.py b/sqlglot/dataframe/sql/functions.py
index b4dd2c6d9a..81b7d61ccd 100644
--- a/sqlglot/dataframe/sql/functions.py
+++ b/sqlglot/dataframe/sql/functions.py
@@ -536,7 +536,7 @@ def year(col: ColumnOrName) -> Column:
def quarter(col: ColumnOrName) -> Column:
- return Column.invoke_anonymous_function(col, "QUARTER")
+ return Column.invoke_expression_over_column(col, expression.Quarter)
def month(col: ColumnOrName) -> Column:
diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py
index 5de56d41e7..ce03ae436b 100644
--- a/sqlglot/dialects/teradata.py
+++ b/sqlglot/dialects/teradata.py
@@ -36,6 +36,10 @@ def func(self: Teradata.Generator, expression: exp.DateAdd | exp.DateSub) -> str
return func
+def _quarter_sql(self: Teradata.Generator, expression: exp.Quarter) -> str:
+ return self.sql(exp.Extract(this="QUARTER", expression=expression.this))
+
+
class Teradata(Dialect):
SUPPORTS_SEMI_ANTI_JOIN = False
TYPED_DIVISION = True
@@ -241,6 +245,7 @@ class Generator(generator.Generator):
exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
exp.DateAdd: _date_add_sql("+"),
exp.DateSub: _date_add_sql("-"),
+ exp.Quarter: _quarter_sql,
}
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 5efa43a196..0c7335168a 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5486,6 +5486,10 @@ class ApproxQuantile(Quantile):
arg_types = {"this": True, "quantile": True, "accuracy": False, "weight": False}
+class Quarter(Func):
+ pass
+
+
class Rand(Func):
_sql_names = ["RAND", "RANDOM"]
arg_types = {"this": False}
diff --git a/sqlglot/optimizer/annotate_types.py b/sqlglot/optimizer/annotate_types.py
index c85ef1c512..c105341202 100644
--- a/sqlglot/optimizer/annotate_types.py
+++ b/sqlglot/optimizer/annotate_types.py
@@ -212,6 +212,7 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
exp.Month,
exp.Week,
exp.Year,
+ exp.Quarter,
},
exp.DataType.Type.VARCHAR: {
exp.ArrayConcat,
| diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index a16bd993d3..b652541f7c 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -66,6 +66,7 @@ def test_snowflake(self):
self.validate_identity("SELECT DAYOFYEAR(CURRENT_TIMESTAMP())")
self.validate_identity("LISTAGG(data['some_field'], ',')")
self.validate_identity("WEEKOFYEAR(tstamp)")
+ self.validate_identity("SELECT QUARTER(CURRENT_TIMESTAMP())")
self.validate_identity("SELECT SUM(amount) FROM mytable GROUP BY ALL")
self.validate_identity("WITH x AS (SELECT 1 AS foo) SELECT foo FROM IDENTIFIER('x')")
self.validate_identity("WITH x AS (SELECT 1 AS foo) SELECT IDENTIFIER('foo') FROM x")
diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py
index 0635d15d1f..010b683c5a 100644
--- a/tests/dialects/test_teradata.py
+++ b/tests/dialects/test_teradata.py
@@ -292,3 +292,10 @@ def test_time(self):
"bigquery": "EXTRACT(MONTH FROM x)",
},
)
+ self.validate_all(
+ "CAST(TO_CHAR(x, 'Q') AS INT)",
+ read={
+ "snowflake": "quarter(x)",
+ "teradata": "CAST(TO_CHAR(x, 'Q') AS INT)",
+ },
+ )
diff --git a/tests/test_expressions.py b/tests/test_expressions.py
index 4f57299041..85560b8ddb 100644
--- a/tests/test_expressions.py
+++ b/tests/test_expressions.py
@@ -634,6 +634,7 @@ def test_functions(self):
self.assertIsInstance(parse_one("MAX(a)"), exp.Max)
self.assertIsInstance(parse_one("MIN(a)"), exp.Min)
self.assertIsInstance(parse_one("MONTH(a)"), exp.Month)
+ self.assertIsInstance(parse_one("QUARTER(a)"), exp.Quarter)
self.assertIsInstance(parse_one("POSITION(' ' IN a)"), exp.StrPosition)
self.assertIsInstance(parse_one("POW(a, 2)"), exp.Pow)
self.assertIsInstance(parse_one("POWER(a, 2)"), exp.Pow)
| [] | [
"tests/dialects/test_teradata.py::TestTeradata::test_time",
"tests/test_expressions.py::TestExpressions::test_functions"
] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::Te... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat(teradata): handle transpile of quarter function
Add a `QUARTER` expression type. I tried to base this off of the places where `exp.Month` is used but may have missed something
Teradata doesn't have a `QUARTER` function to extract the quarter from a date, so transform it to an `EXTRACT(QUARTER ...)` expression. Teradata also doesn't support `EXTRACT(QUARTER ...)`, but this is already handled in `extract_sql`.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
tobymao__sqlglot-3299 | 3,299 | tobymao/sqlglot | null | b28cd89823a38f3a90c57344a44719364d66d723 | 2024-04-11T00:58:44Z | diff --git a/sqlglot/dialects/prql.py b/sqlglot/dialects/prql.py
index afc7a4e4ee..3ee91a822c 100644
--- a/sqlglot/dialects/prql.py
+++ b/sqlglot/dialects/prql.py
@@ -104,6 +104,17 @@ def _parse_take(self, query: exp.Query) -> t.Optional[exp.Query]:
num = self._parse_number() # TODO: TAKE for ranges a..b
return query.limit(num) if num else None
+ def _parse_ordered(
+ self, parse_method: t.Optional[t.Callable] = None
+ ) -> t.Optional[exp.Ordered]:
+ asc = self._match(TokenType.PLUS)
+ desc = self._match(TokenType.DASH) or (asc and False)
+ term = term = super()._parse_ordered(parse_method=parse_method)
+ if term and desc:
+ term.set("desc", True)
+ term.set("nulls_first", False)
+ return term
+
def _parse_order_by(self, query: exp.Select) -> t.Optional[exp.Query]:
l_brace = self._match(TokenType.L_BRACE)
expressions = self._parse_csv(self._parse_ordered)
| diff --git a/tests/dialects/test_prql.py b/tests/dialects/test_prql.py
index b193784318..69e2e287fb 100644
--- a/tests/dialects/test_prql.py
+++ b/tests/dialects/test_prql.py
@@ -5,48 +5,56 @@ class TestPRQL(Validator):
dialect = "prql"
def test_prql(self):
- self.validate_identity("FROM x", "SELECT * FROM x")
- self.validate_identity("FROM x DERIVE a + 1", "SELECT *, a + 1 FROM x")
- self.validate_identity("FROM x DERIVE x = a + 1", "SELECT *, a + 1 AS x FROM x")
- self.validate_identity("FROM x DERIVE {a + 1}", "SELECT *, a + 1 FROM x")
- self.validate_identity("FROM x DERIVE {x = a + 1, b}", "SELECT *, a + 1 AS x, b FROM x")
+ self.validate_identity("from x", "SELECT * FROM x")
+ self.validate_identity("from x derive a + 1", "SELECT *, a + 1 FROM x")
+ self.validate_identity("from x derive x = a + 1", "SELECT *, a + 1 AS x FROM x")
+ self.validate_identity("from x derive {a + 1}", "SELECT *, a + 1 FROM x")
+ self.validate_identity("from x derive {x = a + 1, b}", "SELECT *, a + 1 AS x, b FROM x")
self.validate_identity(
- "FROM x DERIVE {x = a + 1, b} SELECT {y = x, 2}", "SELECT a + 1 AS y, 2 FROM x"
+ "from x derive {x = a + 1, b} select {y = x, 2}", "SELECT a + 1 AS y, 2 FROM x"
)
- self.validate_identity("FROM x TAKE 10", "SELECT * FROM x LIMIT 10")
- self.validate_identity("FROM x TAKE 10 TAKE 5", "SELECT * FROM x LIMIT 5")
- self.validate_identity("FROM x FILTER age > 25", "SELECT * FROM x WHERE age > 25")
+ self.validate_identity("from x take 10", "SELECT * FROM x LIMIT 10")
+ self.validate_identity("from x take 10 take 5", "SELECT * FROM x LIMIT 5")
+ self.validate_identity("from x filter age > 25", "SELECT * FROM x WHERE age > 25")
self.validate_identity(
- "FROM x DERIVE {x = a + 1, b} FILTER age > 25",
+ "from x derive {x = a + 1, b} filter age > 25",
"SELECT *, a + 1 AS x, b FROM x WHERE age > 25",
)
- self.validate_identity("FROM x FILTER dept != 'IT'", "SELECT * FROM x WHERE dept <> 'IT'")
+ self.validate_identity("from x filter dept != 'IT'", "SELECT * FROM x WHERE dept <> 'IT'")
self.validate_identity(
- "FROM x FILTER p == 'product' SELECT { a, b }", "SELECT a, b FROM x WHERE p = 'product'"
+ "from x filter p == 'product' select { a, b }", "SELECT a, b FROM x WHERE p = 'product'"
)
self.validate_identity(
- "FROM x FILTER age > 25 FILTER age < 27", "SELECT * FROM x WHERE age > 25 AND age < 27"
+ "from x filter age > 25 filter age < 27", "SELECT * FROM x WHERE age > 25 AND age < 27"
)
self.validate_identity(
- "FROM x FILTER (age > 25 && age < 27)", "SELECT * FROM x WHERE (age > 25 AND age < 27)"
+ "from x filter (age > 25 && age < 27)", "SELECT * FROM x WHERE (age > 25 AND age < 27)"
)
self.validate_identity(
- "FROM x FILTER (age > 25 || age < 27)", "SELECT * FROM x WHERE (age > 25 OR age < 27)"
+ "from x filter (age > 25 || age < 27)", "SELECT * FROM x WHERE (age > 25 OR age < 27)"
)
self.validate_identity(
- "FROM x FILTER (age > 25 || age < 22) FILTER age > 26 FILTER age < 27",
+ "from x filter (age > 25 || age < 22) filter age > 26 filter age < 27",
"SELECT * FROM x WHERE ((age > 25 OR age < 22) AND age > 26) AND age < 27",
)
self.validate_identity(
- "FROM x SORT age",
+ "from x sort age",
"SELECT * FROM x ORDER BY age",
)
self.validate_identity(
- "FROM x SORT {age, name}",
+ "from x sort {-age}",
+ "SELECT * FROM x ORDER BY age DESC",
+ )
+ self.validate_identity(
+ "from x sort {age, name}",
"SELECT * FROM x ORDER BY age, name",
)
- self.validate_identity("FROM x APPEND y", "SELECT * FROM x UNION ALL SELECT * FROM y")
- self.validate_identity("FROM x REMOVE y", "SELECT * FROM x EXCEPT ALL SELECT * FROM y")
self.validate_identity(
- "FROM x INTERSECT y", "SELECT * FROM x INTERSECT ALL SELECT * FROM y"
+ "from x sort {-age, +name}",
+ "SELECT * FROM x ORDER BY age DESC, name",
+ )
+ self.validate_identity("from x append y", "SELECT * FROM x UNION ALL SELECT * FROM y")
+ self.validate_identity("from x remove y", "SELECT * FROM x EXCEPT ALL SELECT * FROM y")
+ self.validate_identity(
+ "from x intersect y", "SELECT * FROM x INTERSECT ALL SELECT * FROM y"
)
| [] | [
"tests/dialects/test_prql.py::TestPRQL::test_prql"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat(prql): Handle DESC with sort
{}
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
conan-io__conan-16054 | 16,054 | conan-io/conan | null | e7debe9671cb271a91353019987c7e5fde6a3364 | 2024-04-09T21:43:30Z | diff --git a/conan/cps/__init__.py b/conan/cps/__init__.py
new file mode 100644
index 00000000000..d2efed3aefe
--- /dev/null
+++ b/conan/cps/__init__.py
@@ -0,0 +1,1 @@
+from conan.cps.cps import CPS
diff --git a/conan/cps/cps.py b/conan/cps/cps.py
new file mode 100644
index 00000000000..cec264e6543
--- /dev/null
+++ b/conan/cps/cps.py
@@ -0,0 +1,302 @@
+import glob
+import json
+import os
+from enum import Enum
+
+from conan.api.output import ConanOutput
+from conans.model.build_info import CppInfo
+from conans.model.pkg_type import PackageType
+from conans.util.files import save, load
+
+
+class CPSComponentType(Enum):
+ DYLIB = "dylib"
+ ARCHIVE = "archive"
+ INTERFACE = "interface"
+ EXE = "exe"
+ JAR = "jar"
+ UNKNOWN = "unknown"
+
+ def __str__(self):
+ return self.value
+
+ def __eq__(self, other):
+ # This is useful for comparing with string type at user code, like ``type == "xxx"``
+ return super().__eq__(CPSComponentType(other))
+
+ @staticmethod
+ def from_conan(pkg_type):
+ _package_type_map = {
+ "shared-library": "dylib",
+ "static-library": "archive",
+ "header-library": "interface",
+ "application": "executable"
+ }
+ return CPSComponentType(_package_type_map.get(str(pkg_type), "unknown"))
+
+
+def deduce_full_lib_info(libname, full_lib, cpp_info, pkg_type):
+ if full_lib.get("type") is not None:
+ assert "location" in full_lib, f"If 'type' is specified in library {libname}, 'location' too"
+ return
+
+ # Recipe didn't specify things, need to auto deduce
+ libdirs = [x.replace("\\", "/") for x in cpp_info.libdirs]
+ bindirs = [x.replace("\\", "/") for x in cpp_info.bindirs]
+
+ static_patterns = [f"{libname}.lib", f"{libname}.a", f"lib{libname}.a"]
+ shared_patterns = [f"lib{libname}.so", f"lib{libname}.so.*", f"lib{libname}.dylib",
+ f"lib{libname}.*dylib"]
+ dll_patterns = [f"{libname}.dll"]
+
+ def _find_matching(patterns, dirs):
+ matches = set()
+ for pattern in patterns:
+ for d in dirs:
+ matches.update(glob.glob(f"{d}/{pattern}"))
+ if len(matches) == 1:
+ return next(iter(matches))
+
+ static_location = _find_matching(static_patterns, libdirs)
+ shared_location = _find_matching(shared_patterns, libdirs)
+ dll_location = _find_matching(dll_patterns, bindirs)
+ if static_location:
+ if shared_location:
+ ConanOutput().warning(f"Lib {libname} has both static {static_location} and "
+ f"shared {shared_location} in the same package")
+ if pkg_type is PackageType.STATIC:
+ full_lib["location"] = static_location
+ full_lib["type"] = PackageType.STATIC
+ else:
+ full_lib["location"] = shared_location
+ full_lib["type"] = PackageType.SHARED
+ elif dll_location:
+ full_lib["location"] = dll_location
+ full_lib["link_location"] = static_location
+ full_lib["type"] = PackageType.SHARED
+ else:
+ full_lib["location"] = static_location
+ full_lib["type"] = PackageType.STATIC
+ elif shared_location:
+ full_lib["location"] = shared_location
+ full_lib["type"] = PackageType.SHARED
+ elif dll_location:
+ # Only .dll but no link library
+ full_lib["location"] = dll_location
+ full_lib["type"] = PackageType.SHARED
+ if full_lib.get("type") != pkg_type:
+ ConanOutput().warning(f"Lib {libname} deduced as '{full_lib.get('type')}, "
+ f"but 'package_type={pkg_type}'")
+
+
+class CPSComponent:
+ def __init__(self, component_type=None):
+ self.includes = []
+ self.type = component_type or "unknown"
+ self.definitions = []
+ self.requires = []
+ self.location = None
+ self.link_location = None
+ self.link_libraries = None # system libraries
+
+ def serialize(self):
+ component = {"type": str(self.type)}
+ if self.requires:
+ component["requires"] = self.requires
+ if self.includes:
+ component["includes"] = [x.replace("\\", "/") for x in self.includes]
+ if self.definitions:
+ component["definitions"] = self.definitions
+ if self.location: # TODO: @prefix@
+ component["location"] = self.location
+ if self.link_location:
+ component["link_location"] = self.link_location
+ if self.link_libraries:
+ component["link_libraries"] = self.link_libraries
+ return component
+
+ @staticmethod
+ def deserialize(data):
+ comp = CPSComponent()
+ comp.type = CPSComponentType(data.get("type"))
+ comp.requires = data.get("requires")
+ comp.includes = data.get("includes")
+ comp.definitions = data.get("definitions")
+ comp.location = data.get("location")
+ comp.link_location = data.get("link_location")
+ comp.link_libraries = data.get("link_libraries")
+ return comp
+
+ @staticmethod
+ def from_cpp_info(cpp_info, pkg_type, libname=None):
+ cps_comp = CPSComponent()
+ if not libname:
+ cps_comp.definitions = cpp_info.defines
+ cps_comp.includes = [x.replace("\\", "/") for x in cpp_info.includedirs]
+
+ if not cpp_info.libs:
+ cps_comp.type = CPSComponentType.INTERFACE
+ return cps_comp
+
+ if len(cpp_info.libs) > 1 and not libname: # Multi-lib pkg without components defined
+ cps_comp.type = CPSComponentType.INTERFACE
+ return cps_comp
+
+ libname = libname or next(iter(cpp_info.full_libs))
+ full_lib = cpp_info.full_libs[libname]
+ deduce_full_lib_info(libname, full_lib, cpp_info, pkg_type)
+ cps_comp.type = CPSComponentType.from_conan(full_lib.get("type"))
+ cps_comp.location = full_lib.get("location")
+ cps_comp.link_location = full_lib.get("link_location")
+ cps_comp.link_libraries = cpp_info.system_libs
+ required = cpp_info.requires
+ cps_comp.requires = [f":{c}" if "::" not in c else c.replace("::", ":") for c in required]
+ return cps_comp
+
+
+class CPS:
+ """ represents the CPS file for 1 package
+ """
+ def __init__(self, name=None, version=None):
+ self.name = name
+ self.version = version
+ self.default_components = []
+ self.components = {}
+ self.configurations = []
+ self.requires = []
+ # Supplemental
+ self.description = None
+ self.license = None
+ self.website = None
+
+ def serialize(self):
+ cps = {"cps_version": "0.12.0",
+ "name": self.name,
+ "version": self.version}
+
+ # Supplemental
+ for data in "license", "description", "website":
+ if getattr(self, data, None):
+ cps[data] = getattr(self, data)
+
+ if self.requires:
+ cps["requires"] = self.requires
+
+ if self.configurations:
+ cps["configurations"] = self.configurations
+
+ cps["default_components"] = self.default_components
+ cps["components"] = {}
+ for name, comp in self.components.items():
+ cps["components"][name] = comp.serialize()
+
+ return cps
+
+ @staticmethod
+ def deserialize(data):
+ cps = CPS()
+ cps.name = data.get("name")
+ cps.version = data.get("version")
+ cps.license = data.get("license")
+ cps.description = data.get("description")
+ cps.website = data.get("website")
+ cps.requires = data.get("requires")
+ cps.configurations = data.get("configurations")
+ cps.default_components = data.get("default_components")
+ cps.components = {k: CPSComponent.deserialize(v)
+ for k, v in data.get("components", {}).items()}
+ return cps
+
+ @staticmethod
+ def from_conan(dep):
+ cps = CPS(dep.ref.name, str(dep.ref.version))
+ # supplemental
+ cps.license = dep.license
+ cps.description = dep.description
+ cps.website = dep.homepage
+
+ cps.requires = {d.ref.name: None for d in dep.dependencies.host.values()}
+ if dep.settings.get_safe("build_type"):
+ cps.configurations = [str(dep.settings.build_type).lower()]
+
+ if not dep.cpp_info.has_components:
+ if dep.cpp_info.libs and len(dep.cpp_info.libs) > 1:
+ comp = CPSComponent.from_cpp_info(dep.cpp_info, dep.package_type) # base
+ base_name = f"_{cps.name}"
+ cps.components[base_name] = comp
+ for lib in dep.cpp_info.libs:
+ comp = CPSComponent.from_cpp_info(dep.cpp_info, dep.package_type, lib)
+ comp.requires.insert(0, f":{base_name}") # dep to the common one
+ cps.components[lib] = comp
+ cps.default_components = dep.cpp_info.libs
+ # FIXME: What if one lib is named equal to the package?
+ else:
+ # single component, called same as library
+ component = CPSComponent.from_cpp_info(dep.cpp_info, dep.package_type)
+ if not component.requires and dep.dependencies:
+ for transitive_dep in dep.dependencies.host.items():
+ dep_name = transitive_dep[0].ref.name
+ component.requires.append(f"{dep_name}:{dep_name}")
+
+ # the component will be just the package name
+ cps.default_components = [f"{dep.ref.name}"]
+ cps.components[f"{dep.ref.name}"] = component
+ else:
+ sorted_comps = dep.cpp_info.get_sorted_components()
+ for comp_name, comp in sorted_comps.items():
+ component = CPSComponent.from_cpp_info(comp, dep.package_type)
+ cps.components[comp_name] = component
+ # Now by default all are default_components
+ cps.default_components = [comp_name for comp_name in sorted_comps]
+
+ return cps
+
+ def to_conan(self):
+ def strip_prefix(dirs):
+ return [d.replace("@prefix@/", "") for d in dirs]
+
+ cpp_info = CppInfo()
+ if len(self.components) == 1:
+ comp = next(iter(self.components.values()))
+ cpp_info.includedirs = strip_prefix(comp.includes)
+ cpp_info.defines = comp.definitions
+ if comp.type is CPSComponentType.ARCHIVE:
+ location = comp.location
+ location = location.replace("@prefix@/", "")
+ cpp_info.libdirs = [os.path.dirname(location)]
+ filename = os.path.basename(location)
+ basefile, ext = os.path.splitext(filename)
+ if basefile.startswith("lib") and ext != ".lib":
+ basefile = basefile[3:]
+ cpp_info.libs = [basefile]
+ # FIXME: Missing requires
+ cpp_info.system_libs = comp.link_libraries
+ else:
+ for comp_name, comp in self.components.items():
+ cpp_comp = cpp_info.components[comp_name]
+ cpp_comp.includedirs = strip_prefix(comp.includes)
+ cpp_comp.defines = comp.definitions
+ if comp.type is CPSComponentType.ARCHIVE:
+ location = comp.location
+ location = location.replace("@prefix@/", "")
+ cpp_comp.libdirs = [os.path.dirname(location)]
+ filename = os.path.basename(location)
+ basefile, ext = os.path.splitext(filename)
+ if basefile.startswith("lib") and ext != ".lib":
+ basefile = basefile[3:]
+ cpp_comp.libs = [basefile]
+ for r in comp.requires:
+ cpp_comp.requires.append(r[1:] if r.startswith(":") else r.replace(":", "::"))
+ cpp_comp.system_libs = comp.link_libraries
+
+ return cpp_info
+
+ def save(self, folder):
+ filename = os.path.join(folder, f"{self.name}.cps")
+ save(filename, json.dumps(self.serialize(), indent=2))
+ return filename
+
+ @staticmethod
+ def load(file):
+ contents = load(file)
+ return CPS.deserialize(json.loads(contents))
diff --git a/conan/internal/api/install/generators.py b/conan/internal/api/install/generators.py
index c385820a770..950b714850e 100644
--- a/conan/internal/api/install/generators.py
+++ b/conan/internal/api/install/generators.py
@@ -31,7 +31,8 @@
"MakeDeps": "conan.tools.gnu",
"SConsDeps": "conan.tools.scons",
"QbsDeps": "conan.tools.qbs",
- "QbsProfile": "conan.tools.qbs"
+ "QbsProfile": "conan.tools.qbs",
+ "CPSDeps": "conan.tools.cps"
}
diff --git a/conan/tools/cps/__init__.py b/conan/tools/cps/__init__.py
new file mode 100644
index 00000000000..64674e37160
--- /dev/null
+++ b/conan/tools/cps/__init__.py
@@ -0,0 +1,1 @@
+from conan.tools.cps.cps_deps import CPSDeps
diff --git a/conan/tools/cps/cps_deps.py b/conan/tools/cps/cps_deps.py
new file mode 100644
index 00000000000..578a87c0162
--- /dev/null
+++ b/conan/tools/cps/cps_deps.py
@@ -0,0 +1,53 @@
+from conan.cps.cps import CPS
+from conan.tools.files import save
+
+import json
+import os
+
+
+class CPSDeps:
+ def __init__(self, conanfile):
+ self._conanfile = conanfile
+
+ def _config_name(self):
+ build_vars = ["settings.compiler", "settings.compiler.version", "settings.arch",
+ "settings.compiler.cppstd", "settings.build_type", "options.shared"]
+ ret = []
+ for s in build_vars:
+ group, var = s.split(".", 1)
+ tmp = None
+ if group == "settings":
+ tmp = self._conanfile.settings.get_safe(var)
+ elif group == "options":
+ value = self._conanfile.options.get_safe(var)
+ if value is not None:
+ if var == "shared":
+ tmp = "shared" if value else "static"
+ else:
+ tmp = "{}_{}".format(var, value)
+ if tmp:
+ ret.append(tmp.lower())
+ return "-".join(ret)
+
+ def generate(self):
+ cps_folder = os.path.join(self._conanfile.folders.base_build, "build", "cps")
+ config_name = self._config_name()
+ folder = os.path.join(cps_folder, config_name)
+ self._conanfile.output.info(f"[CPSDeps] folder {cps_folder}")
+ deps = self._conanfile.dependencies.host.items()
+ mapping = {}
+ for _, dep in deps:
+ self._conanfile.output.info(f"[CPSDeps]: dep {dep.ref.name}")
+
+ cps_in_package = os.path.join(dep.package_folder, f"{dep.ref.name}.cps")
+ if os.path.exists(cps_in_package):
+ mapping[dep.ref.name] = cps_in_package
+ continue
+
+ cps = CPS.from_conan(dep)
+ output_file = cps.save(folder)
+ mapping[dep.ref.name] = output_file
+
+ name = f"cpsmap-{config_name}.json"
+ self._conanfile.output.info(f"Generating CPS mapping file: {name}")
+ save(self._conanfile, os.path.join(cps_folder, name), json.dumps(mapping, indent=2))
diff --git a/conans/model/build_info.py b/conans/model/build_info.py
index 536da5ca8fc..4aa0aee2d0f 100644
--- a/conans/model/build_info.py
+++ b/conans/model/build_info.py
@@ -242,6 +242,16 @@ def frameworks(self, value):
def libs(self):
if self._libs is None:
self._libs = []
+ if isinstance(self._libs, dict):
+ return [self._libs.keys()] # Return a list to not break any interface
+ return self._libs
+
+ @property
+ def full_libs(self):
+ if self._libs is None:
+ self._libs = []
+ if isinstance(self._libs, list):
+ return {k: {} for k in self._libs}
return self._libs
@libs.setter
| diff --git a/test/integration/cps/__init__.py b/test/integration/cps/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/test/integration/cps/test_cps.py b/test/integration/cps/test_cps.py
new file mode 100644
index 00000000000..f1ce24f6c19
--- /dev/null
+++ b/test/integration/cps/test_cps.py
@@ -0,0 +1,119 @@
+import json
+import os
+import textwrap
+
+from conan.test.assets.genconanfile import GenConanfile
+from conan.test.utils.tools import TestClient
+
+
+def test_cps():
+ c = TestClient()
+ c.save({"pkg/conanfile.py": GenConanfile("pkg", "0.1").with_settings("build_type")
+ .with_class_attribute("license='MIT'")})
+ c.run("create pkg")
+
+ settings = "-s os=Windows -s compiler=msvc -s compiler.version=191 -s arch=x86_64"
+ c.run(f"install --requires=pkg/0.1 {settings} -g CPSDeps")
+ pkg = json.loads(c.load("build/cps/msvc-191-x86_64-release/pkg.cps"))
+ assert pkg["name"] == "pkg"
+ assert pkg["version"] == "0.1"
+ assert pkg["license"] == "MIT"
+ assert pkg["configurations"] == ["release"]
+ assert pkg["default_components"] == ["pkg"]
+ pkg_comp = pkg["components"]["pkg"]
+ assert pkg_comp["type"] == "interface"
+ mapping = json.loads(c.load("build/cps/cpsmap-msvc-191-x86_64-release.json"))
+ for _, path_cps in mapping.items():
+ assert os.path.exists(path_cps)
+
+
+def test_cps_static_lib():
+ c = TestClient()
+ c.save({"pkg/conanfile.py": GenConanfile("pkg", "0.1").with_package_file("lib/pkg.a", "-")
+ .with_settings("build_type")
+ .with_package_info(cpp_info={"libs": ["pkg"]}, env_info={})})
+ c.run("create pkg")
+
+ settings = "-s os=Windows -s compiler=msvc -s compiler.version=191 -s arch=x86_64"
+ c.run(f"install --requires=pkg/0.1 {settings} -g CPSDeps")
+ pkg = json.loads(c.load("build/cps/msvc-191-x86_64-release/pkg.cps"))
+ assert pkg["name"] == "pkg"
+ assert pkg["version"] == "0.1"
+ assert pkg["configurations"] == ["release"]
+ assert pkg["default_components"] == ["pkg"]
+ pkg_comp = pkg["components"]["pkg"]
+ assert pkg_comp["type"] == "archive"
+ assert pkg_comp["location"] is not None
+
+
+def test_cps_header():
+ c = TestClient()
+ c.save({"pkg/conanfile.py": GenConanfile("pkg", "0.1").with_package_type("header-library")})
+ c.run("create pkg")
+
+ settings = "-s os=Windows -s compiler=msvc -s compiler.version=191 -s arch=x86_64"
+ c.run(f"install --requires=pkg/0.1 {settings} -g CPSDeps")
+ pkg = json.loads(c.load("build/cps/msvc-191-x86_64-release/pkg.cps"))
+ assert pkg["name"] == "pkg"
+ assert pkg["version"] == "0.1"
+ assert "configurations" not in "pkg"
+ assert pkg["default_components"] == ["pkg"]
+ pkg_comp = pkg["components"]["pkg"]
+ assert pkg_comp["type"] == "interface"
+ assert "location" not in pkg_comp
+
+
+def test_cps_in_pkg():
+ c = TestClient()
+ cps = textwrap.dedent("""\
+ {
+ "cps_version": "0.12.0",
+ "name": "zlib",
+ "version": "1.3.1",
+ "configurations": ["release"],
+ "default_components": ["zlib"],
+ "components": {
+ "zlib": {
+ "type": "archive",
+ "includes": ["@prefix@/include"],
+ "location": "@prefix@/lib/zlib.a"
+ }
+ }
+ }
+ """)
+ cps = "".join(cps.splitlines())
+ conanfile = textwrap.dedent(f"""
+ import os
+ from conan.tools.files import save
+ from conan import ConanFile
+ class Pkg(ConanFile):
+ name = "zlib"
+ version = "1.3.1"
+
+ def package(self):
+ cps = '{cps}'
+ cps_path = os.path.join(self.package_folder, "zlib.cps")
+ save(self, cps_path, cps)
+
+ def package_info(self):
+ from conan.cps import CPS
+ self.cpp_info = CPS.load("zlib.cps").to_conan()
+ """)
+ c.save({"pkg/conanfile.py": conanfile})
+ c.run("create pkg")
+
+ settings = "-s os=Windows -s compiler=msvc -s compiler.version=191 -s arch=x86_64"
+ c.run(f"install --requires=zlib/1.3.1 {settings} -g CPSDeps")
+
+ mapping = json.loads(c.load("build/cps/cpsmap-msvc-191-x86_64-release.json"))
+ for _, path_cps in mapping.items():
+ assert os.path.exists(path_cps)
+
+ assert not os.path.exists(os.path.join(c.current_folder, "zlib.cps"))
+ assert not os.path.exists(os.path.join(c.current_folder, "build", "cps", "zlib.cps"))
+
+ c.run(f"install --requires=zlib/1.3.1 {settings} -g CMakeDeps")
+ cmake = c.load("zlib-release-x86_64-data.cmake")
+ assert 'set(zlib_INCLUDE_DIRS_RELEASE "${zlib_PACKAGE_FOLDER_RELEASE}/include")' in cmake
+ assert 'set(zlib_LIB_DIRS_RELEASE "${zlib_PACKAGE_FOLDER_RELEASE}/lib")'
+ assert 'set(zlib_LIBS_RELEASE zlib)' in cmake
diff --git a/test/integration/cps/test_extended_cpp_info.py b/test/integration/cps/test_extended_cpp_info.py
new file mode 100644
index 00000000000..4308fd5b6e1
--- /dev/null
+++ b/test/integration/cps/test_extended_cpp_info.py
@@ -0,0 +1,29 @@
+import json
+import textwrap
+
+from conan.test.utils.tools import TestClient
+
+
+def test_extended_cpp_info():
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+ def package_info(self):
+ self.cpp_info.libs = {"mylib": {"location": "my_custom_location",
+ "type": "static-library"}}
+ """)
+ c.save({"conanfile.py": conanfile})
+ c.run("create .")
+
+ settings = "-s os=Windows -s compiler=msvc -s compiler.version=191 -s arch=x86_64"
+ c.run(f"install --requires=pkg/0.1 {settings} -g CPSDeps")
+ pkg = json.loads(c.load("build/cps/msvc-191-x86_64-release/pkg.cps"))
+ assert pkg["name"] == "pkg"
+ assert pkg["version"] == "0.1"
+ assert pkg["default_components"] == ["pkg"]
+ pkg_comp = pkg["components"]["pkg"]
+ assert pkg_comp["type"] == "archive"
+ assert pkg_comp["location"] == "my_custom_location"
| [
{
"components": [
{
"doc": "",
"lines": [
12,
35
],
"name": "CPSComponentType",
"signature": "class CPSComponentType(Enum):",
"type": "class"
},
{
"doc": "",
"lines": [
20,
21
],
... | [
"test/integration/cps/test_cps.py::test_cps",
"test/integration/cps/test_cps.py::test_cps_static_lib",
"test/integration/cps/test_cps.py::test_cps_header",
"test/integration/cps/test_cps.py::test_cps_in_pkg",
"test/integration/cps/test_extended_cpp_info.py::test_extended_cpp_info"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feature/cps
Changelog: Omit
Docs: Omit
Won't be documented yet
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/cps/cps.py]
(definition of CPSComponentType:)
class CPSComponentType(Enum):
(definition of CPSComponentType.__str__:)
def __str__(self):
(definition of CPSComponentType.__eq__:)
def __eq__(self, other):
(definition of CPSComponentType.from_conan:)
def from_conan(pkg_type):
(definition of deduce_full_lib_info:)
def deduce_full_lib_info(libname, full_lib, cpp_info, pkg_type):
(definition of deduce_full_lib_info._find_matching:)
def _find_matching(patterns, dirs):
(definition of CPSComponent:)
class CPSComponent:
(definition of CPSComponent.__init__:)
def __init__(self, component_type=None):
(definition of CPSComponent.serialize:)
def serialize(self):
(definition of CPSComponent.deserialize:)
def deserialize(data):
(definition of CPSComponent.from_cpp_info:)
def from_cpp_info(cpp_info, pkg_type, libname=None):
(definition of CPS:)
class CPS:
"""represents the CPS file for 1 package
"""
(definition of CPS.__init__:)
def __init__(self, name=None, version=None):
(definition of CPS.serialize:)
def serialize(self):
(definition of CPS.deserialize:)
def deserialize(data):
(definition of CPS.from_conan:)
def from_conan(dep):
(definition of CPS.to_conan:)
def to_conan(self):
(definition of CPS.to_conan.strip_prefix:)
def strip_prefix(dirs):
(definition of CPS.save:)
def save(self, folder):
(definition of CPS.load:)
def load(file):
[end of new definitions in conan/cps/cps.py]
[start of new definitions in conan/tools/cps/cps_deps.py]
(definition of CPSDeps:)
class CPSDeps:
(definition of CPSDeps.__init__:)
def __init__(self, conanfile):
(definition of CPSDeps._config_name:)
def _config_name(self):
(definition of CPSDeps.generate:)
def generate(self):
[end of new definitions in conan/tools/cps/cps_deps.py]
[start of new definitions in conans/model/build_info.py]
(definition of _Component.full_libs:)
def full_libs(self):
[end of new definitions in conans/model/build_info.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
deepset-ai__haystack-7520 | 7,520 | deepset-ai/haystack | null | e974a23fa306a38cf482d24b1b7e4efd7789b32a | 2024-04-09T15:54:10Z | diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
index 0da03f913c..f69c8257a9 100644
--- a/haystack/components/evaluators/__init__.py
+++ b/haystack/components/evaluators/__init__.py
@@ -2,6 +2,7 @@
from .document_map import DocumentMAPEvaluator
from .document_mrr import DocumentMRREvaluator
from .document_recall import DocumentRecallEvaluator
+from .evaluation_result import EvaluationResult
from .faithfulness import FaithfulnessEvaluator
from .llm_evaluator import LLMEvaluator
from .sas_evaluator import SASEvaluator
@@ -11,6 +12,7 @@
"DocumentMAPEvaluator",
"DocumentMRREvaluator",
"DocumentRecallEvaluator",
+ "EvaluationResult",
"FaithfulnessEvaluator",
"LLMEvaluator",
"SASEvaluator",
diff --git a/haystack/components/evaluators/evaluation_result.py b/haystack/components/evaluators/evaluation_result.py
new file mode 100644
index 0000000000..d2a146f3f1
--- /dev/null
+++ b/haystack/components/evaluators/evaluation_result.py
@@ -0,0 +1,98 @@
+from typing import Any, Dict
+
+from pandas import DataFrame
+from pandas import concat as pd_concat
+
+
+class EvaluationResult:
+ """
+ A class to store the results of an evaluation pipeline.
+
+ data = {
+ "inputs": {
+ "question": ["What is the capital of France?", "What is the capital of Spain?"],
+ "contexts": ["wiki_France", "wiki_Spain"],
+ "predicted_answer": ["Paris", "Madrid"],
+ },
+ "metrics": [
+ {"name": "reciprocal_rank", "scores": [0.378064, 0.534964, 0.216058, 0.778642]},
+ {"name": "context_relevance", "scores": [0.805466, 0.410251, 0.750070, 0.361332]},
+ ],
+ }
+
+ eval_result = EvaluationResult(pipeline_name="testing_pipeline_1", results=data)
+ eval_result.to_pandas()
+ """
+
+ def __init__(self, pipeline_name: str, results: Dict[str, Any]):
+ """
+ Initialize the EvaluationResult object.
+
+ :param pipeline_name: The name of the pipeline that generated the results.
+ :param results: A dictionary containing the results of the evaluators used in the EvaluationPipeline.
+ it should have the following keys:
+ - inputs: A dictionary containing the inputs used in the evaluation.
+ - metrics: A list of dictionaries each containing the following keys:
+ 'name': The name of the metric.
+ 'score': The aggregated score for the metric.
+ 'individual_scores': A list of scores for each query.
+ """
+ self.results = results
+ self.pipeline_name = pipeline_name
+
+ def score_report(self) -> DataFrame:
+ """
+ Transforms the results into a DataFrame with the aggregated scores for each metric.
+
+ :returns:
+ A DataFrame with the aggregated scores.
+
+ """
+ results = {entry["name"]: entry["score"] for entry in self.results["metrics"]}
+ return DataFrame.from_dict(results, orient="index", columns=["score"])
+
+ def to_pandas(self) -> DataFrame:
+ """
+ Creates a DataFrame containing the scores for each query and each metric.
+
+ :returns:
+ A DataFrame with the scores.
+ """
+ inputs_columns = list(self.results["inputs"].keys())
+ inputs_values = list(self.results["inputs"].values())
+ inputs_values = list(map(list, zip(*inputs_values))) # transpose the values
+ df_inputs = DataFrame(inputs_values, columns=inputs_columns)
+
+ scores_columns = [entry["name"] for entry in self.results["metrics"]]
+ scores_values = [entry["individual_scores"] for entry in self.results["metrics"]]
+ scores_values = list(map(list, zip(*scores_values))) # transpose the values
+ df_scores = DataFrame(scores_values, columns=scores_columns)
+
+ return df_inputs.join(df_scores)
+
+ def comparative_individual_scores_report(self, other: "EvaluationResult") -> DataFrame:
+ """
+ Creates a DataFrame with the scores for each metric in the results of two different pipelines.
+
+ :param other: The other EvaluationResults object to compare with.
+ :returns:
+ A DataFrame with the scores from both EvaluationResults objects.
+ """
+ pipe_a_df = self.to_pandas()
+ pipe_b_df = other.to_pandas()
+
+ # check if the columns are the same, i.e.: the same queries and evaluation pipeline
+ columns_a = list(pipe_a_df.columns)
+ columns_b = list(pipe_b_df.columns)
+ if columns_a != columns_b:
+ raise ValueError(f"The two evaluation results do not have the same columns: {columns_a} != {columns_b}")
+
+ # add the pipeline name to the column
+ ignore = ["query_id", "question", "contexts", "answer"]
+ pipe_b_df.drop(columns=ignore, inplace=True, errors="ignore")
+ pipe_b_df.columns = [f"{other.pipeline_name}_{column}" for column in pipe_b_df.columns]
+ pipe_a_df.columns = [f"{self.pipeline_name}_{col}" if col not in ignore else col for col in pipe_a_df.columns]
+
+ results_df = pd_concat([pipe_a_df, pipe_b_df], axis=1)
+
+ return results_df
diff --git a/releasenotes/notes/implemeting-eval-results-API-25b2f8707495bea0.yaml b/releasenotes/notes/implemeting-eval-results-API-25b2f8707495bea0.yaml
new file mode 100644
index 0000000000..2f02640cae
--- /dev/null
+++ b/releasenotes/notes/implemeting-eval-results-API-25b2f8707495bea0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added a new EvaluationResult component.
+ This is a wrapper for all the results coming from the Evaluators, presenting the metric scores as a DataFrame.
| diff --git a/test/components/evaluators/__init__.py b/test/components/evaluators/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/components/evaluators/test_results_evaluator.py b/test/components/evaluators/test_results_evaluator.py
new file mode 100644
index 0000000000..4e07c9679c
--- /dev/null
+++ b/test/components/evaluators/test_results_evaluator.py
@@ -0,0 +1,170 @@
+from haystack.components.evaluators.evaluation_result import EvaluationResult
+
+
+def test_init_results_evaluator():
+ data = {
+ "inputs": {
+ "query_id": ["53c3b3e6", "225f87f7"],
+ "question": ["What is the capital of France?", "What is the capital of Spain?"],
+ "contexts": ["wiki_France", "wiki_Spain"],
+ "answer": ["Paris", "Madrid"],
+ "predicted_answer": ["Paris", "Madrid"],
+ },
+ "metrics": [
+ {"name": "reciprocal_rank", "scores": [0.378064, 0.534964, 0.216058, 0.778642]},
+ {"name": "single_hit", "scores": [1, 1, 0, 1]},
+ {"name": "multi_hit", "scores": [0.706125, 0.454976, 0.445512, 0.250522]},
+ {"name": "context_relevance", "scores": [0.805466, 0.410251, 0.750070, 0.361332]},
+ {"name": "faithfulness", "scores": [0.135581, 0.695974, 0.749861, 0.041999]},
+ {"name": "semantic_answer_similarity", "scores": [0.971241, 0.159320, 0.019722, 1]},
+ ],
+ }
+
+ _ = EvaluationResult(pipeline_name="testing_pipeline_1", results=data)
+
+
+def test_score_report():
+ data = {
+ "inputs": {
+ "query_id": ["53c3b3e6", "225f87f7"],
+ "question": ["What is the capital of France?", "What is the capital of Spain?"],
+ "contexts": ["wiki_France", "wiki_Spain"],
+ "answer": ["Paris", "Madrid"],
+ "predicted_answer": ["Paris", "Madrid"],
+ },
+ "metrics": [
+ {
+ "name": "reciprocal_rank",
+ "individual_scores": [0.378064, 0.534964, 0.216058, 0.778642],
+ "score": 0.476932,
+ },
+ {"name": "single_hit", "individual_scores": [1, 1, 0, 1], "score": 0.75},
+ {"name": "multi_hit", "individual_scores": [0.706125, 0.454976, 0.445512, 0.250522], "score": 0.46428375},
+ {
+ "name": "context_relevance",
+ "individual_scores": [0.805466, 0.410251, 0.750070, 0.361332],
+ "score": 0.58177975,
+ },
+ {
+ "name": "faithfulness",
+ "individual_scores": [0.135581, 0.695974, 0.749861, 0.041999],
+ "score": 0.40585375,
+ },
+ {
+ "name": "semantic_answer_similarity",
+ "individual_scores": [0.971241, 0.159320, 0.019722, 1],
+ "score": 0.53757075,
+ },
+ ],
+ }
+
+ evaluator = EvaluationResult(pipeline_name="testing_pipeline_1", results=data)
+ result = evaluator.score_report().to_json()
+ assert result == (
+ '{"score":{"reciprocal_rank":0.476932,"single_hit":0.75,"multi_hit":0.46428375,'
+ '"context_relevance":0.58177975,"faithfulness":0.40585375,'
+ '"semantic_answer_similarity":0.53757075}}'
+ )
+
+
+def test_to_pandas():
+ data = {
+ "inputs": {
+ "query_id": ["53c3b3e6", "225f87f7", "53c3b3e6", "225f87f7"],
+ "question": [
+ "What is the capital of France?",
+ "What is the capital of Spain?",
+ "What is the capital of Luxembourg?",
+ "What is the capital of Portugal?",
+ ],
+ "contexts": ["wiki_France", "wiki_Spain", "wiki_Luxembourg", "wiki_Portugal"],
+ "answer": ["Paris", "Madrid", "Luxembourg", "Lisbon"],
+ "predicted_answer": ["Paris", "Madrid", "Luxembourg", "Lisbon"],
+ },
+ "metrics": [
+ {"name": "reciprocal_rank", "individual_scores": [0.378064, 0.534964, 0.216058, 0.778642]},
+ {"name": "single_hit", "individual_scores": [1, 1, 0, 1]},
+ {"name": "multi_hit", "individual_scores": [0.706125, 0.454976, 0.445512, 0.250522]},
+ {"name": "context_relevance", "individual_scores": [0.805466, 0.410251, 0.750070, 0.361332]},
+ {"name": "faithfulness", "individual_scores": [0.135581, 0.695974, 0.749861, 0.041999]},
+ {"name": "semantic_answer_similarity", "individual_scores": [0.971241, 0.159320, 0.019722, 1]},
+ ],
+ }
+
+ evaluator = EvaluationResult(pipeline_name="testing_pipeline_1", results=data)
+ assert evaluator.to_pandas().to_json() == (
+ '{"query_id":{"0":"53c3b3e6","1":"225f87f7","2":"53c3b3e6","3":"225f87f7"},'
+ '"question":{"0":"What is the capital of France?","1":"What is the capital of Spain?",'
+ '"2":"What is the capital of Luxembourg?","3":"What is the capital of Portugal?"},'
+ '"contexts":{"0":"wiki_France","1":"wiki_Spain","2":"wiki_Luxembourg","3":"wiki_Portugal"},'
+ '"answer":{"0":"Paris","1":"Madrid","2":"Luxembourg","3":"Lisbon"},'
+ '"predicted_answer":{"0":"Paris","1":"Madrid","2":"Luxembourg","3":"Lisbon"},'
+ '"reciprocal_rank":{"0":0.378064,"1":0.534964,"2":0.216058,"3":0.778642},'
+ '"single_hit":{"0":1,"1":1,"2":0,"3":1},'
+ '"multi_hit":{"0":0.706125,"1":0.454976,"2":0.445512,"3":0.250522},'
+ '"context_relevance":{"0":0.805466,"1":0.410251,"2":0.75007,"3":0.361332},'
+ '"faithfulness":{"0":0.135581,"1":0.695974,"2":0.749861,"3":0.041999},'
+ '"semantic_answer_similarity":{"0":0.971241,"1":0.15932,"2":0.019722,"3":1.0}}'
+ )
+
+
+def test_comparative_individual_scores_report():
+ data_1 = {
+ "inputs": {
+ "query_id": ["53c3b3e6", "225f87f7"],
+ "question": ["What is the capital of France?", "What is the capital of Spain?"],
+ "contexts": ["wiki_France", "wiki_Spain"],
+ "answer": ["Paris", "Madrid"],
+ "predicted_answer": ["Paris", "Madrid"],
+ },
+ "metrics": [
+ {"name": "reciprocal_rank", "individual_scores": [0.378064, 0.534964, 0.216058, 0.778642]},
+ {"name": "single_hit", "individual_scores": [1, 1, 0, 1]},
+ {"name": "multi_hit", "individual_scores": [0.706125, 0.454976, 0.445512, 0.250522]},
+ {"name": "context_relevance", "individual_scores": [0.805466, 0.410251, 0.750070, 0.361332]},
+ {"name": "faithfulness", "individual_scores": [0.135581, 0.695974, 0.749861, 0.041999]},
+ {"name": "semantic_answer_similarity", "individual_scores": [0.971241, 0.159320, 0.019722, 1]},
+ ],
+ }
+
+ data_2 = {
+ "inputs": {
+ "query_id": ["53c3b3e6", "225f87f7"],
+ "question": ["What is the capital of France?", "What is the capital of Spain?"],
+ "contexts": ["wiki_France", "wiki_Spain"],
+ "answer": ["Paris", "Madrid"],
+ "predicted_answer": ["Paris", "Madrid"],
+ },
+ "metrics": [
+ {"name": "reciprocal_rank", "individual_scores": [0.378064, 0.534964, 0.216058, 0.778642]},
+ {"name": "single_hit", "individual_scores": [1, 1, 0, 1]},
+ {"name": "multi_hit", "individual_scores": [0.706125, 0.454976, 0.445512, 0.250522]},
+ {"name": "context_relevance", "individual_scores": [0.805466, 0.410251, 0.750070, 0.361332]},
+ {"name": "faithfulness", "individual_scores": [0.135581, 0.695974, 0.749861, 0.041999]},
+ {"name": "semantic_answer_similarity", "individual_scores": [0.971241, 0.159320, 0.019722, 1]},
+ ],
+ }
+
+ evaluator_1 = EvaluationResult(pipeline_name="testing_pipeline_1", results=data_1)
+ evaluator_2 = EvaluationResult(pipeline_name="testing_pipeline_2", results=data_2)
+ results = evaluator_1.comparative_individual_scores_report(evaluator_2)
+
+ assert results.to_json() == (
+ '{"query_id":{"0":"53c3b3e6","1":"225f87f7"},'
+ '"question":{"0":"What is the capital of France?","1":"What is the capital of Spain?"},'
+ '"contexts":{"0":"wiki_France","1":"wiki_Spain"},"answer":{"0":"Paris","1":"Madrid"},'
+ '"testing_pipeline_1_predicted_answer":{"0":"Paris","1":"Madrid"},'
+ '"testing_pipeline_1_reciprocal_rank":{"0":0.378064,"1":0.534964},'
+ '"testing_pipeline_1_single_hit":{"0":1,"1":1},'
+ '"testing_pipeline_1_multi_hit":{"0":0.706125,"1":0.454976},'
+ '"testing_pipeline_1_context_relevance":{"0":0.805466,"1":0.410251},'
+ '"testing_pipeline_1_faithfulness":{"0":0.135581,"1":0.695974},'
+ '"testing_pipeline_1_semantic_answer_similarity":{"0":0.971241,"1":0.15932},'
+ '"testing_pipeline_2_predicted_answer":{"0":"Paris","1":"Madrid"},'
+ '"testing_pipeline_2_reciprocal_rank":{"0":0.378064,"1":0.534964},'
+ '"testing_pipeline_2_single_hit":{"0":1,"1":1},'
+ '"testing_pipeline_2_multi_hit":{"0":0.706125,"1":0.454976},'
+ '"testing_pipeline_2_context_relevance":{"0":0.805466,"1":0.410251},'
+ '"testing_pipeline_2_faithfulness":{"0":0.135581,"1":0.695974},'
+ '"testing_pipeline_2_semantic_answer_similarity":{"0":0.971241,"1":0.15932}}'
+ )
| diff --git a/releasenotes/notes/implemeting-eval-results-API-25b2f8707495bea0.yaml b/releasenotes/notes/implemeting-eval-results-API-25b2f8707495bea0.yaml
new file mode 100644
index 0000000000..2f02640cae
--- /dev/null
+++ b/releasenotes/notes/implemeting-eval-results-API-25b2f8707495bea0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added a new EvaluationResult component.
+ This is a wrapper for all the results coming from the Evaluators, presenting the metric scores as a DataFrame.
| [
{
"components": [
{
"doc": "A class to store the results of an evaluation pipeline.\n\ndata = {\n \"inputs\": {\n \"question\": [\"What is the capital of France?\", \"What is the capital of Spain?\"],\n \"contexts\": [\"wiki_France\", \"wiki_Spain\"],\n \"predicted_answer\"... | [
"test/components/evaluators/test_results_evaluator.py::test_init_results_evaluator",
"test/components/evaluators/test_results_evaluator.py::test_score_report",
"test/components/evaluators/test_results_evaluator.py::test_to_pandas",
"test/components/evaluators/test_results_evaluator.py::test_comparative_indivi... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: implementing evalualtion results API
### Related Issues
- handles #7508
### Proposed Changes:
- a new Evaluator component to present evaluation results
### How did you test it?
- unit tests for each new method
### Notes for the reviewer
<!-- E.g. point out section where the reviewer -->
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/evaluation_result.py]
(definition of EvaluationResult:)
class EvaluationResult:
"""A class to store the results of an evaluation pipeline.
data = {
"inputs": {
"question": ["What is the capital of France?", "What is the capital of Spain?"],
"contexts": ["wiki_France", "wiki_Spain"],
"predicted_answer": ["Paris", "Madrid"],
},
"metrics": [
{"name": "reciprocal_rank", "scores": [0.378064, 0.534964, 0.216058, 0.778642]},
{"name": "context_relevance", "scores": [0.805466, 0.410251, 0.750070, 0.361332]},
],
}
eval_result = EvaluationResult(pipeline_name="testing_pipeline_1", results=data)
eval_result.to_pandas()"""
(definition of EvaluationResult.__init__:)
def __init__(self, pipeline_name: str, results: Dict[str, Any]):
"""Initialize the EvaluationResult object.
:param pipeline_name: The name of the pipeline that generated the results.
:param results: A dictionary containing the results of the evaluators used in the EvaluationPipeline.
it should have the following keys:
- inputs: A dictionary containing the inputs used in the evaluation.
- metrics: A list of dictionaries each containing the following keys:
'name': The name of the metric.
'score': The aggregated score for the metric.
'individual_scores': A list of scores for each query."""
(definition of EvaluationResult.score_report:)
def score_report(self) -> DataFrame:
"""Transforms the results into a DataFrame with the aggregated scores for each metric.
:returns:
A DataFrame with the aggregated scores."""
(definition of EvaluationResult.to_pandas:)
def to_pandas(self) -> DataFrame:
"""Creates a DataFrame containing the scores for each query and each metric.
:returns:
A DataFrame with the scores."""
(definition of EvaluationResult.comparative_individual_scores_report:)
def comparative_individual_scores_report(self, other: "EvaluationResult") -> DataFrame:
"""Creates a DataFrame with the scores for each metric in the results of two different pipelines.
:param other: The other EvaluationResults object to compare with.
:returns:
A DataFrame with the scores from both EvaluationResults objects."""
[end of new definitions in haystack/components/evaluators/evaluation_result.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
deepset-ai__haystack-7519 | 7,519 | deepset-ai/haystack | null | 3d0f7affed7b192d32d295a6c92bdff5e8f97de4 | 2024-04-09T15:52:30Z | diff --git a/docs/pydoc/config/evaluators_api.yml b/docs/pydoc/config/evaluators_api.yml
index 9acd64efb7..b24b3003e0 100644
--- a/docs/pydoc/config/evaluators_api.yml
+++ b/docs/pydoc/config/evaluators_api.yml
@@ -4,6 +4,7 @@ loaders:
modules:
[
"answer_exact_match",
+ "context_relevance",
"document_map",
"document_mrr",
"document_recall",
diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
index f69c8257a9..631691c543 100644
--- a/haystack/components/evaluators/__init__.py
+++ b/haystack/components/evaluators/__init__.py
@@ -1,4 +1,5 @@
from .answer_exact_match import AnswerExactMatchEvaluator
+from .context_relevance import ContextRelevanceEvaluator
from .document_map import DocumentMAPEvaluator
from .document_mrr import DocumentMRREvaluator
from .document_recall import DocumentRecallEvaluator
@@ -9,6 +10,7 @@
__all__ = [
"AnswerExactMatchEvaluator",
+ "ContextRelevanceEvaluator",
"DocumentMAPEvaluator",
"DocumentMRREvaluator",
"DocumentRecallEvaluator",
diff --git a/haystack/components/evaluators/context_relevance.py b/haystack/components/evaluators/context_relevance.py
new file mode 100644
index 0000000000..d78ccfc747
--- /dev/null
+++ b/haystack/components/evaluators/context_relevance.py
@@ -0,0 +1,154 @@
+from typing import Any, Dict, List, Optional
+
+from numpy import mean as np_mean
+
+from haystack import default_from_dict
+from haystack.components.evaluators.llm_evaluator import LLMEvaluator
+from haystack.core.component import component
+from haystack.utils import Secret, deserialize_secrets_inplace
+
+# Private global variable for default examples to include in the prompt if the user does not provide any examples
+_DEFAULT_EXAMPLES = [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 0],
+ },
+ },
+ {
+ "inputs": {"questions": "What is the capital of France?", "contexts": ["Berlin is the capital of Germany."]},
+ "outputs": {"statements": ["Berlin is the capital of Germany."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
+ "outputs": {"statements": ["Rome is the capital of Italy."], "statement_scores": [1]},
+ },
+]
+
+
+class ContextRelevanceEvaluator(LLMEvaluator):
+ """
+ Evaluator that checks if a provided context is relevant to the question.
+
+ An LLM separates the answer into multiple statements and checks whether the statement can be inferred from the
+ context or not. The final score for the full answer is a number from 0.0 to 1.0. It represents the proportion of
+ statements that can be inferred from the provided contexts.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import ContextRelevanceEvaluator
+
+ questions = ["Who created the Python language?"]
+ contexts = [
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+
+ evaluator = ContextRelevanceEvaluator()
+ result = evaluator.run(questions=questions, contexts=contexts)
+ print(result["score"])
+ # 1.0
+ print(result["individual_scores"])
+ # [1.0]
+ print(result["results"])
+ # [{'statements': ['Python, created by Guido van Rossum in the late 1980s.'], 'statement_scores': [1], 'score': 1.0}]
+ ```
+ """
+
+ def __init__(
+ self,
+ examples: Optional[List[Dict[str, Any]]] = None,
+ api: str = "openai",
+ api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
+ ):
+ """
+ Creates an instance of ContextRelevanceEvaluator.
+
+ :param examples:
+ Optional few-shot examples conforming to the expected input and output format of ContextRelevanceEvaluator.
+ Default examples will be used if none are provided.
+ Each example must be a dictionary with keys "inputs" and "outputs".
+ "inputs" must be a dictionary with keys "questions" and "contexts".
+ "outputs" must be a dictionary with "statements" and "statement_scores".
+ Expected format:
+ [{
+ "inputs": {
+ "questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy."],
+ "statement_scores": [1],
+ },
+ }]
+ :param api:
+ The API to use for calling an LLM through a Generator.
+ Supported APIs: "openai".
+ :param api_key:
+ The API key.
+
+ """
+ self.instructions = (
+ "Your task is to judge how relevant the provided context is for answering a question. "
+ "First, please extract statements from the provided context. "
+ "Second, calculate a relevance score for each statement in the context. "
+ "The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant."
+ )
+ self.inputs = [("questions", List[str]), ("contexts", List[List[str]])]
+ self.outputs = ["statements", "statement_scores"]
+ self.examples = examples or _DEFAULT_EXAMPLES
+ self.api = api
+ self.api_key = api_key
+
+ super().__init__(
+ instructions=self.instructions,
+ inputs=self.inputs,
+ outputs=self.outputs,
+ examples=self.examples,
+ api=self.api,
+ api_key=self.api_key,
+ )
+
+ @component.output_types(results=List[Dict[str, Any]])
+ def run(self, questions: List[str], contexts: List[List[str]]) -> Dict[str, Any]:
+ """
+ Run the LLM evaluator.
+
+ :param questions:
+ A list of questions.
+ :param contexts:
+ A list of lists of contexts. Each list of contexts corresponds to one question.
+ :returns:
+ A dictionary with the following outputs:
+ - `score`: Mean context relevance score over all the provided input questions.
+ - `individual_scores`: A list of context relevance scores for each input question.
+ - `results`: A list of dictionaries with `statements` and `statement_scores` for each input context.
+ """
+ result = super().run(questions=questions, contexts=contexts)
+
+ # calculate average statement relevance score per query
+ for res in result["results"]:
+ res["score"] = np_mean(res["statement_scores"])
+
+ # calculate average context relevance score over all queries
+ result["score"] = np_mean([res["score"] for res in result["results"]])
+ result["individual_scores"] = [res["score"] for res in result["results"]]
+
+ return result
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "ContextRelevanceEvaluator":
+ """
+ Deserialize this component from a dictionary.
+
+ :param data:
+ The dictionary representation of this component.
+ :returns:
+ The deserialized component instance.
+ """
+ deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
+ return default_from_dict(cls, data)
diff --git a/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml b/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml
new file mode 100644
index 0000000000..2ab79f87cf
--- /dev/null
+++ b/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add a new ContextRelevanceEvaluator component that can be used to evaluate whether retrieved documents are relevant to answer a question with a RAG pipeline.
+ Given a question and a list of retrieved document contents (contexts), an LLM is used to score to what extent the provided context is relevant. The score ranges from 0 to 1.
| diff --git a/test/components/evaluators/test_context_relevance_evaluator.py b/test/components/evaluators/test_context_relevance_evaluator.py
new file mode 100644
index 0000000000..8bd1a3cfd7
--- /dev/null
+++ b/test/components/evaluators/test_context_relevance_evaluator.py
@@ -0,0 +1,142 @@
+import os
+from typing import List
+
+import pytest
+
+from haystack.components.evaluators import ContextRelevanceEvaluator
+from haystack.utils.auth import Secret
+
+
+class TestContextRelevanceEvaluator:
+ def test_init_default(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.instructions == (
+ "Your task is to judge how relevant the provided context is for answering a question. "
+ "First, please extract statements from the provided context. "
+ "Second, calculate a relevance score for each statement in the context. "
+ "The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant."
+ )
+ assert component.inputs == [("questions", List[str]), ("contexts", List[List[str]])]
+ assert component.outputs == ["statements", "statement_scores"]
+ assert component.examples == [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 0],
+ },
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": ["Berlin is the capital of Germany."],
+ },
+ "outputs": {"statements": ["Berlin is the capital of Germany."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
+ "outputs": {"statements": ["Rome is the capital of Italy."], "statement_scores": [1]},
+ },
+ ]
+
+ def test_init_fail_wo_openai_api_key(self, monkeypatch):
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+ with pytest.raises(ValueError, match="None of the .* environment variables are set"):
+ ContextRelevanceEvaluator()
+
+ def test_init_with_parameters(self):
+ component = ContextRelevanceEvaluator(
+ api_key=Secret.from_token("test-api-key"),
+ api="openai",
+ examples=[
+ {"inputs": {"questions": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"questions": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ],
+ )
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.api == "openai"
+ assert component.examples == [
+ {"inputs": {"questions": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"questions": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ]
+
+ def test_from_dict(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
+ data = {
+ "type": "haystack.components.evaluators.context_relevance.ContextRelevanceEvaluator",
+ "init_parameters": {
+ "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
+ "api": "openai",
+ "examples": [{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}],
+ },
+ }
+ component = ContextRelevanceEvaluator.from_dict(data)
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.examples == [{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}]
+
+ def test_run_calculates_mean_score(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+
+ def generator_run(self, *args, **kwargs):
+ if "Football" in kwargs["prompt"]:
+ return {"replies": ['{"statements": ["a", "b"], "statement_scores": [1, 0]}']}
+ else:
+ return {"replies": ['{"statements": ["c", "d"], "statement_scores": [1, 1]}']}
+
+ monkeypatch.setattr("haystack.components.generators.openai.OpenAIGenerator.run", generator_run)
+
+ questions = ["Which is the most popular global sport?", "Who created the Python language?"]
+ contexts = [
+ [
+ "The popularity of sports can be measured in various ways, including TV viewership, social media "
+ "presence, number of participants, and economic impact. Football is undoubtedly the world's most "
+ "popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and "
+ "Messi, drawing a followership of more than 4 billion people."
+ ],
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming "
+ "language. Its design philosophy emphasizes code readability, and its language constructs aim to help "
+ "programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ results = component.run(questions=questions, contexts=contexts)
+ assert results == {
+ "individual_scores": [0.5, 1],
+ "results": [
+ {"score": 0.5, "statement_scores": [1, 0], "statements": ["a", "b"]},
+ {"score": 1, "statement_scores": [1, 1], "statements": ["c", "d"]},
+ ],
+ "score": 0.75,
+ }
+
+ def test_run_missing_parameters(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+ with pytest.raises(TypeError, match="missing 2 required positional arguments"):
+ component.run()
+
+ @pytest.mark.skipif(
+ not os.environ.get("OPENAI_API_KEY", None),
+ reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
+ )
+ @pytest.mark.integration
+ def test_live_run(self):
+ questions = ["Who created the Python language?"]
+ contexts = [["Python, created by Guido van Rossum, is a high-level general-purpose programming language."]]
+
+ evaluator = ContextRelevanceEvaluator()
+ result = evaluator.run(questions=questions, contexts=contexts)
+ assert result["score"] == 1.0
+ assert result["individual_scores"] == [1.0]
+ assert result["results"][0]["score"] == 1.0
+ assert result["results"][0]["statement_scores"] == [1.0]
+ assert "Guido van Rossum" in result["results"][0]["statements"][0]
| diff --git a/docs/pydoc/config/evaluators_api.yml b/docs/pydoc/config/evaluators_api.yml
index 9acd64efb7..b24b3003e0 100644
--- a/docs/pydoc/config/evaluators_api.yml
+++ b/docs/pydoc/config/evaluators_api.yml
@@ -4,6 +4,7 @@ loaders:
modules:
[
"answer_exact_match",
+ "context_relevance",
"document_map",
"document_mrr",
"document_recall",
diff --git a/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml b/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml
new file mode 100644
index 0000000000..2ab79f87cf
--- /dev/null
+++ b/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add a new ContextRelevanceEvaluator component that can be used to evaluate whether retrieved documents are relevant to answer a question with a RAG pipeline.
+ Given a question and a list of retrieved document contents (contexts), an LLM is used to score to what extent the provided context is relevant. The score ranges from 0 to 1.
| [
{
"components": [
{
"doc": "Evaluator that checks if a provided context is relevant to the question.\n\nAn LLM separates the answer into multiple statements and checks whether the statement can be inferred from the\ncontext or not. The final score for the full answer is a number from 0.0 to 1.0. I... | [
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_init_default",
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_init_fail_wo_openai_api_key",
"test/components/evaluators/test_context_relevance_evaluator.py::... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add ContextRelevanceEvaluator component
### Related Issues
- fixes #7022
### Proposed Changes:
- Add new ContextRelevanceEvaluator component
### How did you test it?
- Added new unit tests
- Ran the docstring example locally
- I also ran the following example with a pipeline containing two evaluators locally:
```python
from haystack import Pipeline
from haystack.components.evaluators import ContextRelevanceEvaluator, FaithfulnessEvaluator
pipeline = Pipeline()
context_relevance_evaluator = ContextRelevanceEvaluator()
faithfulness_evaluator = FaithfulnessEvaluator()
pipeline.add_component("context_relevance_evaluator", context_relevance_evaluator)
pipeline.add_component("faithfulness_evaluator", faithfulness_evaluator)
questions = ["Who created the Python language?"]
contexts = [
[
"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
],
]
responses = ["Python is a high-level general-purpose programming language that was created by George Lucas."]
result = pipeline.run(
{
"context_relevance_evaluator": {"questions": questions, "contexts": contexts},
"faithfulness_evaluator": {"questions": questions, "contexts": contexts, "responses": responses}
}
)
for evaluator in result:
print(result[evaluator]["individual_scores"])
# [1.0]
# [0.5]
for evaluator in result:
print(result[evaluator]["score"])
# 1.0
# 0.5
```
### Notes for the reviewer
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/context_relevance.py]
(definition of ContextRelevanceEvaluator:)
class ContextRelevanceEvaluator(LLMEvaluator):
"""Evaluator that checks if a provided context is relevant to the question.
An LLM separates the answer into multiple statements and checks whether the statement can be inferred from the
context or not. The final score for the full answer is a number from 0.0 to 1.0. It represents the proportion of
statements that can be inferred from the provided contexts.
Usage example:
```python
from haystack.components.evaluators import ContextRelevanceEvaluator
questions = ["Who created the Python language?"]
contexts = [
[
"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
],
]
evaluator = ContextRelevanceEvaluator()
result = evaluator.run(questions=questions, contexts=contexts)
print(result["score"])
# 1.0
print(result["individual_scores"])
# [1.0]
print(result["results"])
# [{'statements': ['Python, created by Guido van Rossum in the late 1980s.'], 'statement_scores': [1], 'score': 1.0}]
```"""
(definition of ContextRelevanceEvaluator.__init__:)
def __init__( self, examples: Optional[List[Dict[str, Any]]] = None, api: str = "openai", api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"), ):
"""Creates an instance of ContextRelevanceEvaluator.
:param examples:
Optional few-shot examples conforming to the expected input and output format of ContextRelevanceEvaluator.
Default examples will be used if none are provided.
Each example must be a dictionary with keys "inputs" and "outputs".
"inputs" must be a dictionary with keys "questions" and "contexts".
"outputs" must be a dictionary with "statements" and "statement_scores".
Expected format:
[{
"inputs": {
"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
},
"outputs": {
"statements": ["Rome is the capital of Italy."],
"statement_scores": [1],
},
}]
:param api:
The API to use for calling an LLM through a Generator.
Supported APIs: "openai".
:param api_key:
The API key."""
(definition of ContextRelevanceEvaluator.run:)
def run(self, questions: List[str], contexts: List[List[str]]) -> Dict[str, Any]:
"""Run the LLM evaluator.
:param questions:
A list of questions.
:param contexts:
A list of lists of contexts. Each list of contexts corresponds to one question.
:returns:
A dictionary with the following outputs:
- `score`: Mean context relevance score over all the provided input questions.
- `individual_scores`: A list of context relevance scores for each input question.
- `results`: A list of dictionaries with `statements` and `statement_scores` for each input context."""
(definition of ContextRelevanceEvaluator.from_dict:)
def from_dict(cls, data: Dict[str, Any]) -> "ContextRelevanceEvaluator":
"""Deserialize this component from a dictionary.
:param data:
The dictionary representation of this component.
:returns:
The deserialized component instance."""
[end of new definitions in haystack/components/evaluators/context_relevance.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Custom LLM-based evaluator in Haystack core
Now that we have integrations for third party LLM eval frameworks, we need to add support for a handful of LLM-based metrics that we officially support as part of core. This will be done by implementing a custom `LLMEvaluator` component that wraps around one or more of our generator APIs. We'll then build a small section of curated metrics on top of this component, all the while allowing the user to change the underlying service (OpenAI, Cohere, etc) and the associated prompts at will
```[tasklist]
### Tasks
- [ ] https://github.com/deepset-ai/haystack/issues/7023
- [ ] https://github.com/deepset-ai/haystack/issues/7024
- [ ] https://github.com/deepset-ai/haystack/issues/7025
```
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
google-deepmind__optax-919 | 919 | google-deepmind/optax | null | 9f7446eba856108b98600ff070a2fda6048266af | 2024-04-09T15:13:58Z | diff --git a/docs/api/projections.rst b/docs/api/projections.rst
index 1ea981c41..8d38166b4 100644
--- a/docs/api/projections.rst
+++ b/docs/api/projections.rst
@@ -34,6 +34,7 @@ Available projections
projection_box
projection_hypercube
projection_non_negative
+ projection_simplex
Projection onto a box
~~~~~~~~~~~~~~~~~~~~~
@@ -46,3 +47,7 @@ Projection onto a hypercube
Projection onto the non-negative orthant
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: projection_non_negative
+
+Projection onto a simplex
+~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: projection_simplex
diff --git a/optax/projections/__init__.py b/optax/projections/__init__.py
index 233a5820d..70fbd949b 100644
--- a/optax/projections/__init__.py
+++ b/optax/projections/__init__.py
@@ -18,3 +18,4 @@
from optax.projections._projections import projection_box
from optax.projections._projections import projection_hypercube
from optax.projections._projections import projection_non_negative
+from optax.projections._projections import projection_simplex
diff --git a/optax/projections/_projections.py b/optax/projections/_projections.py
index 6409d8e48..e557fe4ba 100644
--- a/optax/projections/_projections.py
+++ b/optax/projections/_projections.py
@@ -17,7 +17,10 @@
from typing import Any
+import chex
+
import jax
+from jax import flatten_util
from jax import tree_util as jtu
import jax.numpy as jnp
@@ -88,3 +91,73 @@ def projection_hypercube(pytree: Any, scale: Any = 1.0) -> Any:
projected pytree, with the same structure as ``pytree``.
"""
return projection_box(pytree, lower=0.0, upper=scale)
+
+
+@jax.custom_jvp
+def _projection_unit_simplex(values: chex.Array) -> chex.Array:
+ """Projection onto the unit simplex."""
+ s = 1.0
+ n_features = values.shape[0]
+ u = jnp.sort(values)[::-1]
+ cumsum_u = jnp.cumsum(u)
+ ind = jnp.arange(n_features) + 1
+ cond = s / ind + (u - cumsum_u / ind) > 0
+ idx = jnp.count_nonzero(cond)
+ return jax.nn.relu(s / idx + (values - cumsum_u[idx - 1] / idx))
+
+
+@_projection_unit_simplex.defjvp
+def _projection_unit_simplex_jvp(
+ primals: list[chex.Array], tangents: list[chex.Array]
+) -> tuple[chex.Array, chex.Array]:
+ values, = primals
+ values_dot, = tangents
+ primal_out = _projection_unit_simplex(values)
+ supp = primal_out > 0
+ card = jnp.count_nonzero(supp)
+ tangent_out = supp * values_dot - (jnp.dot(supp, values_dot) / card) * supp
+ return primal_out, tangent_out
+
+
+def projection_simplex(pytree: Any,
+ scale: chex.Numeric = 1.0) -> Any:
+ r"""Projection onto a simplex.
+
+ This function solves the following constrained optimization problem,
+ where ``p`` is the input pytree.
+
+ .. math::
+
+ \underset{p}{\text{argmin}} ~ ||x - p||_2^2 \quad \textrm{subject to} \quad
+ p \ge 0, p^\top 1 = \text{scale}
+
+ By default, the projection is onto the probability simplex (unit simplex).
+
+ Args:
+ pytree: pytree to project.
+ scale: value the projected pytree should sum to (default: 1.0).
+ Returns:
+ projected pytree, a pytree with the same structure as ``pytree``.
+
+ .. versionadded:: 0.2.3
+
+ Example:
+
+ Here is an example using a pytree::
+
+ >>> import jax.numpy as jnp
+ >>> from optax import tree_utils, projections
+ >>> pytree = {"w": jnp.array([2.5, 3.2]), "b": 0.5}
+ >>> tree_utils.tree_sum(pytree)
+ 6.2
+ >>> new_pytree = projections.projection_simplex(pytree)
+ >>> tree_utils.tree_sum(new_pytree)
+ 1.0000002
+ """
+ if scale is None:
+ scale = 1.0
+
+ values, unravel_fn = flatten_util.ravel_pytree(pytree)
+ new_values = scale * _projection_unit_simplex(values / scale)
+
+ return unravel_fn(new_values)
| diff --git a/optax/projections/_projections_test.py b/optax/projections/_projections_test.py
index 8eec26739..b71e28d1a 100644
--- a/optax/projections/_projections_test.py
+++ b/optax/projections/_projections_test.py
@@ -17,12 +17,19 @@
from absl.testing import absltest
from absl.testing import parameterized
-
import chex
+import jax
import jax.numpy as jnp
import numpy as np
-
from optax import projections as proj
+import optax.tree_utils as otu
+
+
+def projection_simplex_jacobian(projection):
+ """Theoretical expression for the Jacobian of projection_simplex."""
+ support = (projection > 0).astype(jnp.int32)
+ cardinality = jnp.count_nonzero(support)
+ return jnp.diag(support) - jnp.outer(support, support) / cardinality
class ProjectionsTest(parameterized.TestCase):
@@ -34,48 +41,49 @@ def test_projection_non_negative(self):
np.testing.assert_array_equal(proj.projection_non_negative(x), expected)
with self.subTest('with a tuple'):
- np.testing.assert_array_equal(proj.projection_non_negative((x, x)),
- (expected, expected))
+ np.testing.assert_array_equal(
+ proj.projection_non_negative((x, x)), (expected, expected)
+ )
with self.subTest('with nested pytree'):
tree_x = (-1.0, {'k1': 1.0, 'k2': (1.0, 1.0)}, 1.0)
tree_expected = (0.0, {'k1': 1.0, 'k2': (1.0, 1.0)}, 1.0)
- chex.assert_trees_all_equal(proj.projection_non_negative(tree_x),
- tree_expected)
+ chex.assert_trees_all_equal(
+ proj.projection_non_negative(tree_x), tree_expected
+ )
def test_projection_box(self):
with self.subTest('lower and upper are scalars'):
lower, upper = 0.0, 2.0
x = jnp.array([-1.0, 2.0, 3.0])
expected = jnp.array([0, 2.0, 2.0])
- np.testing.assert_array_equal(proj.projection_box(x, lower, upper),
- expected)
+ np.testing.assert_array_equal(
+ proj.projection_box(x, lower, upper), expected
+ )
with self.subTest('lower and upper values are arrays'):
lower_arr = jnp.ones(len(x)) * lower
upper_arr = jnp.ones(len(x)) * upper
- np.testing.assert_array_equal(proj.projection_box(x,
- lower_arr,
- upper_arr),
- expected)
+ np.testing.assert_array_equal(
+ proj.projection_box(x, lower_arr, upper_arr), expected
+ )
with self.subTest('lower and upper are tuples of arrays'):
lower_tuple = (lower, lower)
upper_tuple = (upper, upper)
- chex.assert_trees_all_equal(proj.projection_box((x, x),
- lower_tuple,
- upper_tuple),
- (expected, expected))
+ chex.assert_trees_all_equal(
+ proj.projection_box((x, x), lower_tuple, upper_tuple),
+ (expected, expected),
+ )
with self.subTest('lower and upper are pytrees'):
tree = (-1.0, {'k1': 2.0, 'k2': (2.0, 3.0)}, 3.0)
expected = (0.0, {'k1': 2.0, 'k2': (2.0, 2.0)}, 2.0)
lower_tree = (0.0, {'k1': 0.0, 'k2': (0.0, 0.0)}, 0.0)
upper_tree = (2.0, {'k1': 2.0, 'k2': (2.0, 2.0)}, 2.0)
- chex.assert_trees_all_equal(proj.projection_box(tree,
- lower_tree,
- upper_tree),
- expected)
+ chex.assert_trees_all_equal(
+ proj.projection_box(tree, lower_tree, upper_tree), expected
+ )
def test_projection_hypercube(self):
x = jnp.array([-1.0, 2.0, 0.5])
@@ -90,8 +98,74 @@ def test_projection_hypercube(self):
with self.subTest('with array scales'):
scales = jnp.ones(len(x)) * 0.8
- np.testing.assert_array_equal(proj.projection_hypercube(x, scales),
- expected)
+ np.testing.assert_array_equal(
+ proj.projection_hypercube(x, scales), expected
+ )
+
+ @parameterized.parameters(1.0, 0.8)
+ def test_projection_simplex_array(self, scale):
+ rng = np.random.RandomState(0)
+ x = rng.randn(50).astype(np.float32)
+ p = proj.projection_simplex(x, scale)
+
+ np.testing.assert_almost_equal(jnp.sum(p), scale, decimal=4)
+ self.assertTrue(jnp.all(0 <= p))
+ self.assertTrue(jnp.all(p <= scale))
+
+ @parameterized.parameters(1.0, 0.8)
+ def test_projection_simplex_pytree(self, scale):
+ pytree = {'w': jnp.array([2.5, 3.2]), 'b': 0.5}
+ new_pytree = proj.projection_simplex(pytree, scale)
+ np.testing.assert_almost_equal(otu.tree_sum(new_pytree), scale, decimal=4)
+
+ @parameterized.parameters(1.0, 0.8)
+ def test_projection_simplex_edge_case(self, scale):
+ p = proj.projection_simplex(jnp.array([0.0, 0.0, -jnp.inf]), scale)
+ np.testing.assert_array_almost_equal(
+ p, jnp.array([scale / 2, scale / 2, 0.0])
+ )
+
+ def test_projection_simplex_jacobian(self):
+ rng = np.random.RandomState(0)
+
+ x = rng.rand(5).astype(np.float32)
+ v = rng.randn(5).astype(np.float32)
+
+ jac_rev = jax.jacrev(proj.projection_simplex)(x)
+ jac_fwd = jax.jacfwd(proj.projection_simplex)(x)
+
+ with self.subTest('Check against theoretical expression'):
+ p = proj.projection_simplex(x)
+ jac_true = projection_simplex_jacobian(p)
+
+ np.testing.assert_array_almost_equal(jac_true, jac_fwd)
+ np.testing.assert_array_almost_equal(jac_true, jac_rev)
+
+ with self.subTest('Check against finite difference'):
+ jvp = jax.jvp(proj.projection_simplex, (x,), (v,))[1]
+ eps = 1e-4
+ jvp_finite_diff = (proj.projection_simplex(x + eps * v) -
+ proj.projection_simplex(x - eps * v)) / (2 * eps)
+ np.testing.assert_array_almost_equal(jvp, jvp_finite_diff, decimal=3)
+
+ with self.subTest('Check vector-Jacobian product'):
+ (vjp,) = jax.vjp(proj.projection_simplex, x)[1](v)
+ np.testing.assert_array_almost_equal(vjp, jnp.dot(v, jac_true))
+
+ with self.subTest('Check Jacobian-vector product'):
+ jvp = jax.jvp(proj.projection_simplex, (x,), (v,))[1]
+ np.testing.assert_array_almost_equal(jvp, jnp.dot(jac_true, v))
+
+ @parameterized.parameters(1.0, 0.8)
+ def test_projection_simplex_vmap(self, scale):
+ rng = np.random.RandomState(0)
+ x = rng.randn(3, 50).astype(np.float32)
+ scales = jnp.full(len(x), scale)
+
+ p = jax.vmap(proj.projection_simplex)(x, scales)
+ np.testing.assert_array_almost_equal(jnp.sum(p, axis=1), scales)
+ np.testing.assert_array_equal(True, 0 <= p)
+ np.testing.assert_array_equal(True, p <= scale)
if __name__ == '__main__':
| diff --git a/docs/api/projections.rst b/docs/api/projections.rst
index 1ea981c41..8d38166b4 100644
--- a/docs/api/projections.rst
+++ b/docs/api/projections.rst
@@ -34,6 +34,7 @@ Available projections
projection_box
projection_hypercube
projection_non_negative
+ projection_simplex
Projection onto a box
~~~~~~~~~~~~~~~~~~~~~
@@ -46,3 +47,7 @@ Projection onto a hypercube
Projection onto the non-negative orthant
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: projection_non_negative
+
+Projection onto a simplex
+~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: projection_simplex
| [
{
"components": [
{
"doc": "Projection onto the unit simplex.",
"lines": [
97,
106
],
"name": "_projection_unit_simplex",
"signature": "def _projection_unit_simplex(values: chex.Array) -> chex.Array:",
"type": "function"
},
{
... | [
"optax/projections/_projections_test.py::ProjectionsTest::test_projection_simplex_array0",
"optax/projections/_projections_test.py::ProjectionsTest::test_projection_simplex_array1",
"optax/projections/_projections_test.py::ProjectionsTest::test_projection_simplex_edge_case0",
"optax/projections/_projections_t... | [
"optax/projections/_projections_test.py::ProjectionsTest::test_projection_box",
"optax/projections/_projections_test.py::ProjectionsTest::test_projection_hypercube",
"optax/projections/_projections_test.py::ProjectionsTest::test_projection_non_negative"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add projection_simplex.
Add projection_simplex.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in optax/projections/_projections.py]
(definition of _projection_unit_simplex:)
def _projection_unit_simplex(values: chex.Array) -> chex.Array:
"""Projection onto the unit simplex."""
(definition of _projection_unit_simplex_jvp:)
def _projection_unit_simplex_jvp( primals: list[chex.Array], tangents: list[chex.Array] ) -> tuple[chex.Array, chex.Array]:
(definition of projection_simplex:)
def projection_simplex(pytree: Any, scale: chex.Numeric = 1.0) -> Any:
"""Projection onto a simplex.
This function solves the following constrained optimization problem,
where ``p`` is the input pytree.
.. math::
\underset{p}{\text{argmin}} ~ ||x - p||_2^2 \quad \textrm{subject to} \quad
p \ge 0, p^\top 1 = \text{scale}
By default, the projection is onto the probability simplex (unit simplex).
Args:
pytree: pytree to project.
scale: value the projected pytree should sum to (default: 1.0).
Returns:
projected pytree, a pytree with the same structure as ``pytree``.
.. versionadded:: 0.2.3
Example:
Here is an example using a pytree::
>>> import jax.numpy as jnp
>>> from optax import tree_utils, projections
>>> pytree = {"w": jnp.array([2.5, 3.2]), "b": 0.5}
>>> tree_utils.tree_sum(pytree)
6.2
>>> new_pytree = projections.projection_simplex(pytree)
>>> tree_utils.tree_sum(new_pytree)
1.0000002"""
[end of new definitions in optax/projections/_projections.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 1e08bccf195ac54e7d9d766eb5e69345bf0e3230 | |
tobymao__sqlglot-3284 | 3,284 | tobymao/sqlglot | null | 46fbd8d9b7684d7c1613a264117c1bd5d6571999 | 2024-04-08T15:39:31Z | diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 702aaff6a2..6e1c2ce05e 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2225,6 +2225,13 @@ class Lateral(UDTF):
}
+class MatchRecognizeMeasure(Expression):
+ arg_types = {
+ "this": True,
+ "window_frame": False,
+ }
+
+
class MatchRecognize(Expression):
arg_types = {
"partition_by": False,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 1cb469aa46..6563accbd9 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2111,6 +2111,14 @@ def ordered_sql(self, expression: exp.Ordered) -> str:
return f"{this}{sort_order}{nulls_sort_change}{with_fill}"
+ def matchrecognizemeasure_sql(self, expression: exp.MatchRecognizeMeasure) -> str:
+ window_frame = self.sql(expression, "window_frame")
+ window_frame = f"{window_frame} " if window_frame else ""
+
+ this = self.sql(expression, "this")
+
+ return f"{window_frame}{this}"
+
def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
partition = self.partition_by_sql(expression)
order = self.sql(expression, "order")
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 5c68be0ae5..ab91576fe7 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2738,6 +2738,13 @@ def _parse_from(
exp.From, comments=self._prev_comments, this=self._parse_table(joins=joins)
)
+ def _parse_match_recognize_measure(self) -> exp.MatchRecognizeMeasure:
+ return self.expression(
+ exp.MatchRecognizeMeasure,
+ window_frame=self._match_texts(("FINAL", "RUNNING")) and self._prev.text.upper(),
+ this=self._parse_expression(),
+ )
+
def _parse_match_recognize(self) -> t.Optional[exp.MatchRecognize]:
if not self._match(TokenType.MATCH_RECOGNIZE):
return None
@@ -2746,7 +2753,12 @@ def _parse_match_recognize(self) -> t.Optional[exp.MatchRecognize]:
partition = self._parse_partition_by()
order = self._parse_order()
- measures = self._parse_expressions() if self._match_text_seq("MEASURES") else None
+
+ measures = (
+ self._parse_csv(self._parse_match_recognize_measure)
+ if self._match_text_seq("MEASURES")
+ else None
+ )
if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
rows = exp.var("ONE ROW PER MATCH")
| diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index a41d35a0cd..a16bd993d3 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -1575,22 +1575,26 @@ def test_regexp_replace(self, logger):
)
def test_match_recognize(self):
- for row in (
- "ONE ROW PER MATCH",
- "ALL ROWS PER MATCH",
- "ALL ROWS PER MATCH SHOW EMPTY MATCHES",
- "ALL ROWS PER MATCH OMIT EMPTY MATCHES",
- "ALL ROWS PER MATCH WITH UNMATCHED ROWS",
- ):
- for after in (
- "AFTER MATCH SKIP",
- "AFTER MATCH SKIP PAST LAST ROW",
- "AFTER MATCH SKIP TO NEXT ROW",
- "AFTER MATCH SKIP TO FIRST x",
- "AFTER MATCH SKIP TO LAST x",
+ for window_frame in ("", "FINAL ", "RUNNING "):
+ for row in (
+ "ONE ROW PER MATCH",
+ "ALL ROWS PER MATCH",
+ "ALL ROWS PER MATCH SHOW EMPTY MATCHES",
+ "ALL ROWS PER MATCH OMIT EMPTY MATCHES",
+ "ALL ROWS PER MATCH WITH UNMATCHED ROWS",
):
- self.validate_identity(
- f"""SELECT
+ for after in (
+ "AFTER MATCH SKIP",
+ "AFTER MATCH SKIP PAST LAST ROW",
+ "AFTER MATCH SKIP TO NEXT ROW",
+ "AFTER MATCH SKIP TO FIRST x",
+ "AFTER MATCH SKIP TO LAST x",
+ ):
+ with self.subTest(
+ f"MATCH_RECOGNIZE with window frame {window_frame}, rows {row}, after {after}: "
+ ):
+ self.validate_identity(
+ f"""SELECT
*
FROM x
MATCH_RECOGNIZE (
@@ -1598,15 +1602,15 @@ def test_match_recognize(self):
ORDER BY
x DESC
MEASURES
- y AS b
+ {window_frame}y AS b
{row}
{after}
PATTERN (^ S1 S2*? ( {{- S3 -}} S4 )+ | PERMUTE(S1, S2){{1,2}} $)
DEFINE
x AS y
)""",
- pretty=True,
- )
+ pretty=True,
+ )
def test_show_users(self):
self.validate_identity("SHOW USERS")
| [] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize"
] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::Te... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(snowflake): FINAL/RUNNING keywords in MATCH_RECOGNIZE MEASURES
Fixes #3282
The `MEASURES` subclause now supports the following syntax:
```
MEASURES {FINAL | RUNNING} <expr> AS <alias> [, ... ]*
```
Where each individual subclause is stored in a new expression `exp.MatchRecognizeMeasure`
Docs
--------
- [Snowflake MATCH_RECOGNIZE](https://docs.snowflake.com/en/sql-reference/constructs/match_recognize)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Support FINAL/RUNNING specifier for measures in match_recognize (Snowflake)
**Fully reproducible code snippet**
```
query = """
SELECT company, price_date, price, "FINAL FIRST(AB12.price)", "FINAL LAST(AB12.price)"
FROM price_history
MATCH_RECOGNIZE (
MEASURES
FINAL FIRST(AB12.price) AS "FINAL FIRST(AB12.price)",
FINAL LAST(AB12.price) AS "FINAL LAST(AB12.price)"
ALL ROWS PER MATCH
AFTER MATCH SKIP PAST LAST ROW
)"""
output = sqlglot.parse(sql=query, dialect=sqlglot.Dialects.SNOWFLAKE)
```
You would expect it to run successfully (a `ParseError` is thrown)
```
ParseError: Expecting ). Line 6, Col: 13.
FROM price_history
MATCH_RECOGNIZE (
MEASURES
FINAL FIRST(AB12.price) AS "FINAL FIRST(AB12.price)",
LAST(AB12.price) AS "FINAL LAST(AB12.pric
```
After removing the `FINAL` specifier - the query parsing succeed
**Official Documentation**
Snowflake documentation: https://docs.snowflake.com/en/sql-reference/constructs/match_recognize
The code snippet is based on the last code example in this docs
----------
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
google-deepmind__optax-911 | 911 | google-deepmind/optax | null | da6882af7e65f20c904e7acdb844a0d09570232f | 2024-04-07T06:56:14Z | diff --git a/docs/api/contrib.rst b/docs/api/contrib.rst
index 8c6a2c021..f166de349 100644
--- a/docs/api/contrib.rst
+++ b/docs/api/contrib.rst
@@ -28,6 +28,9 @@ Experimental features and algorithms that don't meet the
ProdigyState
sam
SAMState
+ schedule_free
+ schedule_free_eval_params
+ ScheduleFreeState
split_real_and_imaginary
SplitRealAndImaginaryState
@@ -90,6 +93,13 @@ Prodigy
.. autoclass:: ProdigyState
:members:
+Schedule-Free
+~~~~~~~~~
+.. autofunction:: schedule_free
+.. autofunction:: schedule_free_eval_params
+.. autoclass:: ScheduleFreeState
+ :members:
+
Sharpness aware minimization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: sam
diff --git a/optax/_src/schedule.py b/optax/_src/schedule.py
index 842246ab3..bdff6cbbf 100644
--- a/optax/_src/schedule.py
+++ b/optax/_src/schedule.py
@@ -36,5 +36,6 @@
piecewise_interpolate_schedule = schedules.piecewise_interpolate_schedule
polynomial_schedule = schedules.polynomial_schedule
sgdr_schedule = schedules.sgdr_schedule
+warmup_constant_schedule = schedules.warmup_constant_schedule
warmup_cosine_decay_schedule = schedules.warmup_cosine_decay_schedule
warmup_exponential_decay_schedule = schedules.warmup_exponential_decay_schedule
diff --git a/optax/contrib/__init__.py b/optax/contrib/__init__.py
index ea25390f8..ab62e98e5 100644
--- a/optax/contrib/__init__.py
+++ b/optax/contrib/__init__.py
@@ -44,3 +44,6 @@
from optax.contrib._sam import NormalizeState
from optax.contrib._sam import sam
from optax.contrib._sam import SAMState
+from optax.contrib._schedule_free import schedule_free
+from optax.contrib._schedule_free import schedule_free_eval_params
+from optax.contrib._schedule_free import ScheduleFreeState
diff --git a/optax/contrib/_schedule_free.py b/optax/contrib/_schedule_free.py
new file mode 100644
index 000000000..4ba168fe6
--- /dev/null
+++ b/optax/contrib/_schedule_free.py
@@ -0,0 +1,187 @@
+# Copyright 2024 DeepMind Technologies Limited. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Schedule-Free wrapper for faster training & removes the need for lr decay."""
+
+from typing import NamedTuple, Optional
+
+import chex
+import jax
+import jax.numpy as jnp
+from optax._src import base
+
+
+class ScheduleFreeState(NamedTuple):
+ """State for schedule_free."""
+
+ b1: chex.Array
+ weight_sum: chex.Array
+ step_count: chex.Array
+ max_lr: chex.Array
+ base_optimizer_state: base.OptState
+ z: base.Params
+
+
+def schedule_free_eval_params(state: ScheduleFreeState, params: base.Params):
+ """Params for evaluation of :func:`optax.contrib.schedule_free`."""
+ return jax.tree_util.tree_map(
+ lambda yi, zi: (yi - (1.0 - state.b1) * zi) / state.b1, params, state.z
+ )
+
+
+def schedule_free(
+ base_optimizer: base.GradientTransformation,
+ learning_rate: base.ScalarOrSchedule,
+ b1: float = 0.9,
+ weight_lr_power: float = 2.0,
+ state_dtype=jnp.float32,
+) -> base.GradientTransformationExtraArgs:
+ r"""Turn base_optimizer schedule_free.
+
+ Accumulates updates returned by the base_optimizer w/o Momentum and
+ replaces the momentum of an underlying optimizer with a combination of
+ interpolation and averaging. In the case of gradient descent the update is
+
+ .. math::
+
+ \begin{align*}
+ y_{t} & = (1-\beta_1)z_{t} + \beta_1 x_{t},\\
+ z_{t+1} & =z_{t}-\gamma\nabla f(y_{t}),\\
+ x_{t+1} & =\left(1-\frac{1}{t}\right)x_{t}+\frac{1}{t}z_{t+1},
+ \end{align*}
+
+ Here :math:`x` is the sequence that evaluations of test/val loss should occur
+ at, which differs from the primary iterates :math:`z` and the gradient
+ evaluation locations :math:`y`. The updates to :math:`z` correspond to the
+ underlying optimizer, in this case a simple gradient step. Note that,
+ :math:`\beta_1` corresponds to `b1` in the code.
+
+ As the name suggests, Schedule-Free learning does not require a decreasing
+ learning rate schedule, yet typically out-performs, or at worst matches, SOTA
+ schedules such as cosine-decay and linear decay. Only two sequences need to be
+ stored at a time (the third can be computed from the other two on the fly) so
+ this method has the same memory requirements as the base optimizer (parameter
+ buffer + momentum).
+
+ In practice, authors recommend tuning :math:`\beta_1`, `warmup_steps` and
+ `peak_lr` for each problem seperately. Default for :math:`\beta_1` is 0.9 but
+ `0.95` and `0.98` may also work well. Schedule-Free can be wrapped on top of
+ any optax optimizer. At test time, the parameters should be evaluated using
+ :func:`optax.contrib.schedule_free_eval_params` as presented below.
+
+ For example, change this::
+
+ learning_rate_fn = optax.warmup_cosine_decay_schedule(peak_value=tuned_lr)
+ optimizer = optax.adam(learning_rate_fn, b1=b1)
+
+ To::
+
+ learning_rate_fn = optax.warmup_constant_schedule(peak_value=retuned_lr)
+ optimizer = optax.adam(learning_rate_fn, b1=0.)
+ optimizer = optax.contrib.schedule_free(optimizer, learning_rate_fn, b1=b1)
+ ..
+ params_for_eval = optax.contrib.schedule_free_eval_params(state, params)
+
+ Especially note that is important to switch off Momentum of the base
+ optimizer. As of Apr, 2024, schedule_free is tested with SGD and Adam.
+
+ References:
+ Defazio et al, `Schedule-Free Learning - A New Way to Train
+ <https://github.com/facebookresearch/schedule_free/tree/main>`_, 2024
+
+ Args:
+ base_optimizer: Base optimizer to compute updates from.
+ learning_rate: learning_rate schedule w/o decay but with warmup.
+ b1: beta_1 parameter in the y update.
+ weight_lr_power: we downweight the weight of averaging using this. This is
+ especially helpful in early iterations during warmup.
+ state_dtype: dtype for z sequence.
+
+ Returns:
+ A `GradientTransformationExtraArgs` with init and update functions.
+ """
+ base_optimizer = base.with_extra_args_support(base_optimizer)
+
+ def init_fn(params: base.Params) -> ScheduleFreeState:
+ z = jax.tree_util.tree_map(lambda t: t.astype(state_dtype), params)
+ return ScheduleFreeState(
+ b1=jnp.array([b1], dtype=jnp.float32),
+ weight_sum=jnp.zeros([], dtype=jnp.float32),
+ step_count=jnp.ones([], dtype=jnp.int32),
+ max_lr=jnp.zeros([], dtype=jnp.float32),
+ base_optimizer_state=base_optimizer.init(params),
+ z=z,
+ )
+
+ def update_fn(
+ grads: base.Updates,
+ state: ScheduleFreeState,
+ params: Optional[base.Params] = None,
+ **extra_args,
+ ):
+ lr = learning_rate
+ if callable(learning_rate):
+ lr = learning_rate(state.step_count)
+ max_lr = jnp.maximum(state.max_lr, lr)
+
+ next_step_count = state.step_count + 1
+
+ weight = max_lr**weight_lr_power
+ next_total_weight = state.weight_sum + weight
+ ck = weight / next_total_weight
+
+ base_updates, next_base_optimizer_state = base_optimizer.update(
+ grads,
+ state.base_optimizer_state,
+ params,
+ **extra_args,
+ )
+ z = jax.tree_util.tree_map(
+ lambda pi, ui: jnp.asarray(pi + ui).astype(jnp.asarray(pi).dtype),
+ state.z,
+ base_updates,
+ )
+
+ # Important: recompute x to both save memory and maintain accurate x seq
+ # especially if y is modified by another transform wrapped on top.
+ prev_x = jax.tree_util.tree_map(
+ lambda yi, zi: (yi - (1.0 - b1) * zi) / b1, params, state.z
+ )
+
+ x = jax.tree_util.tree_map(
+ lambda xi, zi: (1.0 - ck) * xi + ck * zi,
+ prev_x,
+ z,
+ )
+ new_params = jax.tree_util.tree_map(
+ lambda xi, zi: b1 * xi + (1.0 - b1) * zi,
+ x,
+ z,
+ )
+ updates = jax.tree_util.tree_map(
+ lambda npi, pi: npi - pi, new_params, params
+ )
+
+ next_state = ScheduleFreeState(
+ b1=jnp.array([b1], dtype=jnp.float32),
+ weight_sum=next_total_weight,
+ step_count=next_step_count,
+ max_lr=max_lr,
+ base_optimizer_state=next_base_optimizer_state,
+ z=z,
+ )
+
+ return updates, next_state
+
+ return base.GradientTransformationExtraArgs(init_fn, update_fn)
diff --git a/optax/schedules/__init__.py b/optax/schedules/__init__.py
index 12827e327..ab2a44ffe 100644
--- a/optax/schedules/__init__.py
+++ b/optax/schedules/__init__.py
@@ -32,5 +32,6 @@
from optax.schedules._schedule import piecewise_interpolate_schedule
from optax.schedules._schedule import polynomial_schedule
from optax.schedules._schedule import sgdr_schedule
+from optax.schedules._schedule import warmup_constant_schedule
from optax.schedules._schedule import warmup_cosine_decay_schedule
from optax.schedules._schedule import warmup_exponential_decay_schedule
diff --git a/optax/schedules/_schedule.py b/optax/schedules/_schedule.py
index 06f05cdd2..2891d3535 100644
--- a/optax/schedules/_schedule.py
+++ b/optax/schedules/_schedule.py
@@ -454,6 +454,29 @@ def cosine_onecycle_schedule(
int(transition_steps): 1. / (div_factor * final_div_factor)})
+def warmup_constant_schedule(
+ init_value: float,
+ peak_value: float,
+ warmup_steps: int,
+) -> base.Schedule:
+ r"""Linear warmup followed by constant schedule i.e no decay.
+
+ Args:
+ init_value: Initial value for the scalar to be annealed.
+ peak_value: Peak value for scalar to be annealed at end of warmup.
+ warmup_steps: Positive integer, the length of the linear warmup.
+
+ Returns:
+ schedule
+ A function that maps step counts to values
+ """
+ return linear_schedule(
+ init_value=init_value,
+ end_value=peak_value,
+ transition_steps=warmup_steps,
+ )
+
+
def warmup_cosine_decay_schedule(
init_value: float,
peak_value: float,
| diff --git a/optax/contrib/_schedule_free_test.py b/optax/contrib/_schedule_free_test.py
new file mode 100644
index 000000000..a9b8742c9
--- /dev/null
+++ b/optax/contrib/_schedule_free_test.py
@@ -0,0 +1,110 @@
+# Copyright 2024 DeepMind Technologies Limited. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for `_schedule_free.py`."""
+
+from absl.testing import absltest
+from absl.testing import parameterized
+import chex
+import jax
+import jax.numpy as jnp
+import numpy as np
+from optax._src import alias
+from optax._src import numerics
+from optax._src import schedule
+from optax._src import update
+from optax.contrib import _schedule_free
+from optax.tree_utils import _state_utils
+
+
+_WARM_LR = schedule.warmup_constant_schedule(0.0, 1e-2, 5_000)
+
+# TODO(harshm): try other optimizers with schedule_free.
+_OPTIMIZERS_UNDER_TEST = (
+ dict(opt_name='sgd', opt_kwargs=dict(learning_rate=_WARM_LR, momentum=0.0)),
+ dict(opt_name='adam', opt_kwargs=dict(learning_rate=_WARM_LR, b1=0.0)),
+ dict(opt_name='adamw', opt_kwargs=dict(learning_rate=_WARM_LR, b1=0.0)),
+)
+
+
+def _setup_parabola(dtype):
+ """Quadratic function as an optimization target."""
+ initial_params = jnp.array([-1.0, 10.0, 1.0], dtype=dtype)
+ final_params = jnp.array([1.0, -1.0, 1.0], dtype=dtype)
+
+ @jax.grad
+ def get_updates(params):
+ return jnp.sum(numerics.abs_sq(params - final_params))
+
+ return initial_params, final_params, get_updates
+
+
+def _setup_rosenbrock(dtype):
+ """Rosenbrock function as an optimization target."""
+ a = 1.0
+ b = 100.0
+
+ initial_params = jnp.array([0.0, 0.0], dtype=dtype)
+ final_params = jnp.array([a, a**2], dtype=dtype)
+
+ @jax.grad
+ def get_updates(params):
+ return numerics.abs_sq(a - params[0]) + b * numerics.abs_sq(
+ params[1] - params[0] ** 2
+ )
+
+ return initial_params, final_params, get_updates
+
+
+class ScheduleFreeTest(chex.TestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.grads = {'x': np.array(2.0), 'y': np.array(-2.0)}
+ self.initial_params = {'x': np.array(3.0), 'y': np.array(-3.0)}
+
+ @parameterized.product(
+ _OPTIMIZERS_UNDER_TEST,
+ target=(_setup_parabola, _setup_rosenbrock),
+ dtype=(jnp.float32,),
+ )
+ def test_optimization(self, opt_name, opt_kwargs, target, dtype):
+
+ opt = getattr(alias, opt_name)(**opt_kwargs)
+ opt = _schedule_free.schedule_free(
+ opt,
+ learning_rate=_WARM_LR,
+ )
+ initial_params, final_params, get_updates = target(dtype)
+
+ @jax.jit
+ def step(params, state):
+ updates = get_updates(params)
+ updates, state = opt.update(updates, state, params)
+ params = update.apply_updates(params, updates)
+ return params, state
+
+ params = initial_params
+ state = opt.init(params)
+ # A no-op change, to verify that tree map works.
+ state = _state_utils.tree_map_params(opt, lambda v: v, state)
+
+ for _ in range(25000):
+ params, state = step(params, state)
+
+ chex.assert_trees_all_close(params, final_params, rtol=3e-2, atol=3e-2)
+
+
+if __name__ == '__main__':
+ absltest.main()
| diff --git a/docs/api/contrib.rst b/docs/api/contrib.rst
index 8c6a2c021..f166de349 100644
--- a/docs/api/contrib.rst
+++ b/docs/api/contrib.rst
@@ -28,6 +28,9 @@ Experimental features and algorithms that don't meet the
ProdigyState
sam
SAMState
+ schedule_free
+ schedule_free_eval_params
+ ScheduleFreeState
split_real_and_imaginary
SplitRealAndImaginaryState
@@ -90,6 +93,13 @@ Prodigy
.. autoclass:: ProdigyState
:members:
+Schedule-Free
+~~~~~~~~~
+.. autofunction:: schedule_free
+.. autofunction:: schedule_free_eval_params
+.. autoclass:: ScheduleFreeState
+ :members:
+
Sharpness aware minimization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: sam
| [
{
"components": [
{
"doc": "State for schedule_free.",
"lines": [
25,
33
],
"name": "ScheduleFreeState",
"signature": "class ScheduleFreeState(NamedTuple):",
"type": "class"
},
{
"doc": "Params for evaluation of :func:... | [
"optax/contrib/_schedule_free_test.py::ScheduleFreeTest::test_optimization0",
"optax/contrib/_schedule_free_test.py::ScheduleFreeTest::test_optimization1",
"optax/contrib/_schedule_free_test.py::ScheduleFreeTest::test_optimization2",
"optax/contrib/_schedule_free_test.py::ScheduleFreeTest::test_optimization3"... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Port schedule_free optimizer to optax. Original pytorch repo: https://github.com/facebookresearch/schedule_free
Port schedule_free optimizer to optax. Original pytorch repo: https://github.com/facebookresearch/schedule_free
Also add warmup_constant_schedule, which we recommend using with schedule_free
----------
Nice! Thanks for this! One thing that should be mentioned in the docs is that the loss should be evaluated at the `opt_state.x`, not the `params`. (Not sure how much this matters in practice). Otherwise, this looks good to me.
As it currently stands I don't think this will work seamlessly with equinox (https://github.com/patrick-kidger/equinox) which is widely used amongst JAX community.
> As it currently stands I don't think this will work seamlessly with equinox (https://github.com/patrick-kidger/equinox) which is widely used amongst JAX community.
@adam-hartshorne Can you explain why this does not work and maybe for further reference explain what are key criterions for integration of optimizers in equinox? Thank you!
Equinox and the new experimental version of Flax* are both models which try to enable a more pythonic / class based approach (rather than the JAX purely functional / PyTree paradigm). Thus you are encouraged to define classes which are instantiated as a "model" object. They ultimately attempt to return everything as a normal PyTree, but it does require some additional handling of things like static variables and traversing PyTrees. Thus you end up with this design pattern of split / combine / filter. To ease this, they also have special methods for updating during optimisation, which handles all this.
Here is a simple example
https://github.com/patrick-kidger/equinox/blob/main/examples/train_rnn.ipynb
As you can see it requires the use of decorators, @eqx.filter_value_and_grad, @eqx.filter_jit and the updates to the params are applied using eqx.apply_updates.
Now looking at your code the update_fn hard codes the use of optax_update and jax.tree_util.tree_map to the parameters. I don't believe this will be compatible with all models that have been built by inheriting from eqx.Module classes.
*there are a number of other attempts at this.
Thanks for the summary @adam-hartshorne.
I'm not sure I understand though:
(i) this optimizer will still return updates that will be added to the optimizer with the library of your choice. All operations are done on pytrees of the form of updates (grads) that are normally handled by e.g. equinox (after all equinox uses optax too so it also works on updates/params that are usual pytrees without functions). No "model" is given here, nor operations that would call the model.
(ii) the issue here would be in the definition of params in the update function (is it model or is it the params of the model?). This issue, if it exists, should not be new: numerous optimizers have the optional "params" argument like lookahead for example.
That said:
- could you provide a minimum working example to showcase the failures you mention for me to understand better the bug you're pointing out?
- the apply_updates of optax could easily be replaced by the one of equinox to ensure compatibility and ease of use.
fwiw, I've ran into problems with mu_dtype=bf16, at least with small initializations for transformers (0.01 std normal). Haven't tried keeping only x or only z in bfloat16 though, maybe keeping only one in bfloat16 still works in which case it might make sense to pass in separate dtype args for each of them. Also, should x and z be cast back to state_dtype before creating next_state, and state_dtype be canonicalized near the top?
Edit: To clarify problems I run into with bf16 state, it seemingly stalls training altogether and progresses only very slowly.
Edit2 Seems my issue is coming from something other than dtype, will have to do more testing, works differently than [ameya98's implementation](https://gist.github.com/ameya98/7f103501714f4d2fdc0cb793579648d9) but haven't taken time to find differences in the code and against fb research's repo. It does seem very sensitive to dtype, though, vs. regular momentum which most have become accustomed to keeping in bf16.
Any chance this could be merged soon? Wanted to experiment with it!
This is almost good to go internally, I think we are reaching the last round of reviews and it should be merged (so probably early next week).
Hi, any update on this?
I just pinged the author. Sorry for the delay
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in optax/contrib/_schedule_free.py]
(definition of ScheduleFreeState:)
class ScheduleFreeState(NamedTuple):
"""State for schedule_free."""
(definition of schedule_free_eval_params:)
def schedule_free_eval_params(state: ScheduleFreeState, params: base.Params):
"""Params for evaluation of :func:`optax.contrib.schedule_free`."""
(definition of schedule_free:)
def schedule_free( base_optimizer: base.GradientTransformation, learning_rate: base.ScalarOrSchedule, b1: float = 0.9, weight_lr_power: float = 2.0, state_dtype=jnp.float32, ) -> base.GradientTransformationExtraArgs:
"""Turn base_optimizer schedule_free.
Accumulates updates returned by the base_optimizer w/o Momentum and
replaces the momentum of an underlying optimizer with a combination of
interpolation and averaging. In the case of gradient descent the update is
.. math::
\begin{align*}
y_{t} & = (1-\beta_1)z_{t} + \beta_1 x_{t},\\
z_{t+1} & =z_{t}-\gamma\nabla f(y_{t}),\\
x_{t+1} & =\left(1-\frac{1}{t}\right)x_{t}+\frac{1}{t}z_{t+1},
\end{align*}
Here :math:`x` is the sequence that evaluations of test/val loss should occur
at, which differs from the primary iterates :math:`z` and the gradient
evaluation locations :math:`y`. The updates to :math:`z` correspond to the
underlying optimizer, in this case a simple gradient step. Note that,
:math:`\beta_1` corresponds to `b1` in the code.
As the name suggests, Schedule-Free learning does not require a decreasing
learning rate schedule, yet typically out-performs, or at worst matches, SOTA
schedules such as cosine-decay and linear decay. Only two sequences need to be
stored at a time (the third can be computed from the other two on the fly) so
this method has the same memory requirements as the base optimizer (parameter
buffer + momentum).
In practice, authors recommend tuning :math:`\beta_1`, `warmup_steps` and
`peak_lr` for each problem seperately. Default for :math:`\beta_1` is 0.9 but
`0.95` and `0.98` may also work well. Schedule-Free can be wrapped on top of
any optax optimizer. At test time, the parameters should be evaluated using
:func:`optax.contrib.schedule_free_eval_params` as presented below.
For example, change this::
learning_rate_fn = optax.warmup_cosine_decay_schedule(peak_value=tuned_lr)
optimizer = optax.adam(learning_rate_fn, b1=b1)
To::
learning_rate_fn = optax.warmup_constant_schedule(peak_value=retuned_lr)
optimizer = optax.adam(learning_rate_fn, b1=0.)
optimizer = optax.contrib.schedule_free(optimizer, learning_rate_fn, b1=b1)
..
params_for_eval = optax.contrib.schedule_free_eval_params(state, params)
Especially note that is important to switch off Momentum of the base
optimizer. As of Apr, 2024, schedule_free is tested with SGD and Adam.
References:
Defazio et al, `Schedule-Free Learning - A New Way to Train
<https://github.com/facebookresearch/schedule_free/tree/main>`_, 2024
Args:
base_optimizer: Base optimizer to compute updates from.
learning_rate: learning_rate schedule w/o decay but with warmup.
b1: beta_1 parameter in the y update.
weight_lr_power: we downweight the weight of averaging using this. This is
especially helpful in early iterations during warmup.
state_dtype: dtype for z sequence.
Returns:
A `GradientTransformationExtraArgs` with init and update functions."""
(definition of schedule_free.init_fn:)
def init_fn(params: base.Params) -> ScheduleFreeState:
(definition of schedule_free.update_fn:)
def update_fn( grads: base.Updates, state: ScheduleFreeState, params: Optional[base.Params] = None, **extra_args, ):
[end of new definitions in optax/contrib/_schedule_free.py]
[start of new definitions in optax/schedules/_schedule.py]
(definition of warmup_constant_schedule:)
def warmup_constant_schedule( init_value: float, peak_value: float, warmup_steps: int, ) -> base.Schedule:
"""Linear warmup followed by constant schedule i.e no decay.
Args:
init_value: Initial value for the scalar to be annealed.
peak_value: Peak value for scalar to be annealed at end of warmup.
warmup_steps: Positive integer, the length of the linear warmup.
Returns:
schedule
A function that maps step counts to values"""
[end of new definitions in optax/schedules/_schedule.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 1e08bccf195ac54e7d9d766eb5e69345bf0e3230 | |
tobymao__sqlglot-3277 | 3,277 | tobymao/sqlglot | null | 08222c2c626353be108347b95644660fe04dfcd1 | 2024-04-04T20:43:32Z | diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 2167ba29a1..dbe90b045f 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -15,7 +15,7 @@
build_formatted_time,
filter_array_using_unnest,
if_sql,
- inline_array_sql,
+ inline_array_unless_query,
max_or_greatest,
min_or_least,
no_ilike_sql,
@@ -576,6 +576,7 @@ class Generator(generator.Generator):
exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
+ exp.Array: inline_array_unless_query,
exp.ArrayContains: _array_contains_sql,
exp.ArrayFilter: filter_array_using_unnest,
exp.ArraySize: rename_func("ARRAY_LENGTH"),
@@ -843,13 +844,6 @@ def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
def trycast_sql(self, expression: exp.TryCast) -> str:
return self.cast_sql(expression, safe_prefix="SAFE_")
- def array_sql(self, expression: exp.Array) -> str:
- first_arg = seq_get(expression.expressions, 0)
- if isinstance(first_arg, exp.Query):
- return f"ARRAY{self.wrap(self.sql(first_arg))}"
-
- return inline_array_sql(self, expression)
-
def bracket_sql(self, expression: exp.Bracket) -> str:
this = expression.this
expressions = expression.expressions
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index 81057c2695..1e4cfeba09 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -571,6 +571,13 @@ def inline_array_sql(self: Generator, expression: exp.Array) -> str:
return f"[{self.expressions(expression, flat=True)}]"
+def inline_array_unless_query(self: Generator, expression: exp.Array) -> str:
+ elem = seq_get(expression.expressions, 0)
+ if isinstance(elem, exp.Expression) and elem.find(exp.Query):
+ return self.func("ARRAY", elem)
+ return inline_array_sql(self, expression)
+
+
def no_ilike_sql(self: Generator, expression: exp.ILike) -> str:
return self.like_sql(
exp.Like(this=exp.Lower(this=expression.this), expression=expression.expression)
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 6a1d07a18d..3b0651f859 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -15,7 +15,7 @@
datestrtodate_sql,
encode_decode_sql,
build_formatted_time,
- inline_array_sql,
+ inline_array_unless_query,
no_comment_column_constraint_sql,
no_safe_divide_sql,
no_timestamp_sql,
@@ -312,6 +312,15 @@ class Parser(parser.Parser):
),
}
+ def _parse_bracket(
+ self, this: t.Optional[exp.Expression] = None
+ ) -> t.Optional[exp.Expression]:
+ bracket = super()._parse_bracket(this)
+ if isinstance(bracket, exp.Bracket):
+ bracket.set("returns_list_for_maps", True)
+
+ return bracket
+
def _parse_map(self) -> exp.ToMap | exp.Map:
if self._match(TokenType.L_BRACE, advance=False):
return self.expression(exp.ToMap, this=self._parse_bracket())
@@ -370,11 +379,7 @@ class Generator(generator.Generator):
TRANSFORMS = {
**generator.Generator.TRANSFORMS,
exp.ApproxDistinct: approx_count_distinct_sql,
- exp.Array: lambda self, e: (
- self.func("ARRAY", e.expressions[0])
- if e.expressions and e.expressions[0].find(exp.Select)
- else inline_array_sql(self, e)
- ),
+ exp.Array: inline_array_unless_query,
exp.ArrayFilter: rename_func("LIST_FILTER"),
exp.ArraySize: rename_func("ARRAY_LENGTH"),
exp.ArgMax: arg_max_or_min_no_count("ARG_MAX"),
@@ -593,7 +598,19 @@ def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
return super().generateseries_sql(expression)
def bracket_sql(self, expression: exp.Bracket) -> str:
- if isinstance(expression.this, exp.Array):
- expression.this.replace(exp.paren(expression.this))
+ this = expression.this
+ if isinstance(this, exp.Array):
+ this.replace(exp.paren(this))
+
+ bracket = super().bracket_sql(expression)
+
+ if not expression.args.get("returns_list_for_maps"):
+ if not this.type:
+ from sqlglot.optimizer.annotate_types import annotate_types
+
+ this = annotate_types(this)
+
+ if this.is_type(exp.DataType.Type.MAP):
+ bracket = f"({bracket})[1]"
- return super().bracket_sql(expression)
+ return bracket
diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py
index 88b5ddc4a9..9bb9a5cb97 100644
--- a/sqlglot/dialects/spark.py
+++ b/sqlglot/dialects/spark.py
@@ -6,7 +6,7 @@
from sqlglot.dialects.dialect import rename_func, unit_to_var
from sqlglot.dialects.hive import _build_with_ignore_nulls
from sqlglot.dialects.spark2 import Spark2, temporary_storage_provider
-from sqlglot.helper import seq_get
+from sqlglot.helper import ensure_list, seq_get
from sqlglot.transforms import (
ctas_with_tmp_tables_to_create_tmp_view,
remove_unique_constraints,
@@ -63,6 +63,9 @@ class Parser(Spark2.Parser):
**Spark2.Parser.FUNCTIONS,
"ANY_VALUE": _build_with_ignore_nulls(exp.AnyValue),
"DATEDIFF": _build_datediff,
+ "TRY_ELEMENT_AT": lambda args: exp.Bracket(
+ this=seq_get(args, 0), expressions=ensure_list(seq_get(args, 1)), safe=True
+ ),
}
def _parse_generated_as_identity(
@@ -112,6 +115,13 @@ class Generator(Spark2.Generator):
TRANSFORMS.pop(exp.DateDiff)
TRANSFORMS.pop(exp.Group)
+ def bracket_sql(self, expression: exp.Bracket) -> str:
+ if expression.args.get("safe"):
+ key = seq_get(self.bracket_offset_expressions(expression), 0)
+ return self.func("TRY_ELEMENT_AT", expression.this, key)
+
+ return super().bracket_sql(expression)
+
def computedcolumnconstraint_sql(self, expression: exp.ComputedColumnConstraint) -> str:
return f"GENERATED ALWAYS AS ({self.sql(expression, 'this')})"
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index e79c04bd8d..38bfc91a03 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4395,7 +4395,13 @@ class Between(Predicate):
class Bracket(Condition):
# https://cloud.google.com/bigquery/docs/reference/standard-sql/operators#array_subscript_operator
- arg_types = {"this": True, "expressions": True, "offset": False, "safe": False}
+ arg_types = {
+ "this": True,
+ "expressions": True,
+ "offset": False,
+ "safe": False,
+ "returns_list_for_maps": False,
+ }
@property
def output_name(self) -> str:
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 76d9b5d65f..df0929655b 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2412,12 +2412,15 @@ def between_sql(self, expression: exp.Between) -> str:
high = self.sql(expression, "high")
return f"{this} BETWEEN {low} AND {high}"
- def bracket_sql(self, expression: exp.Bracket) -> str:
- expressions = apply_index_offset(
+ def bracket_offset_expressions(self, expression: exp.Bracket) -> t.List[exp.Expression]:
+ return apply_index_offset(
expression.this,
expression.expressions,
self.dialect.INDEX_OFFSET - expression.args.get("offset", 0),
)
+
+ def bracket_sql(self, expression: exp.Bracket) -> str:
+ expressions = self.bracket_offset_expressions(expression)
expressions_sql = ", ".join(self.sql(e) for e in expressions)
return f"{self.sql(expression, 'this')}[{expressions_sql}]"
| diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 5a7e93e1fc..0b13a7042a 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -240,6 +240,7 @@ def test_duckdb(self):
self.validate_identity("SELECT MAP(['key1', 'key2', 'key3'], [10, 20, 30])")
self.validate_identity("SELECT MAP {'x': 1}")
+ self.validate_identity("SELECT (MAP {'x': 1})['x']")
self.validate_identity("SELECT df1.*, df2.* FROM df1 POSITIONAL JOIN df2")
self.validate_identity("MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)")
self.validate_identity("MAKE_TIMESTAMP(1667810584123456)")
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 18f1fb732a..d2285e0565 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -2,6 +2,7 @@
from sqlglot import exp, parse_one
from sqlglot.dialects.dialect import Dialects
+from sqlglot.helper import logger as helper_logger
from tests.dialects.test_dialect import Validator
@@ -223,17 +224,16 @@ def test_hint(self, logger):
)
def test_spark(self):
- self.validate_identity("any_value(col, true)", "ANY_VALUE(col) IGNORE NULLS")
- self.validate_identity("first(col, true)", "FIRST(col) IGNORE NULLS")
- self.validate_identity("first_value(col, true)", "FIRST_VALUE(col) IGNORE NULLS")
- self.validate_identity("last(col, true)", "LAST(col) IGNORE NULLS")
- self.validate_identity("last_value(col, true)", "LAST_VALUE(col) IGNORE NULLS")
-
self.assertEqual(
parse_one("REFRESH TABLE t", read="spark").assert_is(exp.Refresh).sql(dialect="spark"),
"REFRESH TABLE t",
)
+ self.validate_identity("any_value(col, true)", "ANY_VALUE(col) IGNORE NULLS")
+ self.validate_identity("first(col, true)", "FIRST(col) IGNORE NULLS")
+ self.validate_identity("first_value(col, true)", "FIRST_VALUE(col) IGNORE NULLS")
+ self.validate_identity("last(col, true)", "LAST(col) IGNORE NULLS")
+ self.validate_identity("last_value(col, true)", "LAST_VALUE(col) IGNORE NULLS")
self.validate_identity("DESCRIBE EXTENDED db.table")
self.validate_identity("SELECT * FROM test TABLESAMPLE (50 PERCENT)")
self.validate_identity("SELECT * FROM test TABLESAMPLE (5 ROWS)")
@@ -284,6 +284,30 @@ def test_spark(self):
"SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
)
+ with self.assertLogs(helper_logger):
+ self.validate_all(
+ "SELECT TRY_ELEMENT_AT(ARRAY(1, 2, 3), 2)",
+ read={
+ "databricks": "SELECT TRY_ELEMENT_AT(ARRAY(1, 2, 3), 2)",
+ },
+ write={
+ "databricks": "SELECT TRY_ELEMENT_AT(ARRAY(1, 2, 3), 2)",
+ "duckdb": "SELECT ([1, 2, 3])[3]",
+ "spark": "SELECT TRY_ELEMENT_AT(ARRAY(1, 2, 3), 2)",
+ },
+ )
+
+ self.validate_all(
+ "SELECT TRY_ELEMENT_AT(MAP(1, 'a', 2, 'b'), 2)",
+ read={
+ "databricks": "SELECT TRY_ELEMENT_AT(MAP(1, 'a', 2, 'b'), 2)",
+ },
+ write={
+ "databricks": "SELECT TRY_ELEMENT_AT(MAP(1, 'a', 2, 'b'), 2)",
+ "duckdb": "SELECT (MAP([1, 2], ['a', 'b'])[2])[1]",
+ "spark": "SELECT TRY_ELEMENT_AT(MAP(1, 'a', 2, 'b'), 2)",
+ },
+ )
self.validate_all(
"SELECT SPLIT('123|789', '\\\\|')",
read={
| [] | [
"tests/dialects/test_spark.py::TestSpark::test_spark"
] | [
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array_index",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialec... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat!: transpile map retrieval to duckdb, transpile TRY_ELEMENT_AT
DuckDB says the following for [retrieving values from maps](https://duckdb.org/docs/sql/data_types/map#retrieving-from-maps):
> MAPs use bracket notation for retrieving values. Selecting from a MAP returns a LIST rather than an individual value, with an empty LIST meaning that the key was not found.
This makes it tricky to transpile such retrievals to / from it, because we need to either unwrap / wrap the retrieved value, respectively.
This PR:
- implements a best-effort approach to handle the `-> DuckDB` direction of the transpilation
- facilitates the transpilation of Spark's `TRY_ELEMENT_AT` function
One side effect of this change is that when constructing `Bracket` nodes for DuckDB _manually_, it will also be required to set `returns_list_for_maps` to `True`, otherwise we'll generate incorrect SQL when targeting DuckDB.
References:
- https://spark.apache.org/docs/latest/api/sql/index.html#try_element_at
- https://docs.databricks.com/en/sql/language-manual/functions/try_element_at.html
- https://duckdb.org/docs/sql/data_types/map#retrieving-from-maps
- https://duckdb.org/docs/sql/data_types/array.html
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
deepset-ai__haystack-7468 | 7,468 | deepset-ai/haystack | null | 12acb3f12e2ab92ff61534f55c2cb65dc64e382b | 2024-04-04T10:56:58Z | diff --git a/haystack/components/evaluators/document_mrr.py b/haystack/components/evaluators/document_mrr.py
new file mode 100644
index 0000000000..d0194902ac
--- /dev/null
+++ b/haystack/components/evaluators/document_mrr.py
@@ -0,0 +1,79 @@
+from typing import Any, Dict, List
+
+from haystack import Document, component
+
+
+@component
+class DocumentMeanReciprocalRank:
+ """
+ Evaluator that calculates the mean reciprocal rank of the retrieved documents.
+
+ MRR measures how high the first retrieved document is ranked.
+ Each question can have multiple ground truth documents and multiple retrieved documents.
+
+ `DocumentMeanReciprocalRank` doesn't normalize its inputs, the `DocumentCleaner` component
+ should be used to clean and normalize the documents before passing them to this evaluator.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import AnswerExactMatchEvaluator
+ evaluator = DocumentMeanReciprocalRank()
+ result = evaluator.run(
+ ground_truth_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="9th")],
+ ],
+ retrieved_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
+ ],
+ )
+ print(result["individual_scores"])
+ # [1.0, 0.8333333333333333]
+ print(result["score"])
+ # 0.9166666666666666
+ ```
+ """
+
+ @component.output_types(score=float, individual_scores=List[float])
+ def run(
+ self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
+ ) -> Dict[str, Any]:
+ """
+ Run the DocumentMeanReciprocalRank on the given inputs.
+
+ `ground_truth_documents` and `retrieved_documents` must have the same length.
+
+ :param ground_truth_documents:
+ A list of expected documents for each question.
+ :param retrieved_documents:
+ A list of retrieved documents for each question.
+ :returns:
+ A dictionary with the following outputs:
+ - `score` - The average of calculated scores.
+ - `invididual_scores` - A list of numbers from 0.0 to 1.0 that represents how high the first retrieved document is ranked.
+ """
+ if len(ground_truth_documents) != len(retrieved_documents):
+ msg = "The length of ground_truth_documents and retrieved_documents must be the same."
+ raise ValueError(msg)
+
+ individual_scores = []
+
+ for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
+ score = 0.0
+ for ground_document in ground_truth:
+ if ground_document.content is None:
+ continue
+
+ for rank, retrieved_document in enumerate(retrieved):
+ if retrieved_document.content is None:
+ continue
+
+ if ground_document.content in retrieved_document.content:
+ score = 1 / (rank + 1)
+ break
+ individual_scores.append(score)
+
+ score = sum(individual_scores) / len(retrieved_documents)
+
+ return {"score": score, "individual_scores": individual_scores}
diff --git a/releasenotes/notes/document-mrr-evaluator-fa7c266cc91201a7.yaml b/releasenotes/notes/document-mrr-evaluator-fa7c266cc91201a7.yaml
new file mode 100644
index 0000000000..7e56e9489f
--- /dev/null
+++ b/releasenotes/notes/document-mrr-evaluator-fa7c266cc91201a7.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add DocumentMeanReciprocalRank, it can be used to calculate mean reciprocal rank of retrieved documents.
| diff --git a/test/components/evaluators/test_document_mrr.py b/test/components/evaluators/test_document_mrr.py
new file mode 100644
index 0000000000..959492c648
--- /dev/null
+++ b/test/components/evaluators/test_document_mrr.py
@@ -0,0 +1,82 @@
+import pytest
+
+from haystack import Document
+from haystack.components.evaluators.document_mrr import DocumentMeanReciprocalRank
+
+
+def test_run_with_all_matching():
+ evaluator = DocumentMeanReciprocalRank()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 1.0], "score": 1.0}
+
+
+def test_run_with_no_matching():
+ evaluator = DocumentMeanReciprocalRank()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Paris")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [0.0, 0.0], "score": 0.0}
+
+
+def test_run_with_partial_matching():
+ evaluator = DocumentMeanReciprocalRank()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 0.0], "score": 0.5}
+
+
+def test_run_with_complex_data():
+ evaluator = DocumentMeanReciprocalRank()
+ result = evaluator.run(
+ ground_truth_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="9th")],
+ [Document(content="classical music"), Document(content="classical")],
+ [Document(content="11th century"), Document(content="the 11th")],
+ [Document(content="Denmark, Iceland and Norway")],
+ [Document(content="10th century"), Document(content="10th")],
+ ],
+ retrieved_documents=[
+ [Document(content="France")],
+ [Document(content="10th century"), Document(content="9th century"), Document(content="9th")],
+ [Document(content="rock music"), Document(content="dubstep"), Document(content="classical")],
+ [Document(content="11th"), Document(content="the 11th"), Document(content="11th century")],
+ [Document(content="Denmark"), Document(content="Norway"), Document(content="Iceland")],
+ [
+ Document(content="10th century"),
+ Document(content="the first half of the 10th century"),
+ Document(content="10th"),
+ Document(content="10th"),
+ ],
+ ],
+ )
+
+ assert result == {
+ "individual_scores": [1.0, 0.5, 0.3333333333333333, 0.5, 0.0, 1.0],
+ "score": pytest.approx(0.555555555555555),
+ }
+
+
+def test_run_with_different_lengths():
+ with pytest.raises(ValueError):
+ evaluator = DocumentMeanReciprocalRank()
+ evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator = DocumentMeanReciprocalRank()
+ evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")]],
+ )
| diff --git a/releasenotes/notes/document-mrr-evaluator-fa7c266cc91201a7.yaml b/releasenotes/notes/document-mrr-evaluator-fa7c266cc91201a7.yaml
new file mode 100644
index 0000000000..7e56e9489f
--- /dev/null
+++ b/releasenotes/notes/document-mrr-evaluator-fa7c266cc91201a7.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add DocumentMeanReciprocalRank, it can be used to calculate mean reciprocal rank of retrieved documents.
| [
{
"components": [
{
"doc": "Evaluator that calculates the mean reciprocal rank of the retrieved documents.\n\nMRR measures how high the first retrieved document is ranked.\nEach question can have multiple ground truth documents and multiple retrieved documents.\n\n`DocumentMeanReciprocalRank` does... | [
"test/components/evaluators/test_document_mrr.py::test_run_with_all_matching",
"test/components/evaluators/test_document_mrr.py::test_run_with_no_matching",
"test/components/evaluators/test_document_mrr.py::test_run_with_partial_matching",
"test/components/evaluators/test_document_mrr.py::test_run_with_comple... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add `DocumentMeanReciprocalRank`
### Related Issues
- fixes #6065
### Proposed Changes:
Add `DocumentMeanReciprocalRank` Component to calculate Mean Reciprocal Rank of a retrieved documents given a list of ground truth documents.
### How did you test it?
I added unit tests.
### Notes for the reviewer
N/A
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/document_mrr.py]
(definition of DocumentMeanReciprocalRank:)
class DocumentMeanReciprocalRank:
"""Evaluator that calculates the mean reciprocal rank of the retrieved documents.
MRR measures how high the first retrieved document is ranked.
Each question can have multiple ground truth documents and multiple retrieved documents.
`DocumentMeanReciprocalRank` doesn't normalize its inputs, the `DocumentCleaner` component
should be used to clean and normalize the documents before passing them to this evaluator.
Usage example:
```python
from haystack.components.evaluators import AnswerExactMatchEvaluator
evaluator = DocumentMeanReciprocalRank()
result = evaluator.run(
ground_truth_documents=[
[Document(content="France")],
[Document(content="9th century"), Document(content="9th")],
],
retrieved_documents=[
[Document(content="France")],
[Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
],
)
print(result["individual_scores"])
# [1.0, 0.8333333333333333]
print(result["score"])
# 0.9166666666666666
```"""
(definition of DocumentMeanReciprocalRank.run:)
def run( self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]] ) -> Dict[str, Any]:
"""Run the DocumentMeanReciprocalRank on the given inputs.
`ground_truth_documents` and `retrieved_documents` must have the same length.
:param ground_truth_documents:
A list of expected documents for each question.
:param retrieved_documents:
A list of retrieved documents for each question.
:returns:
A dictionary with the following outputs:
- `score` - The average of calculated scores.
- `invididual_scores` - A list of numbers from 0.0 to 1.0 that represents how high the first retrieved document is ranked."""
[end of new definitions in haystack/components/evaluators/document_mrr.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement function to calculate Mean Reciprocal Rank metric
As specified in proposal #5794 we need to implement a function to calculate the Mean Reciprocal Rank metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_mrr()` could be a nice name.
For more detailed information check out the original proposal.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
conan-io__conan-16010 | 16,010 | conan-io/conan | null | f0bddeb301cdeb96598b3291cc93b2d433e38ca6 | 2024-04-03T14:55:51Z | diff --git a/conan/tools/gnu/__init__.py b/conan/tools/gnu/__init__.py
index 9c413ed2145..cb51b0360b2 100644
--- a/conan/tools/gnu/__init__.py
+++ b/conan/tools/gnu/__init__.py
@@ -1,5 +1,6 @@
from conan.tools.gnu.autotools import Autotools
from conan.tools.gnu.autotoolstoolchain import AutotoolsToolchain
+from conan.tools.gnu.gnutoolchain import GnuToolchain
from conan.tools.gnu.autotoolsdeps import AutotoolsDeps
from conan.tools.gnu.pkgconfig import PkgConfig
from conan.tools.gnu.pkgconfigdeps import PkgConfigDeps
diff --git a/conan/tools/gnu/autotoolstoolchain.py b/conan/tools/gnu/autotoolstoolchain.py
index c81e70abebc..6f961e4429b 100644
--- a/conan/tools/gnu/autotoolstoolchain.py
+++ b/conan/tools/gnu/autotoolstoolchain.py
@@ -1,21 +1,22 @@
+from conan.errors import ConanException
from conan.internal import check_duplicated_generator
from conan.internal.internal_tools import raise_on_universal_arch
-from conan.tools.apple.apple import apple_min_version_flag, is_apple_os, to_apple_arch, \
- apple_sdk_path, resolve_apple_flags
+from conan.tools.apple.apple import is_apple_os, resolve_apple_flags
from conan.tools.build import cmd_args_to_string, save_toolchain_args
from conan.tools.build.cross_building import cross_building
-from conan.tools.build.flags import architecture_flag, build_type_flags, cppstd_flag, build_type_link_flags, libcxx_flags
+from conan.tools.build.flags import architecture_flag, build_type_flags, cppstd_flag, \
+ build_type_link_flags, \
+ libcxx_flags
from conan.tools.env import Environment
from conan.tools.gnu.get_gnu_triplet import _get_gnu_triplet
from conan.tools.microsoft import VCVars, msvc_runtime_flag, unix_path, check_min_vs, is_msvc
-from conan.errors import ConanException
from conans.model.pkg_type import PackageType
class AutotoolsToolchain:
+
def __init__(self, conanfile, namespace=None, prefix="/"):
"""
-
:param conanfile: The current recipe object. Always use ``self``.
:param namespace: This argument avoids collisions when you have multiple toolchain calls in
the same recipe. By setting this argument, the *conanbuild.conf* file used to pass
@@ -68,10 +69,10 @@ def __init__(self, conanfile, namespace=None, prefix="/"):
compiler = self._conanfile.settings.get_safe("compiler")
if not self._host:
- self._host = _get_gnu_triplet(os_host, arch_host, compiler=compiler)
+ self._host = _get_gnu_triplet(os_host, arch_host, compiler=compiler)["triplet"]
# Build triplet
if not self._build:
- self._build = _get_gnu_triplet(os_build, arch_build, compiler=compiler)
+ self._build = _get_gnu_triplet(os_build, arch_build, compiler=compiler)["triplet"]
sysroot = self._conanfile.conf.get("tools.build:sysroot")
sysroot = sysroot.replace("\\", "/") if sysroot is not None else None
diff --git a/conan/tools/gnu/get_gnu_triplet.py b/conan/tools/gnu/get_gnu_triplet.py
index 5e230218baf..dc7e3b68dc7 100644
--- a/conan/tools/gnu/get_gnu_triplet.py
+++ b/conan/tools/gnu/get_gnu_triplet.py
@@ -1,19 +1,7 @@
from conan.errors import ConanException
-def _get_gnu_triplet(os_, arch, compiler=None):
- """
- Returns string with <machine>-<vendor>-<op_system> triplet (<vendor> can be omitted in practice)
-
- :param os_: os to be used to create the triplet
- :param arch: arch to be used to create the triplet
- :param compiler: compiler used to create the triplet (only needed fo windows)
- """
-
- if os_ == "Windows" and compiler is None:
- raise ConanException("'compiler' parameter for 'get_gnu_triplet()' is not specified and "
- "needed for os=Windows")
-
+def _get_gnu_arch(os_, arch):
# Calculate the arch
machine = {"x86": "i686",
"x86_64": "x86_64",
@@ -67,7 +55,10 @@ def _get_gnu_triplet(os_, arch, compiler=None):
raise ConanException("Unknown '%s' machine, Conan doesn't know how to "
"translate it to the GNU triplet, please report at "
" https://github.com/conan-io/conan/issues" % arch)
+ return machine
+
+def _get_gnu_os(os_, arch, compiler=None):
# Calculate the OS
if compiler == "gcc":
windows_op = "w64-mingw32"
@@ -98,5 +89,24 @@ def _get_gnu_triplet(os_, arch, compiler=None):
if arch == "armv8_32" and os_ == "Linux":
op_system += "_ilp32" # https://wiki.linaro.org/Platform/arm64-ilp32
+ return op_system
+
- return "%s-%s" % (machine, op_system)
+def _get_gnu_triplet(os_, arch, compiler=None):
+ """
+ Returns string with <machine>-<vendor>-<op_system> triplet (<vendor> can be omitted in practice)
+
+ :param os_: os to be used to create the triplet
+ :param arch: arch to be used to create the triplet
+ :param compiler: compiler used to create the triplet (only needed fo windows)
+ """
+ if os_ == "Windows" and compiler is None:
+ raise ConanException("'compiler' parameter for 'get_gnu_triplet()' is not specified and "
+ "needed for os=Windows")
+ machine = _get_gnu_arch(os_, arch)
+ op_system = _get_gnu_os(os_, arch, compiler=compiler)
+ return {
+ 'machine': machine,
+ 'system': op_system,
+ 'triplet': f"{machine}-{op_system}"
+ }
diff --git a/conan/tools/gnu/gnutoolchain.py b/conan/tools/gnu/gnutoolchain.py
new file mode 100644
index 00000000000..95c32129e9d
--- /dev/null
+++ b/conan/tools/gnu/gnutoolchain.py
@@ -0,0 +1,265 @@
+from conan.internal import check_duplicated_generator
+from conan.internal.internal_tools import raise_on_universal_arch
+from conan.tools.apple.apple import is_apple_os, resolve_apple_flags
+from conan.tools.build import cmd_args_to_string, save_toolchain_args
+from conan.tools.build.cross_building import cross_building
+from conan.tools.build.flags import architecture_flag, build_type_flags, cppstd_flag, \
+ build_type_link_flags, \
+ libcxx_flags
+from conan.tools.env import Environment
+from conan.tools.gnu.get_gnu_triplet import _get_gnu_triplet
+from conan.tools.microsoft import VCVars, msvc_runtime_flag, unix_path, check_min_vs, is_msvc
+from conans.model.pkg_type import PackageType
+
+
+class GnuToolchain:
+ """
+ GnuToolchain generator.
+
+ Note: it's based on legacy AutotoolsToolchain but with a more modern and usable UX
+ """
+ def __init__(self, conanfile, namespace=None, prefix="/"):
+ """
+ :param conanfile: The current recipe object. Always use ``self``.
+ :param namespace: This argument avoids collisions when you have multiple toolchain calls in
+ the same recipe. By setting this argument, the *conanbuild.conf* file used to pass
+ information to the build helper will be named as *<namespace>_conanbuild.conf*. The default
+ value is ``None`` meaning that the name of the generated file is *conanbuild.conf*. This
+ namespace must be also set with the same value in the constructor of the Autotools build
+ helper so that it reads the information from the proper file.
+ :param prefix: Folder to use for ``--prefix`` argument ("/" by default).
+ """
+ raise_on_universal_arch(conanfile)
+ self._conanfile = conanfile
+ self._namespace = namespace
+ self._is_apple_system = is_apple_os(self._conanfile)
+ self._prefix = prefix
+ # Extra flags
+ self.extra_cxxflags = []
+ self.extra_cflags = []
+ self.extra_ldflags = []
+ self.extra_defines = []
+ # Extra environment definitions
+ self.extra_env = Environment()
+ # Defines
+ self.ndebug = None
+ build_type = self._conanfile.settings.get_safe("build_type")
+ if build_type in ['Release', 'RelWithDebInfo', 'MinSizeRel']:
+ self.ndebug = "NDEBUG"
+
+ # TODO: This is also covering compilers like Visual Studio, necessary to test it (&remove?)
+ self.build_type_flags = build_type_flags(self._conanfile.settings)
+ self.build_type_link_flags = build_type_link_flags(self._conanfile.settings)
+
+ self.cppstd = cppstd_flag(self._conanfile)
+ self.arch_flag = architecture_flag(self._conanfile.settings)
+ self.libcxx, self.gcc_cxx11_abi = libcxx_flags(self._conanfile)
+ self.fpic = self._conanfile.options.get_safe("fPIC")
+ self.msvc_runtime_flag = self._get_msvc_runtime_flag()
+ self.msvc_extra_flags = self._msvc_extra_flags()
+
+ # Host/Build triplets
+ self.triplets_info = {
+ "host": {"triplet": self._conanfile.conf.get("tools.gnu:host_triplet")},
+ "build": {"triplet": self._conanfile.conf.get("tools.gnu:build_triplet")}
+ }
+ is_cross_building = cross_building(self._conanfile)
+ if is_cross_building:
+ compiler = self._conanfile.settings.get_safe("compiler")
+ # Host triplet
+ if not self.triplets_info["host"]["triplet"]:
+ os_host = conanfile.settings.get_safe("os")
+ arch_host = conanfile.settings.get_safe("arch")
+ self.triplets_info["host"] = _get_gnu_triplet(os_host, arch_host, compiler=compiler)
+ # Build triplet
+ if not self.triplets_info["build"]["triplet"]:
+ os_build = conanfile.settings_build.get_safe('os')
+ arch_build = conanfile.settings_build.get_safe('arch')
+ self.triplets_info["build"] = _get_gnu_triplet(os_build, arch_build, compiler=compiler)
+
+ sysroot = self._conanfile.conf.get("tools.build:sysroot")
+ sysroot = sysroot.replace("\\", "/") if sysroot is not None else None
+ self.sysroot_flag = "--sysroot {}".format(sysroot) if sysroot else None
+ self.configure_args = {}
+ self.autoreconf_args = {"--force": None, "--install": None}
+ self.make_args = {}
+ # Initializing configure arguments: triplets, shared flags, dirs flags, etc.
+ self.configure_args.update(self._get_default_configure_shared_flags())
+ self.configure_args.update(self._get_default_configure_install_flags())
+ self.configure_args.update(self._get_default_triplets())
+ # Apple stuff
+ is_cross_building_osx = (is_cross_building
+ and conanfile.settings_build.get_safe('os') == "Macos"
+ and is_apple_os(conanfile))
+ min_flag, arch_flag, isysroot_flag = (
+ resolve_apple_flags(conanfile, is_cross_building=is_cross_building_osx)
+ )
+ # https://man.archlinux.org/man/clang.1.en#Target_Selection_Options
+ self.apple_arch_flag = arch_flag
+ # -isysroot makes all includes for your library relative to the build directory
+ self.apple_isysroot_flag = isysroot_flag
+ self.apple_min_version_flag = min_flag
+ # MSVC common stuff
+ self._initialize_default_extra_env()
+
+ def yes_no(self, option_name, default=None, negated=False):
+ """
+ Simple wrapper to return "yes" or "no" depending on whether option_name is
+ evaluated as True or False.
+
+ :param option_name: option name.
+ :param default: Default value to return.
+ :param negated: Negates the option value if True.
+ :return: "yes" or "no" depending on whether option_name is True or False.
+ """
+ option_value = bool(self._conanfile.options.get_safe(option_name, default=default))
+ option_value = not option_value if negated else option_value
+ return "yes" if option_value else "no"
+
+ def _initialize_default_extra_env(self):
+ """Initialize the default environment variables."""
+ extra_env_vars = dict()
+ # Normally, these are the most common default flags used by MSVC in Windows
+ if is_msvc(self._conanfile):
+ extra_env_vars = {"CC": "cl -nologo",
+ "CXX": "cl -nologo",
+ "LD": "link -nologo",
+ "AR": "lib",
+ "NM": "dumpbin -symbols",
+ "OBJDUMP": ":",
+ "RANLIB": ":",
+ "STRIP": ":"}
+ # Configuration map
+ compilers_mapping = {"c": "CC", "cpp": "CXX", "cuda": "NVCC", "fortran": "FC",
+ "rc": "RC", "ld": "LD", "ar": "AR", "nm": "NM", "ranlib": "RANLIB",
+ "objdump": "OBJDUMP", "strip": "STRIP"}
+ # Compiler definitions by conf
+ compilers_by_conf = self._conanfile.conf.get("tools.build:compiler_executables", default={},
+ check_type=dict)
+ if compilers_by_conf:
+ for comp, env_var in compilers_mapping.items():
+ if comp in compilers_by_conf:
+ compiler = compilers_by_conf[comp]
+ # https://github.com/conan-io/conan/issues/13780
+ compiler = unix_path(self._conanfile, compiler)
+ extra_env_vars[env_var] = compiler # User/tools ones have precedence
+ # Update the extra_env attribute with all the compiler values
+ for env_var, env_value in extra_env_vars.items():
+ self.extra_env.define(env_var, env_value)
+
+ def _get_msvc_runtime_flag(self):
+ flag = msvc_runtime_flag(self._conanfile)
+ return f"-{flag}" if flag else ""
+
+ def _msvc_extra_flags(self):
+ if is_msvc(self._conanfile) and check_min_vs(self._conanfile, "180", raise_invalid=False):
+ return ["-FS"]
+ return []
+
+ def _add_msvc_flags(self, flags):
+ # This is to avoid potential duplicate with users recipes -FS (alreday some in ConanCenter)
+ return [f for f in self.msvc_extra_flags if f not in flags]
+
+ @staticmethod
+ def _filter_list_empty_fields(v):
+ return list(filter(bool, v))
+
+ @staticmethod
+ def _dict_to_list(flags):
+ return [f"{k}={v}" if v else k for k, v in flags.items()]
+
+ @property
+ def cxxflags(self):
+ fpic = "-fPIC" if self.fpic else None
+ ret = [self.libcxx, self.cppstd, self.arch_flag, fpic, self.msvc_runtime_flag,
+ self.sysroot_flag]
+ apple_flags = [self.apple_isysroot_flag, self.apple_arch_flag, self.apple_min_version_flag]
+ conf_flags = self._conanfile.conf.get("tools.build:cxxflags", default=[], check_type=list)
+ vs_flag = self._add_msvc_flags(self.extra_cxxflags)
+ ret = ret + self.build_type_flags + apple_flags + self.extra_cxxflags + vs_flag + conf_flags
+ return self._filter_list_empty_fields(ret)
+
+ @property
+ def cflags(self):
+ fpic = "-fPIC" if self.fpic else None
+ ret = [self.arch_flag, fpic, self.msvc_runtime_flag, self.sysroot_flag]
+ apple_flags = [self.apple_isysroot_flag, self.apple_arch_flag, self.apple_min_version_flag]
+ conf_flags = self._conanfile.conf.get("tools.build:cflags", default=[], check_type=list)
+ vs_flag = self._add_msvc_flags(self.extra_cflags)
+ ret = ret + self.build_type_flags + apple_flags + self.extra_cflags + vs_flag + conf_flags
+ return self._filter_list_empty_fields(ret)
+
+ @property
+ def ldflags(self):
+ ret = [self.arch_flag, self.sysroot_flag]
+ apple_flags = [self.apple_isysroot_flag, self.apple_arch_flag, self.apple_min_version_flag]
+ conf_flags = self._conanfile.conf.get("tools.build:sharedlinkflags", default=[],
+ check_type=list)
+ conf_flags.extend(self._conanfile.conf.get("tools.build:exelinkflags", default=[],
+ check_type=list))
+ linker_scripts = self._conanfile.conf.get("tools.build:linker_scripts", default=[],
+ check_type=list)
+ conf_flags.extend(["-T'" + linker_script + "'" for linker_script in linker_scripts])
+ ret = ret + self.build_type_link_flags + apple_flags + self.extra_ldflags + conf_flags
+ return self._filter_list_empty_fields(ret)
+
+ @property
+ def defines(self):
+ conf_flags = self._conanfile.conf.get("tools.build:defines", default=[], check_type=list)
+ ret = [self.ndebug, self.gcc_cxx11_abi] + self.extra_defines + conf_flags
+ return self._filter_list_empty_fields(ret)
+
+ def _get_default_configure_shared_flags(self):
+ args = {}
+ # Just add these flags if there's a shared option defined (never add to exe's)
+ if self._conanfile.package_type is PackageType.SHARED:
+ args = {"--enable-shared": None, "--disable-static": None}
+ elif self._conanfile.package_type is PackageType.STATIC:
+ args = {"--disable-shared": None, "--enable-static": None}
+ return args
+
+ def _get_default_configure_install_flags(self):
+ configure_install_flags = {"--prefix": self._prefix}
+ # If someone want arguments but not the defaults can pass them in args manually
+ for flag_name, cppinfo_name in [("bindir", "bindirs"), ("sbindir", "bindirs"),
+ ("libdir", "libdirs"), ("includedir", "includedirs"),
+ ("oldincludedir", "includedirs"),
+ ("datarootdir", "resdirs")]:
+ elements = getattr(self._conanfile.cpp.package, cppinfo_name)
+ cppinfo_value = f"${{prefix}}/{elements[0]}" if elements else None
+ if cppinfo_value:
+ configure_install_flags[f"--{flag_name}"] = cppinfo_value
+ return configure_install_flags
+
+ def _get_default_triplets(self):
+ triplets = {}
+ for context, info in self.triplets_info.items():
+ if info.get("triplet") is not None:
+ triplets[f"--{context}"] = info["triplet"]
+ return triplets
+
+ @property
+ def _environment(self):
+ env = Environment()
+ # Flags and defines
+ env.append("CPPFLAGS", ["-D{}".format(d) for d in self.defines])
+ env.append("CXXFLAGS", self.cxxflags)
+ env.append("CFLAGS", self.cflags)
+ env.append("LDFLAGS", self.ldflags)
+ env.prepend_path("PKG_CONFIG_PATH", self._conanfile.generators_folder)
+ # Let's compose with user extra env variables defined (user ones have precedence)
+ return self.extra_env.compose_env(env)
+
+ def generate(self):
+ check_duplicated_generator(self, self._conanfile)
+ # Composing both environments. User extra_env definitions has precedence
+ env_vars = self._environment.vars(self._conanfile)
+ env_vars.save_script("conanautotoolstoolchain")
+ # Converts all the arguments into strings
+ args = {
+ "configure_args": cmd_args_to_string(self._dict_to_list(self.configure_args)),
+ "make_args": cmd_args_to_string(self._dict_to_list(self.make_args)),
+ "autoreconf_args": cmd_args_to_string(self._dict_to_list(self.autoreconf_args))
+ }
+ save_toolchain_args(args, namespace=self._namespace)
+ VCVars(self._conanfile).generate()
diff --git a/conans/client/generators/__init__.py b/conans/client/generators/__init__.py
index 749e9ec2c71..eeed8ddbb65 100644
--- a/conans/client/generators/__init__.py
+++ b/conans/client/generators/__init__.py
@@ -8,19 +8,26 @@
from conans.errors import ConanException, conanfile_exception_formatter
from conans.util.files import save, mkdir, chdir
-_generators = {"CMakeToolchain": "conan.tools.cmake", "CMakeDeps": "conan.tools.cmake",
+_generators = {"CMakeToolchain": "conan.tools.cmake",
+ "CMakeDeps": "conan.tools.cmake",
"MesonToolchain": "conan.tools.meson",
- "MSBuildDeps": "conan.tools.microsoft", "MSBuildToolchain": "conan.tools.microsoft",
- "NMakeToolchain": "conan.tools.microsoft", "NMakeDeps": "conan.tools.microsoft",
+ "MSBuildDeps": "conan.tools.microsoft",
+ "MSBuildToolchain": "conan.tools.microsoft",
+ "NMakeToolchain": "conan.tools.microsoft",
+ "NMakeDeps": "conan.tools.microsoft",
"VCVars": "conan.tools.microsoft",
"QbsProfile": "conan.tools.qbs.qbsprofile",
"VirtualRunEnv": "conan.tools.env.virtualrunenv",
"VirtualBuildEnv": "conan.tools.env.virtualbuildenv",
- "AutotoolsDeps": "conan.tools.gnu", "AutotoolsToolchain": "conan.tools.gnu",
+ "AutotoolsDeps": "conan.tools.gnu",
+ "AutotoolsToolchain": "conan.tools.gnu",
+ "GnuToolchain": "conan.tools.gnu",
"PkgConfigDeps": "conan.tools.gnu",
- "BazelDeps": "conan.tools.google", "BazelToolchain": "conan.tools.google",
+ "BazelDeps": "conan.tools.google",
+ "BazelToolchain": "conan.tools.google",
"IntelCC": "conan.tools.intel",
- "XcodeDeps": "conan.tools.apple", "XcodeToolchain": "conan.tools.apple",
+ "XcodeDeps": "conan.tools.apple",
+ "XcodeToolchain": "conan.tools.apple",
"PremakeDeps": "conan.tools.premake",
"MakeDeps": "conan.tools.gnu",
"SConsDeps": "conan.tools.scons"
diff --git a/conans/model/conf.py b/conans/model/conf.py
index f74ee2107c4..5098608e9b4 100644
--- a/conans/model/conf.py
+++ b/conans/model/conf.py
@@ -113,7 +113,7 @@
"tools.apple:enable_visibility": "(boolean) Enable/Disable Visibility Apple Clang flags",
"tools.env.virtualenv:powershell": "If it is set to True it will generate powershell launchers if os=Windows",
# Compilers/Flags configurations
- "tools.build:compiler_executables": "Defines a Python dict-like with the compilers path to be used. Allowed keys {'c', 'cpp', 'cuda', 'objc', 'objcxx', 'rc', 'fortran', 'asm', 'hip', 'ispc'}",
+ "tools.build:compiler_executables": "Defines a Python dict-like with the compilers path to be used. Allowed keys {'c', 'cpp', 'cuda', 'objc', 'objcxx', 'rc', 'fortran', 'asm', 'hip', 'ispc', 'ld', 'ar'}",
"tools.build:cxxflags": "List of extra CXX flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
"tools.build:cflags": "List of extra C flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
"tools.build:defines": "List of extra definition flags used by different toolchains like CMakeToolchain and AutotoolsToolchain",
| diff --git a/conans/test/functional/toolchains/gnu/test_gnutoolchain_apple.py b/conans/test/functional/toolchains/gnu/test_gnutoolchain_apple.py
new file mode 100644
index 00000000000..ce146db9c18
--- /dev/null
+++ b/conans/test/functional/toolchains/gnu/test_gnutoolchain_apple.py
@@ -0,0 +1,81 @@
+import os
+import platform
+import textwrap
+
+import pytest
+
+from conan.tools.apple.apple import _to_apple_arch
+from conans.test.assets.autotools import gen_makefile
+from conans.test.assets.sources import gen_function_h, gen_function_cpp
+from conans.test.utils.tools import TestClient
+
+
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
+@pytest.mark.parametrize("config", [("x86_64", "Macos", "10.14", None),
+ ("armv8", "iOS", "10.0", "iphoneos"),
+ ("armv7", "iOS", "10.0", "iphoneos"),
+ ("x86", "iOS", "10.0", "iphonesimulator"),
+ ("x86_64", "iOS", "10.0", "iphonesimulator"),
+ ("armv8", "Macos", "10.14", None) # M1
+ ])
+def test_makefile_arch(config):
+ makefile = gen_makefile(apps=["app"], libs=["hello"])
+ conanfile_py = textwrap.dedent("""
+ from conan import ConanFile, tools
+ from conan.tools.gnu import Autotools
+
+ class App(ConanFile):
+ settings = "os", "arch", "compiler", "build_type"
+ options = {"shared": [True, False], "fPIC": [True, False]}
+ default_options = {"shared": False, "fPIC": True}
+ generators = "GnuToolchain"
+
+ def config_options(self):
+ if self.settings.os == "Windows":
+ self.options.rm_safe("fPIC")
+
+ def configure(self):
+ if self.options.shared:
+ self.options.rm_safe("fPIC")
+
+ def build(self):
+ env_build = Autotools(self)
+ env_build.make()
+ """)
+ arch, os_, os_version, os_sdk = config
+ profile = textwrap.dedent("""
+ include(default)
+ [settings]
+ os = {os}
+ {os_sdk}
+ os.version = {os_version}
+ arch = {arch}
+ """).format(os=os_, arch=arch,
+ os_version=os_version, os_sdk="os.sdk = " + os_sdk if os_sdk else "")
+
+ t = TestClient()
+ hello_h = gen_function_h(name="hello")
+ hello_cpp = gen_function_cpp(name="hello")
+ main_cpp = gen_function_cpp(name="main", includes=["hello"], calls=["hello"])
+
+ t.save({"Makefile": makefile,
+ "hello.h": hello_h,
+ "hello.cpp": hello_cpp,
+ "app.cpp": main_cpp,
+ "conanfile.py": conanfile_py,
+ "profile": profile})
+
+ t.run("install . --profile:host=profile --profile:build=default")
+ t.run("build . --profile:host=profile --profile:build=default")
+
+ libhello = os.path.join(t.current_folder, "libhello.a")
+ app = os.path.join(t.current_folder, "app")
+ assert os.path.isfile(libhello)
+ assert os.path.isfile(app)
+
+ expected_arch = _to_apple_arch(arch)
+ t.run_command('lipo -info "%s"' % libhello)
+ assert "architecture: %s" % expected_arch in t.out
+
+ t.run_command('lipo -info "%s"' % app)
+ assert "architecture: %s" % expected_arch in t.out
diff --git a/conans/test/integration/toolchains/gnu/test_gnutoolchain.py b/conans/test/integration/toolchains/gnu/test_gnutoolchain.py
new file mode 100644
index 00000000000..0541e238128
--- /dev/null
+++ b/conans/test/integration/toolchains/gnu/test_gnutoolchain.py
@@ -0,0 +1,437 @@
+import os
+import platform
+import textwrap
+import re
+
+import pytest
+
+from conans.test.assets.genconanfile import GenConanfile
+from conans.test.utils.tools import TestClient
+from conans.util.files import save, load
+
+
+@pytest.mark.parametrize("os_", ["Macos", "Linux", "Windows"])
+def test_extra_flags_via_conf(os_):
+ os_sdk = "tools.apple:sdk_path=/my/sdk/path" if os_ == "Macos" else ""
+ profile = textwrap.dedent(f"""
+ [settings]
+ os={os_}
+ compiler=gcc
+ compiler.version=6
+ compiler.libcxx=libstdc++11
+ arch=armv8
+ build_type=Release
+
+ [conf]
+ tools.build:cxxflags=["--flag1", "--flag2"]
+ tools.build:cflags+=["--flag3", "--flag4"]
+ tools.build:sharedlinkflags+=["--flag5"]
+ tools.build:exelinkflags+=["--flag6"]
+ tools.build:defines+=["DEF1", "DEF2"]
+ {os_sdk}
+ """)
+ client = TestClient()
+ conanfile = GenConanfile().with_settings("os", "arch", "compiler", "build_type") \
+ .with_generator("GnuToolchain")
+ client.save({"conanfile.py": conanfile,
+ "profile": profile})
+ client.run("install . --profile:build=profile --profile:host=profile")
+ toolchain = client.load(
+ "conanautotoolstoolchain{}".format('.bat' if os_ == "Windows" else '.sh'))
+ if os_ == "Windows":
+ assert 'set "CPPFLAGS=%CPPFLAGS% -DNDEBUG -DDEF1 -DDEF2"' in toolchain
+ assert 'set "CXXFLAGS=%CXXFLAGS% -O3 --flag1 --flag2"' in toolchain
+ assert 'set "CFLAGS=%CFLAGS% -O3 --flag3 --flag4"' in toolchain
+ assert 'set "LDFLAGS=%LDFLAGS% --flag5 --flag6"' in toolchain
+ assert f'set "PKG_CONFIG_PATH={client.current_folder};%PKG_CONFIG_PATH%"' in toolchain
+ elif os_ == "Linux":
+ assert 'export CPPFLAGS="$CPPFLAGS -DNDEBUG -DDEF1 -DDEF2"' in toolchain
+ assert 'export CXXFLAGS="$CXXFLAGS -O3 --flag1 --flag2"' in toolchain
+ assert 'export CFLAGS="$CFLAGS -O3 --flag3 --flag4"' in toolchain
+ assert 'export LDFLAGS="$LDFLAGS --flag5 --flag6"' in toolchain
+ assert f'export PKG_CONFIG_PATH="{client.current_folder}:$PKG_CONFIG_PATH"' in toolchain
+ else: # macOS
+ assert 'export CPPFLAGS="$CPPFLAGS -DNDEBUG -DDEF1 -DDEF2"' in toolchain
+ assert 'export CXXFLAGS="$CXXFLAGS -O3 --flag1 --flag2"' in toolchain
+ assert 'export CFLAGS="$CFLAGS -O3 --flag3 --flag4"' in toolchain
+ assert 'export LDFLAGS="$LDFLAGS --flag5 --flag6"' in toolchain
+ assert f'export PKG_CONFIG_PATH="{client.current_folder}:$PKG_CONFIG_PATH"' in toolchain
+
+
+def test_extra_flags_order():
+ client = TestClient()
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.gnu import GnuToolchain
+
+ class Conan(ConanFile):
+ name = "pkg"
+ version = "0.1"
+ settings = "os", "arch", "build_type"
+ def generate(self):
+ at = GnuToolchain(self)
+ at.extra_cxxflags = ["extra_cxxflags"]
+ at.extra_cflags = ["extra_cflags"]
+ at.extra_ldflags = ["extra_ldflags"]
+ at.extra_defines = ["extra_defines"]
+ at.generate()
+ """)
+ profile = textwrap.dedent("""
+ include(default)
+ [conf]
+ tools.build:cxxflags+=['cxxflags']
+ tools.build:cflags+=['cflags']
+ tools.build:sharedlinkflags+=['sharedlinkflags']
+ tools.build:exelinkflags+=['exelinkflags']
+ tools.build:defines+=['defines']
+ """)
+ client.save({"conanfile.py": conanfile, "profile": profile})
+ client.run('install . -pr=./profile')
+ toolchain = client.load(
+ "conanautotoolstoolchain{}".format('.bat' if platform.system() == "Windows" else '.sh'))
+
+ assert '-Dextra_defines -Ddefines' in toolchain
+ assert 'extra_cxxflags cxxflags' in toolchain
+ assert 'extra_cflags cflags' in toolchain
+ assert 'extra_ldflags sharedlinkflags exelinkflags' in toolchain
+
+
+def test_autotools_custom_environment():
+ client = TestClient()
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.gnu import GnuToolchain
+
+ class Conan(ConanFile):
+ settings = "os"
+ def generate(self):
+ at = GnuToolchain(self)
+ env = at.extra_env
+ env.define("FOO", "BAR")
+ at.generate()
+ """)
+
+ client.save({"conanfile.py": conanfile})
+ client.run("install . -s:b os=Linux -s:h os=Linux")
+ content = load(os.path.join(client.current_folder, "conanautotoolstoolchain.sh"))
+ assert 'export FOO="BAR"' in content
+
+
+@pytest.mark.parametrize("os_", ["Linux", "Windows"])
+def test_linker_scripts_via_conf(os_):
+ profile = textwrap.dedent("""
+ [settings]
+ os=%s
+ compiler=gcc
+ compiler.version=6
+ compiler.libcxx=libstdc++11
+ arch=armv8
+ build_type=Release
+
+ [conf]
+
+ tools.build:sharedlinkflags+=["--flag5"]
+ tools.build:exelinkflags+=["--flag6"]
+ tools.build:linker_scripts+=["/linker/scripts/flash.ld", "/linker/scripts/extra_data.ld"]
+ """ % os_)
+ client = TestClient()
+ conanfile = GenConanfile().with_settings("os", "arch", "compiler", "build_type") \
+ .with_generator("GnuToolchain")
+ client.save({"conanfile.py": conanfile,
+ "profile": profile})
+ client.run("install . --profile:build=profile --profile:host=profile")
+ toolchain = client.load(
+ "conanautotoolstoolchain{}".format('.bat' if os_ == "Windows" else '.sh'))
+ if os_ == "Windows":
+ assert 'set "LDFLAGS=%LDFLAGS% --flag5 --flag6 -T\'/linker/scripts/flash.ld\' -T\'/linker/scripts/extra_data.ld\'"' in toolchain
+ else:
+ assert 'export LDFLAGS="$LDFLAGS --flag5 --flag6 -T\'/linker/scripts/flash.ld\' -T\'/linker/scripts/extra_data.ld\'"' in toolchain
+
+
+def test_not_none_values():
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.gnu import GnuToolchain
+
+ class Foo(ConanFile):
+ name = "foo"
+ version = "1.0"
+
+ def generate(self):
+ tc = GnuToolchain(self)
+ assert None not in tc.defines
+ assert None not in tc.cxxflags
+ assert None not in tc.cflags
+ assert None not in tc.ldflags
+
+ """)
+
+ client = TestClient()
+ client.save({"conanfile.py": conanfile})
+ client.run("install .")
+
+
+def test_set_prefix():
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.gnu import GnuToolchain
+ from conan.tools.layout import basic_layout
+
+
+ class Foo(ConanFile):
+ name = "foo"
+ version = "1.0"
+ def layout(self):
+ basic_layout(self)
+ def generate(self):
+ at_toolchain = GnuToolchain(self, prefix="/somefolder")
+ at_toolchain.generate()
+ """)
+
+ client = TestClient()
+ client.save({"conanfile.py": conanfile})
+ client.run("install .")
+ conanbuild = client.load(
+ os.path.join(client.current_folder, "build", "conan", "conanbuild.conf"))
+ assert "--prefix=/somefolder" in conanbuild
+ assert conanbuild.count("--prefix") == 1
+
+
+def test_unknown_compiler():
+ client = TestClient()
+ settings = load(client.cache.settings_path)
+ settings = settings.replace("gcc:", "xlc:\n gcc:", 1)
+ save(client.cache.settings_path, settings)
+ client.save({"conanfile.py": GenConanfile().with_settings("compiler", "build_type")
+ .with_generator("GnuToolchain")
+ })
+ # this used to crash, because of build_type_flags in GnuToolchain returning empty string
+ client.run("install . -s compiler=xlc")
+ assert "conanfile.py: Generator 'GnuToolchain' calling 'generate()'" in client.out
+
+
+def test_toolchain_and_compilers_build_context():
+ """
+ Tests how GnuToolchain manages the build context profile if the build profile is
+ specifying another compiler path (using conf)
+
+ Issue related: https://github.com/conan-io/conan/issues/15878
+ """
+ host = textwrap.dedent("""
+ [settings]
+ arch=armv8
+ build_type=Release
+ compiler=gcc
+ compiler.cppstd=gnu17
+ compiler.libcxx=libstdc++11
+ compiler.version=11
+ os=Linux
+
+ [conf]
+ tools.build:compiler_executables={"c": "gcc", "cpp": "g++", "rc": "windres"}
+ """)
+ build = textwrap.dedent("""
+ [settings]
+ os=Linux
+ arch=x86_64
+ compiler=clang
+ compiler.version=12
+ compiler.libcxx=libc++
+ compiler.cppstd=11
+
+ [conf]
+ tools.build:compiler_executables={"c": "clang", "cpp": "clang++"}
+ """)
+ tool = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.files import load
+
+ class toolRecipe(ConanFile):
+ name = "tool"
+ version = "1.0"
+ # Binary configuration
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "GnuToolchain"
+
+ def build(self):
+ toolchain = os.path.join(self.generators_folder, "conanautotoolstoolchain.sh")
+ content = load(self, toolchain)
+ assert 'export CC="clang"' in content
+ assert 'export CXX="clang++"' in content
+ """)
+ consumer = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.files import load
+
+ class consumerRecipe(ConanFile):
+ name = "consumer"
+ version = "1.0"
+ # Binary configuration
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "GnuToolchain"
+ tool_requires = "tool/1.0"
+
+ def build(self):
+ toolchain = os.path.join(self.generators_folder, "conanautotoolstoolchain.sh")
+ content = load(self, toolchain)
+ assert 'export CC="gcc"' in content
+ assert 'export CXX="g++"' in content
+ assert 'export RC="windres"' in content
+ """)
+ client = TestClient()
+ client.save({
+ "host": host,
+ "build": build,
+ "tool/conanfile.py": tool,
+ "consumer/conanfile.py": consumer
+ })
+ client.run("export tool")
+ client.run("create consumer -pr:h host -pr:b build --build=missing")
+
+
+def test_autotools_crossbuild_ux():
+ client = TestClient()
+ profile_build = textwrap.dedent("""
+ [settings]
+ os = Macos
+ os.version=10.11
+ arch = armv7
+ compiler = apple-clang
+ compiler.version = 12.0
+ compiler.libcxx = libc++
+ """)
+ profile_host = textwrap.dedent("""
+ [settings]
+ arch=x86_64
+ build_type=Release
+ compiler=apple-clang
+ compiler.cppstd=gnu17
+ compiler.libcxx=libc++
+ compiler.version=15
+ os=Macos
+ [conf]
+ tools.apple:sdk_path=/my/sdk/path
+ """)
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.gnu import GnuToolchain
+ from conan.tools.build import cross_building
+ class Pkg(ConanFile):
+ settings = "os", "arch", "compiler", "build_type"
+ def generate(self):
+ tc = GnuToolchain(self)
+ if cross_building(self):
+ host_arch = tc.triplets_info["host"]["machine"]
+ build_arch = tc.triplets_info["build"]["machine"]
+ if host_arch and build_arch:
+ tc.configure_args["--host"] = f"{host_arch}-my-triplet-host"
+ tc.configure_args["--build"] = f"{build_arch}-my-triplet-build"
+ tc.generate()
+ """)
+
+ client.save({"conanfile.py": conanfile,
+ "profile_build": profile_build,
+ "profile_host": profile_host})
+ client.run("install . --profile:build=profile_build --profile:host=profile_host")
+ conanbuild = client.load("conanbuild.conf")
+ host_flags = re.findall(r"--host=[\w-]*\b", conanbuild)
+ build_flags = re.findall(r"--build=[\w-]*\b", conanbuild)
+ assert len(host_flags) == 1
+ assert len(build_flags) == 1
+ assert host_flags[0] == '--host=x86_64-my-triplet-host'
+ assert build_flags[0] == '--build=arm-my-triplet-build'
+
+
+def test_msvc_profile_defaults():
+ """
+ Tests how GnuToolchain manages MSVC profile and its default env variables.
+ """
+ profile = textwrap.dedent("""\
+ [settings]
+ os=Windows
+ arch=x86_64
+ compiler=msvc
+ compiler.version=191
+ compiler.runtime=dynamic
+ build_type=Release
+
+ [conf]
+ tools.build:compiler_executables={"c": "clang", "cpp": "clang++"}
+ # Fake installation path
+ tools.microsoft.msbuild:installation_path={{os.getcwd()}}
+ """)
+ # Consumer with default values
+ consumer = textwrap.dedent("""\
+ import os
+ from conan import ConanFile
+ from conan.tools.files import load
+
+ class consumerRecipe(ConanFile):
+ name = "consumer"
+ version = "1.0"
+ # Binary configuration
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "GnuToolchain"
+
+ def build(self):
+ toolchain = os.path.join(self.generators_folder, "conanautotoolstoolchain.bat")
+ content = load(self, toolchain)
+ # Default values and conf ones
+ assert r'set "CC=clang"' in content # conf value has precedence
+ assert r'set "CXX=clang++"' in content # conf value has precedence
+ assert 'set "LD=link -nologo"' in content
+ assert r'set "AR=lib"' in content
+ assert 'set "NM=dumpbin -symbols"' in content
+ assert 'set "OBJDUMP=:"' in content
+ assert 'set "RANLIB=:"' in content
+ assert 'set "STRIP=:"' in content
+ """)
+ client = TestClient()
+ client.save({
+ "profile": profile,
+ "consumer/conanfile.py": consumer
+ })
+ client.run("create consumer -pr:a profile --build=missing")
+ # Consumer changing default values
+ consumer = textwrap.dedent("""\
+ import os
+ from conan import ConanFile
+ from conan.tools.files import load
+ from conan.tools.gnu import GnuToolchain
+
+ class consumerRecipe(ConanFile):
+ name = "consumer"
+ version = "1.0"
+ # Binary configuration
+ settings = "os", "compiler", "build_type", "arch"
+
+ def generate(self):
+ tc = GnuToolchain(self)
+ # Prepending compiler wrappers
+ tc.extra_env.prepend("CC", "compile")
+ tc.extra_env.prepend("CXX", "compile")
+ tc.extra_env.prepend("AR", "ar-lib")
+ tc.extra_env.append("LD", "-fixed")
+ tc.extra_env.define("OBJDUMP", "other-value")
+ tc.extra_env.unset("RANLIB")
+ tc.generate()
+
+ def build(self):
+ toolchain = os.path.join(self.generators_folder, "conanautotoolstoolchain.bat")
+ content = load(self, toolchain)
+ # Default values
+ assert r'set "CC=compile clang"' in content
+ assert r'set "CXX=compile clang++"' in content
+ assert 'set "LD=link -nologo -fixed"' in content # appended new value
+ assert r'set "AR=ar-lib lib"' in content
+ assert 'set "NM=dumpbin -symbols"' in content
+ assert 'set "OBJDUMP=other-value"' in content # redefined
+ assert 'set "RANLIB=:"' not in content # removed
+ assert 'set "STRIP=:"' in content
+ """)
+ client.save({
+ "consumer/conanfile.py": consumer
+ })
+ client.run("create consumer -pr:a profile --build=missing")
diff --git a/conans/test/unittests/tools/gnu/test_gnutoolchain.py b/conans/test/unittests/tools/gnu/test_gnutoolchain.py
new file mode 100644
index 00000000000..faaa43da40e
--- /dev/null
+++ b/conans/test/unittests/tools/gnu/test_gnutoolchain.py
@@ -0,0 +1,196 @@
+from unittest.mock import patch
+
+import pytest
+
+from conan.tools.build import cmd_args_to_string
+from conan.tools.gnu import GnuToolchain
+from conans.errors import ConanException
+from conans.model.conf import Conf
+from conans.test.utils.mocks import ConanFileMock, MockSettings
+
+
+@pytest.fixture()
+def cross_building_conanfile():
+ settings_build = MockSettings({"os": "Linux",
+ "arch": "x86_64",
+ "compiler": "gcc",
+ "compiler.version": "11",
+ "compiler.libcxx": "libstdc++",
+ "build_type": "Release"})
+ settings_target = MockSettings({"os": "Android", "arch": "armv8"})
+ settings = MockSettings({"os": "Emscripten", "arch": "wasm"})
+ conanfile = ConanFileMock()
+ conanfile.settings = settings
+ conanfile.settings_build = settings_build
+ conanfile.settings_target = settings_target
+ return conanfile
+
+
+def test_get_gnu_triplet_for_cross_building():
+ """
+ Testing AutotoolsToolchainX and _get_gnu_triplet() function in case of
+ having os=Windows and cross compiling
+ """
+ # Issue: https://github.com/conan-io/conan/issues/10139
+ settings = MockSettings({"build_type": "Release",
+ "compiler": "gcc",
+ "compiler.version": "10.2",
+ "os": "Windows",
+ "arch": "x86_64"})
+ conanfile = ConanFileMock()
+ conanfile.settings = settings
+ conanfile.settings_build = MockSettings({"os": "Solaris", "arch": "x86"})
+ at = GnuToolchain(conanfile)
+ assert at.configure_args["--host"] == "x86_64-w64-mingw32"
+ assert at.configure_args["--build"] == "i686-solaris"
+
+
+def test_get_toolchain_cppstd():
+ settings = MockSettings({"build_type": "Release",
+ "compiler": "gcc",
+ "compiler.version": "10",
+ "compiler.cppstd": "20",
+ "os": "Linux",
+ "arch": "x86_64"})
+ conanfile = ConanFileMock()
+ conanfile.settings = settings
+ conanfile.settings_build = settings
+ at = GnuToolchain(conanfile)
+ assert at.cppstd == "-std=c++2a"
+ settings.values["compiler.version"] = "12"
+ at = GnuToolchain(conanfile)
+ assert at.cppstd == "-std=c++20"
+
+
+@pytest.mark.parametrize("runtime, runtime_type, expected",
+ [("static", "Debug", "MTd"),
+ ("static", "Release", "MT"),
+ ("dynamic", "Debug", "MDd"),
+ ("dynamic", "Release", "MD")])
+def test_msvc_runtime(runtime, runtime_type, expected):
+ """
+ Testing AutotoolsToolchainX with the msvc compiler adjust the runtime
+ """
+ # Issue: https://github.com/conan-io/conan/issues/10139
+ settings = MockSettings({"build_type": "Release",
+ "compiler": "msvc",
+ "compiler.runtime": runtime,
+ "compiler.runtime_type": runtime_type,
+ "os": "Windows",
+ "arch": "x86_64"})
+ conanfile = ConanFileMock()
+ conanfile.settings = settings
+ conanfile.settings_build = settings
+ at = GnuToolchain(conanfile)
+ expected_flag = "-{}".format(expected)
+ assert at.msvc_runtime_flag == expected_flag
+ env = at._environment.vars(conanfile)
+ assert expected_flag in env["CFLAGS"]
+ assert expected_flag in env["CXXFLAGS"]
+
+
+@pytest.mark.parametrize("runtime", ["MTd", "MT", "MDd", "MD"])
+def test_visual_runtime(runtime):
+ """
+ Testing AutotoolsToolchainX with the msvc compiler adjust the runtime
+ """
+ # Issue: https://github.com/conan-io/conan/issues/10139
+ settings = MockSettings({"build_type": "Release" if "d" not in runtime else "Debug",
+ "compiler": "msvc",
+ "compiler.runtime": "static" if "MT" in runtime else "dynamic",
+ "compiler.runtime_type": "Release" if "d" not in runtime else "Debug",
+ "os": "Windows",
+ "arch": "x86_64"})
+ conanfile = ConanFileMock()
+ conanfile.settings = settings
+ conanfile.settings_build = settings
+ at = GnuToolchain(conanfile)
+ expected_flag = "-{}".format(runtime)
+ assert at.msvc_runtime_flag == expected_flag
+ env = at._environment.vars(conanfile)
+ assert expected_flag in env["CFLAGS"]
+ assert expected_flag in env["CXXFLAGS"]
+
+
+def test_get_gnu_triplet_for_cross_building_raise_error():
+ """
+ Testing AutotoolsToolchainX and _get_gnu_triplet() function raises an error in case of
+ having os=Windows, cross compiling and not defined any compiler
+ """
+ # Issue: https://github.com/conan-io/conan/issues/10139
+ settings = MockSettings({"build_type": "Release",
+ "os": "Windows",
+ "arch": "x86_64"})
+ conanfile = ConanFileMock()
+ conanfile.settings = settings
+ conanfile.settings_build = MockSettings({"os": "Solaris", "arch": "x86"})
+ with pytest.raises(ConanException) as conan_error:
+ GnuToolchain(conanfile)
+ msg = "'compiler' parameter for 'get_gnu_triplet()' is not specified and " \
+ "needed for os=Windows"
+ assert msg == str(conan_error.value)
+
+
+def test_compilers_mapping():
+ autotools_mapping = {"c": "CC", "cpp": "CXX", "cuda": "NVCC", "fortran": "FC"}
+ compilers = {"c": "path_to_c", "cpp": "path_to_cpp", "cuda": "path_to_cuda",
+ "fortran": "path_to_fortran"}
+ settings = MockSettings({"build_type": "Release",
+ "os": "Windows",
+ "arch": "x86_64",
+ "compiler": "gcc"})
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.conf.define("tools.build:compiler_executables", compilers)
+ conanfile.settings = settings
+ at = GnuToolchain(conanfile)
+ env = at._environment.vars(conanfile)
+ for compiler, env_var in autotools_mapping.items():
+ assert env[env_var] == f"path_to_{compiler}"
+
+
+def test_linker_scripts():
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.conf.define("tools.build:linker_scripts", ["path_to_first_linker_script", "path_to_second_linker_script"])
+ settings = MockSettings({"build_type": "Release",
+ "os": "Windows",
+ "compiler": "gcc",
+ "arch": "x86_64"})
+ conanfile.settings = settings
+ at = GnuToolchain(conanfile)
+ env = at._environment.vars(conanfile)
+ assert "-T'path_to_first_linker_script'" in env["LDFLAGS"]
+ assert "-T'path_to_second_linker_script'" in env["LDFLAGS"]
+
+
+def test_update_or_prune_any_args(cross_building_conanfile):
+ # Issue: https://github.com/conan-io/conan/issues/12642
+ at = GnuToolchain(cross_building_conanfile)
+ at.configure_args.update({
+ "--with-cross-build": "my_path",
+ "--something-host": "my_host",
+ "--prefix": "/my/other/prefix"
+ })
+ new_configure_args = cmd_args_to_string(GnuToolchain._dict_to_list(at.configure_args))
+ assert "--build=x86_64-linux-gnu" in new_configure_args
+ assert "--host=wasm32-local-emscripten" in new_configure_args
+ assert "--with-cross-build=my_path" in new_configure_args
+ assert "--something-host=my_host" in new_configure_args
+ assert "--prefix=/my/other/prefix" in new_configure_args
+ # https://github.com/conan-io/conan/issues/12431
+ at.configure_args.pop("--build")
+ at.configure_args.pop("--host")
+ new_configure_args = cmd_args_to_string(GnuToolchain._dict_to_list(at.configure_args))
+ assert "--build=x86_64-linux-gnu" not in new_configure_args # removed
+ assert "--host=wasm32-local-emscripten" not in new_configure_args # removed
+ assert "--with-cross-build=my_path" in new_configure_args
+ assert "--something-host=my_host" in new_configure_args
+ # Update autoreconf_args
+ at.autoreconf_args.pop("--force")
+ new_autoreconf_args = cmd_args_to_string(GnuToolchain._dict_to_list(at.autoreconf_args))
+ assert "'--force" not in new_autoreconf_args
+ # Add new value to make_args
+ at.make_args.update({"--new-complex-flag": "new-value"})
+ new_make_args = cmd_args_to_string(GnuToolchain._dict_to_list(at.make_args))
+ assert "--new-complex-flag=new-value" in new_make_args
diff --git a/conans/test/unittests/tools/gnu/test_triplets.py b/conans/test/unittests/tools/gnu/test_triplets.py
index 481104ccd59..be07191404e 100644
--- a/conans/test/unittests/tools/gnu/test_triplets.py
+++ b/conans/test/unittests/tools/gnu/test_triplets.py
@@ -1,10 +1,10 @@
import pytest
-from conan.tools.gnu.get_gnu_triplet import _get_gnu_triplet
+from conan.tools.gnu.get_gnu_triplet import _get_gnu_triplet, _get_gnu_os, _get_gnu_arch
from conans.errors import ConanException
-@pytest.mark.parametrize("os_, arch, compiler, expected_triplet", [
+@pytest.mark.parametrize("os_, arch, compiler, expected", [
["Linux", "x86", None, "i686-linux-gnu"],
["Linux", "x86_64", None, "x86_64-linux-gnu"],
["Linux", "armv6", None, "arm-linux-gnueabi"],
@@ -66,9 +66,11 @@
["Linux", "riscv32", None, "riscv32-linux-gnu"],
["Linux", "riscv64", None, "riscv64-linux-gnu"],
])
-def test_get_gnu_triplet(os_, arch, compiler, expected_triplet):
- triplet = _get_gnu_triplet(os_, arch, compiler)
- assert triplet == expected_triplet
+def test_get_gnu_triplet(os_, arch, compiler, expected):
+ info = _get_gnu_triplet(os_, arch, compiler)
+ assert info["triplet"] == expected
+ assert info["machine"] == _get_gnu_arch(os_, arch)
+ assert info["system"] == _get_gnu_os(os_, arch, compiler)
def test_get_gnu_triplet_on_windows_without_compiler():
diff --git a/conans/test/utils/tools.py b/conans/test/utils/tools.py
index 5e96c8550cc..f9d2efe9aaf 100644
--- a/conans/test/utils/tools.py
+++ b/conans/test/utils/tools.py
@@ -365,10 +365,12 @@ def redirect_input(target):
sys.stdin = original_stdin
-class TestClient(object):
+class TestClient:
""" Test wrap of the conans application to launch tests in the same way as
in command line
"""
+ # Preventing Pytest collects any tests from here
+ __test__ = False
def __init__(self, cache_folder=None, current_folder=None, servers=None, inputs=None,
requester_class=None, path_with_spaces=True,
| [
{
"components": [
{
"doc": "",
"lines": [
4,
58
],
"name": "_get_gnu_arch",
"signature": "def _get_gnu_arch(os_, arch):",
"type": "function"
},
{
"doc": "",
"lines": [
61,
92
],
... | [
"conans/test/integration/toolchains/gnu/test_gnutoolchain.py::test_extra_flags_via_conf[Macos]",
"conans/test/integration/toolchains/gnu/test_gnutoolchain.py::test_extra_flags_via_conf[Linux]",
"conans/test/integration/toolchains/gnu/test_gnutoolchain.py::test_extra_flags_via_conf[Windows]",
"conans/test/inte... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
[POC] GnuToolchain
Changelog: Omit
Docs: omit
**NOTE**: Let's omit the documentation for a while. We need to verify that it's working as expected, i.e., migrating some recipes in the background. After that test period, we'll publish its official documentation.
Rationale:
`AutotoolsToolchain` has quite a limited UI, and it's difficult to implement some new mechanisms to improve the UX without introducing breaking changes. The main arguments are Python dictionaries to manage all the arguments, so it simplifies everything a lot. This generator could be a perfect replacement for the current `AutotoolsToolchain`.
**GnuToolchain** (main changes/additions)
* Managing most of the xxxx_args as dict objects.
* Added `extra_env` variable to let the users add their own `Environment` variables.
* Added new MSVC common env flags.
* ~~Added new conf `tools.build:compiler_wrappers` (e.g., automake wrappers).~~
* Added `yes_no` wrapper function.
* Removed `environment()` and `vars()` functions.
* Removed `scope` and `env` parameters from the `generate()` function.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/gnu/get_gnu_triplet.py]
(definition of _get_gnu_arch:)
def _get_gnu_arch(os_, arch):
(definition of _get_gnu_os:)
def _get_gnu_os(os_, arch, compiler=None):
[end of new definitions in conan/tools/gnu/get_gnu_triplet.py]
[start of new definitions in conan/tools/gnu/gnutoolchain.py]
(definition of GnuToolchain:)
class GnuToolchain:
"""GnuToolchain generator.
Note: it's based on legacy AutotoolsToolchain but with a more modern and usable UX"""
(definition of GnuToolchain.__init__:)
def __init__(self, conanfile, namespace=None, prefix="/"):
""":param conanfile: The current recipe object. Always use ``self``.
:param namespace: This argument avoids collisions when you have multiple toolchain calls in
the same recipe. By setting this argument, the *conanbuild.conf* file used to pass
information to the build helper will be named as *<namespace>_conanbuild.conf*. The default
value is ``None`` meaning that the name of the generated file is *conanbuild.conf*. This
namespace must be also set with the same value in the constructor of the Autotools build
helper so that it reads the information from the proper file.
:param prefix: Folder to use for ``--prefix`` argument ("/" by default)."""
(definition of GnuToolchain.yes_no:)
def yes_no(self, option_name, default=None, negated=False):
"""Simple wrapper to return "yes" or "no" depending on whether option_name is
evaluated as True or False.
:param option_name: option name.
:param default: Default value to return.
:param negated: Negates the option value if True.
:return: "yes" or "no" depending on whether option_name is True or False."""
(definition of GnuToolchain._initialize_default_extra_env:)
def _initialize_default_extra_env(self):
"""Initialize the default environment variables."""
(definition of GnuToolchain._get_msvc_runtime_flag:)
def _get_msvc_runtime_flag(self):
(definition of GnuToolchain._msvc_extra_flags:)
def _msvc_extra_flags(self):
(definition of GnuToolchain._add_msvc_flags:)
def _add_msvc_flags(self, flags):
(definition of GnuToolchain._filter_list_empty_fields:)
def _filter_list_empty_fields(v):
(definition of GnuToolchain._dict_to_list:)
def _dict_to_list(flags):
(definition of GnuToolchain.cxxflags:)
def cxxflags(self):
(definition of GnuToolchain.cflags:)
def cflags(self):
(definition of GnuToolchain.ldflags:)
def ldflags(self):
(definition of GnuToolchain.defines:)
def defines(self):
(definition of GnuToolchain._get_default_configure_shared_flags:)
def _get_default_configure_shared_flags(self):
(definition of GnuToolchain._get_default_configure_install_flags:)
def _get_default_configure_install_flags(self):
(definition of GnuToolchain._get_default_triplets:)
def _get_default_triplets(self):
(definition of GnuToolchain._environment:)
def _environment(self):
(definition of GnuToolchain.generate:)
def generate(self):
[end of new definitions in conan/tools/gnu/gnutoolchain.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
deepset-ai__haystack-7461 | 7,461 | deepset-ai/haystack | null | 1ce12c7a6a0cf2276c7f5352cd726d4946e19b58 | 2024-04-03T14:40:58Z | diff --git a/haystack/components/evaluators/document_map.py b/haystack/components/evaluators/document_map.py
new file mode 100644
index 0000000000..483a7699c6
--- /dev/null
+++ b/haystack/components/evaluators/document_map.py
@@ -0,0 +1,84 @@
+from typing import Any, Dict, List
+
+from haystack import Document, component
+
+
+@component
+class DocumentMeanAveragePrecision:
+ """
+ Evaluator that calculates the mean average precision of the retrieved documents, a metric
+ that measures how high retrieved documents are ranked.
+ Each question can have multiple ground truth documents and multiple retrieved documents.
+
+ `DocumentMeanAveragePrecision` doesn't normalize its inputs, the `DocumentCleaner` component
+ should be used to clean and normalize the documents before passing them to this evaluator.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import AnswerExactMatchEvaluator
+
+ evaluator = DocumentMeanAveragePrecision()
+ result = evaluator.run(
+ ground_truth_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="9th")],
+ ],
+ retrieved_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
+ ],
+ )
+
+ print(result["individual_scores"])
+ # [1.0, 0.8333333333333333]
+ print(result["score"])
+ # 0.9166666666666666
+ ```
+ """
+
+ @component.output_types(score=float, individual_scores=List[float])
+ def run(
+ self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
+ ) -> Dict[str, Any]:
+ """
+ Run the DocumentMeanAveragePrecision on the given inputs.
+ All lists must have the same length.
+
+ :param ground_truth_documents:
+ A list of expected documents for each question.
+ :param retrieved_documents:
+ A list of retrieved documents for each question.
+ :returns:
+ A dictionary with the following outputs:
+ - `score` - The average of calculated scores.
+ - `invididual_scores` - A list of numbers from 0.0 to 1.0 that represents how high retrieved documents are ranked.
+ """
+ if len(ground_truth_documents) != len(retrieved_documents):
+ msg = "The length of ground_truth_documents and retrieved_documents must be the same."
+ raise ValueError(msg)
+
+ individual_scores = []
+
+ for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
+ score = 0.0
+ for ground_document in ground_truth:
+ if ground_document.content is None:
+ continue
+
+ average_precision = 0.0
+ relevant_documents = 0
+
+ for rank, retrieved_document in enumerate(retrieved):
+ if retrieved_document.content is None:
+ continue
+
+ if ground_document.content in retrieved_document.content:
+ relevant_documents += 1
+ average_precision += relevant_documents / (rank + 1)
+ if relevant_documents > 0:
+ score = average_precision / relevant_documents
+ individual_scores.append(score)
+
+ score = sum(individual_scores) / len(retrieved_documents)
+
+ return {"score": score, "individual_scores": individual_scores}
diff --git a/releasenotes/notes/document-map-evaluator-de896c94b54fe3fa.yaml b/releasenotes/notes/document-map-evaluator-de896c94b54fe3fa.yaml
new file mode 100644
index 0000000000..36e28b73db
--- /dev/null
+++ b/releasenotes/notes/document-map-evaluator-de896c94b54fe3fa.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add DocumentMeanAveragePrecision, it can be used to calculate mean average precision of retrieved documents.
| diff --git a/test/components/evaluators/test_document_map.py b/test/components/evaluators/test_document_map.py
new file mode 100644
index 0000000000..f203dd01af
--- /dev/null
+++ b/test/components/evaluators/test_document_map.py
@@ -0,0 +1,78 @@
+import pytest
+
+from haystack import Document
+from haystack.components.evaluators.document_map import DocumentMeanAveragePrecision
+
+
+def test_run_with_all_matching():
+ evaluator = DocumentMeanAveragePrecision()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 1.0], "score": 1.0}
+
+
+def test_run_with_no_matching():
+ evaluator = DocumentMeanAveragePrecision()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Paris")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [0.0, 0.0], "score": 0.0}
+
+
+def test_run_with_partial_matching():
+ evaluator = DocumentMeanAveragePrecision()
+ result = evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 0.0], "score": 0.5}
+
+
+def test_run_with_complex_data():
+ evaluator = DocumentMeanAveragePrecision()
+ result = evaluator.run(
+ ground_truth_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="9th")],
+ [Document(content="classical music"), Document(content="classical")],
+ [Document(content="11th century"), Document(content="the 11th")],
+ [Document(content="Denmark, Iceland and Norway")],
+ [Document(content="10th century"), Document(content="10th")],
+ ],
+ retrieved_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
+ [Document(content="classical"), Document(content="rock music"), Document(content="dubstep")],
+ [Document(content="11th"), Document(content="the 11th"), Document(content="11th century")],
+ [Document(content="Denmark"), Document(content="Norway"), Document(content="Iceland")],
+ [
+ Document(content="10th century"),
+ Document(content="the first half of the 10th century"),
+ Document(content="10th"),
+ Document(content="10th"),
+ ],
+ ],
+ )
+ assert result == {"individual_scores": [1.0, 0.8333333333333333, 1.0, 0.5, 0.0, 1.0], "score": 0.7222222222222222}
+
+
+def test_run_with_different_lengths():
+ with pytest.raises(ValueError):
+ evaluator = DocumentMeanAveragePrecision()
+ evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator = DocumentMeanAveragePrecision()
+ evaluator.run(
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")]],
+ )
| diff --git a/releasenotes/notes/document-map-evaluator-de896c94b54fe3fa.yaml b/releasenotes/notes/document-map-evaluator-de896c94b54fe3fa.yaml
new file mode 100644
index 0000000000..36e28b73db
--- /dev/null
+++ b/releasenotes/notes/document-map-evaluator-de896c94b54fe3fa.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add DocumentMeanAveragePrecision, it can be used to calculate mean average precision of retrieved documents.
| [
{
"components": [
{
"doc": "Evaluator that calculates the mean average precision of the retrieved documents, a metric\nthat measures how high retrieved documents are ranked.\nEach question can have multiple ground truth documents and multiple retrieved documents.\n\n`DocumentMeanAveragePrecision` ... | [
"test/components/evaluators/test_document_map.py::test_run_with_all_matching",
"test/components/evaluators/test_document_map.py::test_run_with_no_matching",
"test/components/evaluators/test_document_map.py::test_run_with_partial_matching",
"test/components/evaluators/test_document_map.py::test_run_with_comple... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add `DocumentMeanAveragePrecision`
### Related Issues
- fixes #6066
### Proposed Changes:
Add `DocumentMeanAveragePrecision` Component to calculate Mean Average Precision of a retrieved documents given a list of ground truth documents.
### How did you test it?
I added unit tests.
### Notes for the reviewer
N/A
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/document_map.py]
(definition of DocumentMeanAveragePrecision:)
class DocumentMeanAveragePrecision:
"""Evaluator that calculates the mean average precision of the retrieved documents, a metric
that measures how high retrieved documents are ranked.
Each question can have multiple ground truth documents and multiple retrieved documents.
`DocumentMeanAveragePrecision` doesn't normalize its inputs, the `DocumentCleaner` component
should be used to clean and normalize the documents before passing them to this evaluator.
Usage example:
```python
from haystack.components.evaluators import AnswerExactMatchEvaluator
evaluator = DocumentMeanAveragePrecision()
result = evaluator.run(
ground_truth_documents=[
[Document(content="France")],
[Document(content="9th century"), Document(content="9th")],
],
retrieved_documents=[
[Document(content="France")],
[Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
],
)
print(result["individual_scores"])
# [1.0, 0.8333333333333333]
print(result["score"])
# 0.9166666666666666
```"""
(definition of DocumentMeanAveragePrecision.run:)
def run( self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]] ) -> Dict[str, Any]:
"""Run the DocumentMeanAveragePrecision on the given inputs.
All lists must have the same length.
:param ground_truth_documents:
A list of expected documents for each question.
:param retrieved_documents:
A list of retrieved documents for each question.
:returns:
A dictionary with the following outputs:
- `score` - The average of calculated scores.
- `invididual_scores` - A list of numbers from 0.0 to 1.0 that represents how high retrieved documents are ranked."""
[end of new definitions in haystack/components/evaluators/document_map.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement function to calculate Mean Average Precision metric
As specified in proposal #5794 we need to implement a function to calculate the Mean Average Precision metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_map()` could be a nice name.
For more detailed information check out the original proposal.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
google-deepmind__optax-899 | 899 | google-deepmind/optax | null | b0c04dceafb0d304c59077bbbf15285800f87770 | 2024-04-03T09:19:13Z | diff --git a/optax/losses/_classification.py b/optax/losses/_classification.py
index 5db806c78..68bda2146 100644
--- a/optax/losses/_classification.py
+++ b/optax/losses/_classification.py
@@ -103,6 +103,37 @@ def perceptron_loss(
return jnp.maximum(0, - predictor_outputs * targets)
+def sparsemax_loss(
+ logits: chex.Array,
+ labels: chex.Array,
+) -> chex.Array:
+ """Binary sparsemax loss.
+
+ This loss is zero if and only if `jax.nn.sparse_sigmoid(logits) == labels`.
+
+ References:
+ Learning with Fenchel-Young Losses. Mathieu Blondel, André F. T. Martins,
+ Vlad Niculae. JMLR 2020. (Sec. 4.4)
+
+ Args:
+ logits: score produced by the model (float).
+ labels: ground-truth integer label (0 or 1).
+
+ Returns:
+ loss value
+
+ .. versionadded:: 0.2.3
+ """
+ return jax.nn.sparse_plus(jnp.where(labels, -logits, logits))
+
+
+@functools.partial(
+ chex.warn_deprecated_function,
+ replacement='sparsemax_loss')
+def binary_sparsemax_loss(logits, labels):
+ return sparsemax_loss(logits, labels)
+
+
def softmax_cross_entropy(
logits: chex.Array,
labels: chex.Array,
@@ -183,6 +214,9 @@ def multiclass_hinge_loss(
) -> chex.Array:
"""Multiclass hinge loss.
+ References:
+ https://en.wikipedia.org/wiki/Hinge_loss
+
Args:
scores: scores produced by the model (floats).
labels: ground-truth integer label.
@@ -190,9 +224,6 @@ def multiclass_hinge_loss(
Returns:
loss value
- References:
- https://en.wikipedia.org/wiki/Hinge_loss
-
.. versionadded:: 0.2.3
"""
one_hot_labels = jax.nn.one_hot(labels, scores.shape[-1])
| diff --git a/optax/losses/_classification_test.py b/optax/losses/_classification_test.py
index 86ca25a06..1ad7d9db2 100644
--- a/optax/losses/_classification_test.py
+++ b/optax/losses/_classification_test.py
@@ -276,6 +276,43 @@ def reference_impl(label, scores):
np.testing.assert_allclose(result, expected, atol=1e-4)
+class SparsemaxTest(parameterized.TestCase):
+
+ def test_binary(self):
+ label = 1
+ score = 10.
+ def reference_impl(label, logit):
+ scores = -(2*label-1)*logit
+ if scores <= -1.0:
+ return 0.0
+ elif scores >= 1.0:
+ return scores
+ else:
+ return (scores + 1.0) ** 2 / 4
+ expected = reference_impl(label, score)
+ result = _classification.sparsemax_loss(
+ jnp.asarray(score), jnp.asarray(label))
+ np.testing.assert_allclose(result, expected, atol=1e-4)
+
+ def test_batched_binary(self):
+ labels = jnp.array([1, 0])
+ scores = jnp.array([10., 20.])
+ def reference_impl(label, logit):
+ scores = -(2*label-1)*logit
+ if scores <= -1.0:
+ return 0.0
+ elif scores >= 1.0:
+ return scores
+ else:
+ return (scores + 1.0) ** 2 / 4
+ expected = jnp.asarray([
+ reference_impl(labels[0], scores[0]),
+ reference_impl(labels[1], scores[1])])
+ # in the optax loss the leading dimensions are automatically handled.
+ result = _classification.sparsemax_loss(scores, labels)
+ np.testing.assert_allclose(result, expected, atol=1e-4)
+
+
class ConvexKLDivergenceTest(parameterized.TestCase):
def setUp(self):
| [
{
"components": [
{
"doc": "Binary sparsemax loss.\n\nThis loss is zero if and only if `jax.nn.sparse_sigmoid(logits) == labels`.\n\nReferences:\n Learning with Fenchel-Young Losses. Mathieu Blondel, André F. T. Martins,\n Vlad Niculae. JMLR 2020. (Sec. 4.4)\n\nArgs:\n logits: score produced by... | [
"optax/losses/_classification_test.py::SparsemaxTest::test_batched_binary",
"optax/losses/_classification_test.py::SparsemaxTest::test_binary"
] | [
"optax/losses/_classification_test.py::SoftmaxCrossEntropyTest::test_batched__with_device",
"optax/losses/_classification_test.py::SoftmaxCrossEntropyTest::test_batched__with_jit",
"optax/losses/_classification_test.py::SoftmaxCrossEntropyTest::test_batched__without_device",
"optax/losses/_classification_test... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Upstream sparsemax jaxopt loss to optax.
Upstream sparsemax jaxopt loss to optax.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in optax/losses/_classification.py]
(definition of sparsemax_loss:)
def sparsemax_loss( logits: chex.Array, labels: chex.Array, ) -> chex.Array:
"""Binary sparsemax loss.
This loss is zero if and only if `jax.nn.sparse_sigmoid(logits) == labels`.
References:
Learning with Fenchel-Young Losses. Mathieu Blondel, André F. T. Martins,
Vlad Niculae. JMLR 2020. (Sec. 4.4)
Args:
logits: score produced by the model (float).
labels: ground-truth integer label (0 or 1).
Returns:
loss value
.. versionadded:: 0.2.3"""
(definition of binary_sparsemax_loss:)
def binary_sparsemax_loss(logits, labels):
[end of new definitions in optax/losses/_classification.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 1e08bccf195ac54e7d9d766eb5e69345bf0e3230 | ||
google-deepmind__optax-897 | 897 | google-deepmind/optax | null | 246f002a64e21bcc8f0b1a3390eb4ee13da83ed5 | 2024-04-01T20:30:36Z | diff --git a/docs/api/losses.rst b/docs/api/losses.rst
index 95cef9f99..41ceab134 100644
--- a/docs/api/losses.rst
+++ b/docs/api/losses.rst
@@ -14,6 +14,7 @@ Losses
kl_divergence
l2_loss
log_cosh
+ ntxent
safe_softmax_cross_entropy
sigmoid_binary_cross_entropy
sigmoid_focal_loss
@@ -61,6 +62,10 @@ Log hyperbolic cosine loss
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: log_cosh
+Normalized temperature scaled cross-entropy (NT-Xent) loss
+~~~~~~~~~~~~~~~~
+.. autofunction:: ntxent
+
Sigmoid binary cross-entropy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: sigmoid_binary_cross_entropy
diff --git a/optax/__init__.py b/optax/__init__.py
index 5edae96d1..4225d7ef2 100644
--- a/optax/__init__.py
+++ b/optax/__init__.py
@@ -203,6 +203,7 @@
kl_divergence = losses.kl_divergence
l2_loss = losses.l2_loss
log_cosh = losses.log_cosh
+ntxent = losses.ntxent
sigmoid_binary_cross_entropy = losses.sigmoid_binary_cross_entropy
smooth_labels = losses.smooth_labels
softmax_cross_entropy = losses.softmax_cross_entropy
@@ -306,6 +307,7 @@
"MultiTransformState",
"nadam",
"nadamw",
+ "ntxent",
"noisy_sgd",
"novograd",
"NonNegativeParamsState",
diff --git a/optax/losses/__init__.py b/optax/losses/__init__.py
index 8393dc310..65c171f24 100644
--- a/optax/losses/__init__.py
+++ b/optax/losses/__init__.py
@@ -35,3 +35,4 @@
from optax.losses._regression import log_cosh
from optax.losses._regression import squared_error
from optax.losses._smoothing import smooth_labels
+from optax.losses._self_supervised import ntxent
diff --git a/optax/losses/_self_supervised.py b/optax/losses/_self_supervised.py
new file mode 100644
index 000000000..8c756a591
--- /dev/null
+++ b/optax/losses/_self_supervised.py
@@ -0,0 +1,86 @@
+# Copyright 2024 DeepMind Technologies Limited. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Self supervised losses."""
+
+import chex
+from jax import lax
+import jax.numpy as jnp
+from optax.losses._regression import cosine_similarity
+
+
+def ntxent(
+ embeddings: chex.Array,
+ labels: chex.Array,
+ temperature: chex.Numeric = 0.07
+) -> chex.Numeric:
+ """Normalized temperature scaled cross entropy loss (NT-Xent).
+
+ References:
+ T. Chen et al `A Simple Framework for Contrastive Learning of Visual
+ Representations <http://arxiv.org/abs/2002.05709>`_, 2020
+ kevinmusgrave.github.io/pytorch-metric-learning/losses/#ntxentloss
+
+ Args:
+ emeddings: batch of embeddings, with shape [batch, feature_length]
+ labels: labels for groups that are positive pairs. e.g. if you have
+ a batch of 4 embeddings and the first two and last two were positive
+ pairs your `labels` should look like [0, 0, 1, 1]. labels SHOULD NOT
+ be all the same (e.g. [0, 0, 0, 0]) you will get a NaN result.
+ Shape [batch]
+ temperature: temperature scaling parameter.
+
+ Returns:
+ A scalar loss value of NT-Xent values averaged over all positive
+ pairs
+
+ .. versionadded:: 0.2.3
+ """
+ chex.assert_type([embeddings], float)
+ if labels.shape[0] != embeddings.shape[0]:
+ raise ValueError(
+ 'label dimension should match batch dimension in embeddings'
+ )
+
+ # cosine similarity matrix
+ xcs = cosine_similarity(
+ embeddings[None, :, :], embeddings[:, None, :]
+ ) / temperature
+
+ # finding positive and negative pairs
+ labels1 = jnp.expand_dims(labels, axis=1)
+ labels2 = jnp.expand_dims(labels, axis=0)
+ matches = labels1 == labels2
+ diffs = matches ^ 1
+ matches = jnp.bool_(matches - jnp.eye(matches.shape[0])) # no self cos
+
+ # replace 0 with -inf
+ xcs_diffs = jnp.where(diffs == 1, xcs, -jnp.inf)
+ xcs_matches = jnp.where(matches == 1, xcs, -jnp.inf)
+
+ # shifting for numeric stability
+ comb = jnp.concatenate((xcs_diffs, xcs_matches), axis=-1)
+ xcs_max = jnp.max(comb, axis=1, keepdims=True)
+ xcs_shift_diffs = xcs_diffs - lax.stop_gradient(xcs_max)
+ xcs_shift_matches = xcs_matches - lax.stop_gradient(xcs_max)
+
+ # calc loss
+ numer = xcs_shift_matches
+ numer_exp = jnp.exp(xcs_shift_matches)
+ denom = jnp.sum(jnp.exp(xcs_shift_diffs), axis=1, keepdims=True)
+ denom += numer_exp
+ log_softm = numer - jnp.log(denom)
+ loss = -jnp.where(matches == 1, log_softm, 0.0).sum() / matches.sum()
+
+ return loss
| diff --git a/optax/losses/_classification_test.py b/optax/losses/_classification_test.py
index b87d520ea..b75f3a088 100644
--- a/optax/losses/_classification_test.py
+++ b/optax/losses/_classification_test.py
@@ -862,6 +862,5 @@ def test_ignore_negative(self):
assert all(ce_loss[self.ts == 0] > 0)
assert all(focal_loss[self.ts == 0] == 0)
-
if __name__ == '__main__':
absltest.main()
diff --git a/optax/losses/_self_supervised_test.py b/optax/losses/_self_supervised_test.py
new file mode 100644
index 000000000..43823f6a3
--- /dev/null
+++ b/optax/losses/_self_supervised_test.py
@@ -0,0 +1,51 @@
+# Copyright 2024 DeepMind Technologies Limited. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for optax.losses._self_supervised."""
+
+from absl.testing import parameterized
+
+import chex
+import jax.numpy as jnp
+import numpy as np
+
+from optax.losses import _self_supervised
+
+
+class NtxentTest(parameterized.TestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.ys = jnp.array([
+ [-1.9540, 1.0780],
+ [ 0.2380, -0.5703],
+ [ 1.8745, -0.0195],
+ [-0.6719, -1.9210],
+ ])
+ self.ts_1 = jnp.array([0,0,1,1])
+ self.ts_2 = jnp.array([0,0,0,1])
+ # Calculated expected output
+ self.exp_1 = jnp.array(14.01032)
+ self.exp_2 = jnp.array(8.968544)
+
+ @chex.all_variants
+ def test_batched(self):
+ """Tests for a full batch."""
+ np.testing.assert_allclose(
+ self.variant(_self_supervised.ntxent)(self.ys, self.ts_1),
+ self.exp_1, atol=1e-4)
+
+ np.testing.assert_allclose(
+ self.variant(_self_supervised.ntxent)(self.ys, self.ts_2),
+ self.exp_2, atol=1e-4)
| diff --git a/docs/api/losses.rst b/docs/api/losses.rst
index 95cef9f99..41ceab134 100644
--- a/docs/api/losses.rst
+++ b/docs/api/losses.rst
@@ -14,6 +14,7 @@ Losses
kl_divergence
l2_loss
log_cosh
+ ntxent
safe_softmax_cross_entropy
sigmoid_binary_cross_entropy
sigmoid_focal_loss
@@ -61,6 +62,10 @@ Log hyperbolic cosine loss
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: log_cosh
+Normalized temperature scaled cross-entropy (NT-Xent) loss
+~~~~~~~~~~~~~~~~
+.. autofunction:: ntxent
+
Sigmoid binary cross-entropy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: sigmoid_binary_cross_entropy
| [
{
"components": [
{
"doc": "Normalized temperature scaled cross entropy loss (NT-Xent).\n\nReferences:\n T. Chen et al `A Simple Framework for Contrastive Learning of Visual \n Representations <http://arxiv.org/abs/2002.05709>`_, 2020\n kevinmusgrave.github.io/pytorch-metric-learning/losses/#nt... | [
"optax/losses/_classification_test.py::SoftmaxCrossEntropyTest::test_batched__with_device",
"optax/losses/_classification_test.py::SoftmaxCrossEntropyTest::test_batched__with_jit",
"optax/losses/_classification_test.py::SoftmaxCrossEntropyTest::test_batched__without_device",
"optax/losses/_classification_test... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Added a NTXent loss
An normalized temperature scaled cross entropy (NTXent) loss for a contrastive learning objective. I am fairly new to submitting pull requests to public repos, so I didn't add a ton of tests for this outside a batched test. Let me know if there is anything else I should add!
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in optax/losses/_self_supervised.py]
(definition of ntxent:)
def ntxent( embeddings: chex.Array, labels: chex.Array, temperature: chex.Numeric = 0.07 ) -> chex.Numeric:
"""Normalized temperature scaled cross entropy loss (NT-Xent).
References:
T. Chen et al `A Simple Framework for Contrastive Learning of Visual
Representations <http://arxiv.org/abs/2002.05709>`_, 2020
kevinmusgrave.github.io/pytorch-metric-learning/losses/#ntxentloss
Args:
emeddings: batch of embeddings, with shape [batch, feature_length]
labels: labels for groups that are positive pairs. e.g. if you have
a batch of 4 embeddings and the first two and last two were positive
pairs your `labels` should look like [0, 0, 1, 1]. labels SHOULD NOT
be all the same (e.g. [0, 0, 0, 0]) you will get a NaN result.
Shape [batch]
temperature: temperature scaling parameter.
Returns:
A scalar loss value of NT-Xent values averaged over all positive
pairs
.. versionadded:: 0.2.3"""
[end of new definitions in optax/losses/_self_supervised.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 1e08bccf195ac54e7d9d766eb5e69345bf0e3230 | |
tobymao__sqlglot-3252 | 3,252 | tobymao/sqlglot | null | 150a8270c9e1c5a74aeaeeedac2773c41760cb14 | 2024-04-01T08:33:11Z | diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index 5c953188a2..874ce9019a 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -261,6 +261,11 @@ class Parser(parser.Parser):
"ArgMax",
]
+ FUNC_TOKENS = {
+ *parser.Parser.FUNC_TOKENS,
+ TokenType.SET,
+ }
+
AGG_FUNC_MAPPING = (
lambda functions, suffixes: {
f"{f}{sfx}": (f, sfx) for sfx in (suffixes + [""]) for f in functions
@@ -321,6 +326,17 @@ class Parser(parser.Parser):
TokenType.FORMAT: lambda self: ("format", self._advance() or self._parse_id_var()),
}
+ CONSTRAINT_PARSERS = {
+ **parser.Parser.CONSTRAINT_PARSERS,
+ "INDEX": lambda self: self._parse_index_constraint(),
+ "CODEC": lambda self: self._parse_compress(),
+ }
+
+ SCHEMA_UNNAMED_CONSTRAINTS = {
+ *parser.Parser.SCHEMA_UNNAMED_CONSTRAINTS,
+ "INDEX",
+ }
+
def _parse_conjunction(self) -> t.Optional[exp.Expression]:
this = super()._parse_conjunction()
@@ -512,6 +528,27 @@ def _parse_on_property(self) -> t.Optional[exp.Expression]:
self._retreat(index)
return None
+ def _parse_index_constraint(
+ self, kind: t.Optional[str] = None
+ ) -> exp.IndexColumnConstraint:
+ # INDEX name1 expr TYPE type1(args) GRANULARITY value
+ this = self._parse_id_var()
+ expression = self._parse_conjunction()
+
+ index_type = self._match_text_seq("TYPE") and (
+ self._parse_function() or self._parse_var()
+ )
+
+ granularity = self._match_text_seq("GRANULARITY") and self._parse_term()
+
+ return self.expression(
+ exp.IndexColumnConstraint,
+ this=this,
+ expression=expression,
+ index_type=index_type,
+ granularity=granularity,
+ )
+
class Generator(generator.Generator):
QUERY_HINTS = False
STRUCT_DELIMITER = ("(", ")")
@@ -590,6 +627,9 @@ class Generator(generator.Generator):
exp.Array: inline_array_sql,
exp.CastToStrType: rename_func("CAST"),
exp.CountIf: rename_func("countIf"),
+ exp.CompressColumnConstraint: lambda self,
+ e: f"CODEC({self.expressions(e, key='this', flat=True)})",
+ exp.ComputedColumnConstraint: lambda self, e: f"ALIAS {self.sql(e, 'this')}",
exp.CurrentDate: lambda self, e: self.func("CURRENT_DATE"),
exp.DateAdd: date_delta_sql("DATE_ADD"),
exp.DateDiff: date_delta_sql("DATE_DIFF"),
@@ -742,3 +782,15 @@ def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> st
def prewhere_sql(self, expression: exp.PreWhere) -> str:
this = self.indent(self.sql(expression, "this"))
return f"{self.seg('PREWHERE')}{self.sep()}{this}"
+
+ def indexcolumnconstraint_sql(self, expression: exp.IndexColumnConstraint) -> str:
+ this = self.sql(expression, "this")
+ this = f" {this}" if this else ""
+ expr = self.sql(expression, "expression")
+ expr = f" {expr}" if expr else ""
+ index_type = self.sql(expression, "index_type")
+ index_type = f" TYPE {index_type}" if index_type else ""
+ granularity = self.sql(expression, "granularity")
+ granularity = f" GRANULARITY {granularity}" if granularity else ""
+
+ return f"INDEX{this}{expr}{index_type}{granularity}"
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 22f92b7528..9f72c290ad 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1665,13 +1665,16 @@ class GeneratedAsRowColumnConstraint(ColumnConstraintKind):
# https://dev.mysql.com/doc/refman/8.0/en/create-table.html
+# https://github.com/ClickHouse/ClickHouse/blob/master/src/Parsers/ParserCreateQuery.h#L646
class IndexColumnConstraint(ColumnConstraintKind):
arg_types = {
"this": False,
- "schema": True,
+ "schema": False,
"kind": False,
"index_type": False,
"options": False,
+ "expression": False, # Clickhouse
+ "granularity": False,
}
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index a2fcf3d5f6..474ba0504b 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4504,7 +4504,7 @@ def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.
constraints: t.List[exp.Expression] = []
- if not kind and self._match(TokenType.ALIAS):
+ if (not kind and self._match(TokenType.ALIAS)) or self._match_text_seq("ALIAS"):
constraints.append(
self.expression(
exp.ComputedColumnConstraint,
| diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index f716584650..c5f9847cbb 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -154,7 +154,9 @@ def test_clickhouse(self):
self.validate_identity("TRUNCATE TABLE t1 ON CLUSTER test_cluster")
self.validate_identity("TRUNCATE DATABASE db")
self.validate_identity("TRUNCATE DATABASE db ON CLUSTER test_cluster")
-
+ self.validate_identity(
+ "CREATE TABLE t (foo String CODEC(LZ4HC(9), ZSTD, DELTA), size String ALIAS formatReadableSize(size_bytes), INDEX idx1 a TYPE bloom_filter(0.001) GRANULARITY 1, INDEX idx2 a TYPE set(100) GRANULARITY 2, INDEX idx3 a TYPE minmax GRANULARITY 3)"
+ )
self.validate_all(
"SELECT arrayJoin([1,2,3])",
write={
| [] | [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse"
] | [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(clickhouse): CREATE TABLE computed columns, column compression, index
Fixes #3243
Design notes
--------------------
- `ALIAS` is now parsed as `exp.ComputedColumnConstraint`, similarly to T-SQL's computed columns
- `CODEC(...)` is now parsed as `exp.CompressColumnConstraint`
- According to Clickhouse's parser, the index syntax is `INDEX name1 expr TYPE type1(args) GRANULARITY value`. As this is not properly documented yet, I decided to fully separate it from the existing `exp.IndexColumnConstraint` parsing/generation
Docs
---------
- [ALIAS](https://clickhouse.com/docs/en/sql-reference/statements/create/table#alias)
- [CODECs](https://clickhouse.com/docs/en/sql-reference/statements/create/table#column-compression-codecs)
- [Index syntax](https://github.com/ClickHouse/ClickHouse/blob/master/src/Parsers/ParserCreateQuery.h#L646)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Extend support CREATE TABLE statement for clickhouse
In clickhouse, I cannot parse CREATE TABLE statements when there are:
* [ALIAS](https://clickhouse.com/docs/en/sql-reference/statements/create/table#alias)
* [Column Compression Codecs](https://clickhouse.com/docs/en/sql-reference/statements/create/table#column-compression-codecs)
* INDEXes - Although this is the most important for me, I am unable to find the relevant specification. Only source code [1](https://github.com/ClickHouse/ClickHouse/blob/master/src/Parsers/ParserCreateQuery.h#L646), [2](https://github.com/ClickHouse/ClickHouse/blob/master/src/Parsers/ParserCreateQuery.cpp#L175)
Examples of valid CREATE TABLE statements:
```
CREATE TABLE x
(
`a` String,
`b` ALIAS a
)
ENGINE = MergeTree
ORDER BY tuple()
```
```
CREATE TABLE y
(
`a` Float64 CODEC(LZ4HC(9)),
`b` Float32 CODEC(Delta, ZSTD),
`c` Float32 CODEC(NONE)
)
ENGINE = MergeTree
ORDER BY tuple()
```
```
CREATE TABLE z
(
`a` String,
INDEX a_idx1 a TYPE bloom_filter(0.001) GRANULARITY 1,
INDEX a_idx2 a TYPE set(100) GRANULARITY 2,
INDEX a_idx3 a TYPE minmax GRANULARITY 2
)
ENGINE = MergeTree
ORDER BY tuple()
```
----------
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
statsmodels__statsmodels-9186 | 9,186 | statsmodels/statsmodels | null | 8a844255a187b6549a9a9c2356098fd5f08835c0 | 2024-03-29T18:02:44Z | diff --git a/statsmodels/robust/norms.py b/statsmodels/robust/norms.py
index a00e5197145..b3993a68e61 100644
--- a/statsmodels/robust/norms.py
+++ b/statsmodels/robust/norms.py
@@ -39,6 +39,8 @@ class RobustNorm:
Springer, New York, 2002.
"""
+ continuous = 1
+
def rho(self, z):
"""
The robust criterion estimator function.
@@ -97,6 +99,12 @@ class LeastSquares(RobustNorm):
statsmodels.robust.norms.RobustNorm
"""
+ continuous = 2
+ redescending = "not"
+
+ def max_rho(self):
+ return np.inf
+
def rho(self, z):
"""
The least squares estimator rho function
@@ -166,6 +174,7 @@ def psi_deriv(self, z):
-----
Used to estimate the robust covariance matrix.
"""
+ z = np.asarray(z)
return np.ones(z.shape, np.float64)
@@ -184,9 +193,22 @@ class HuberT(RobustNorm):
statsmodels.robust.norms.RobustNorm
"""
+ continuous = 1
+ redescending = "not"
+
def __init__(self, t=1.345):
self.t = t
+ def _set_tuning_param(self, c):
+ """Set and change the tuning parameter of the Norm.
+
+ Warning: this needs to wipe cached attributes that depend on the param.
+ """
+ self.t = c
+
+ def max_rho(self):
+ return np.inf
+
def _subset(self, z):
"""
Huber's T is defined piecewise over the range for z
@@ -294,9 +316,15 @@ class RamsayE(RobustNorm):
statsmodels.robust.norms.RobustNorm
"""
+ continuous = 2
+ redescending = "soft"
+
def __init__(self, a=.3):
self.a = a
+ def max_rho(self):
+ return np.inf
+
def rho(self, z):
r"""
The robust criterion function for Ramsay's Ea.
@@ -384,9 +412,23 @@ class AndrewWave(RobustNorm):
--------
statsmodels.robust.norms.RobustNorm
"""
+
+ continuous = 1
+ redescending = "hard"
+
def __init__(self, a=1.339):
self.a = a
+ def _set_tuning_param(self, a):
+ """Set and change the tuning parameter of the Norm.
+
+ Warning: this needs to wipe cached attributes that depend on the param.
+ """
+ self.a = a
+
+ def max_rho(self):
+ return 2 * self.a**2
+
def _subset(self, z):
"""
Andrew's wave is defined piecewise over the range of z.
@@ -411,7 +453,7 @@ def rho(self, z):
.. math::
rho(z) & = a^2 *(1-cos(z/a)), |z| \leq a\pi \\
- rho(z) & = 2a, |z|>q\pi
+ rho(z) & = 2a^2, |z|>a\pi
"""
a = self.a
@@ -505,9 +547,22 @@ class TrimmedMean(RobustNorm):
statsmodels.robust.norms.RobustNorm
"""
+ continuous = 0
+ redescending = "hard"
+
def __init__(self, c=2.):
self.c = c
+ def _set_tuning_param(self, c):
+ """Set and change the tuning parameter of the Norm.
+
+ Warning: this needs to wipe cached attributes that depend on the param.
+ """
+ self.c = c
+
+ def max_rho(self):
+ return self.rho(self.c)
+
def _subset(self, z):
"""
Least trimmed mean is defined piecewise over the range of z.
@@ -611,11 +666,26 @@ class Hampel(RobustNorm):
statsmodels.robust.norms.RobustNorm
"""
+ continuous = 1
+ redescending = "hard"
+
def __init__(self, a=2., b=4., c=8.):
self.a = a
self.b = b
self.c = c
+ def _set_tuning_param(self, c):
+ """Set and change the tuning parameter of the Norm.
+
+ Warning: this needs to wipe cached attributes that depend on the param.
+ """
+ self.c = c
+ self.a = c / 4
+ self.b = c / 2
+
+ def max_rho(self):
+ return self.rho(self.c)
+
def _subset(self, z):
"""
Hampel's function is defined piecewise over the range of z
@@ -786,9 +856,22 @@ class TukeyBiweight(RobustNorm):
Tukey's biweight is sometime's called bisquare.
"""
+ continuous = 2
+ redescending = "hard"
+
def __init__(self, c=4.685):
self.c = c
+ def _set_tuning_param(self, c):
+ """Set and change the tuning parameter of the Norm.
+
+ Warning: this needs to wipe cached attributes that depend on the param.
+ """
+ self.c = c
+
+ def max_rho(self):
+ return self.rho(self.c)
+
def _subset(self, z):
"""
Tukey's biweight is defined piecewise over the range of z
@@ -857,7 +940,7 @@ def weights(self, z):
psi(z) = 0 for \|z\| > R
"""
-
+ z = np.asarray(z)
subset = self._subset(z)
return (1 - (z / self.c)**2)**2 * subset
@@ -874,6 +957,264 @@ def psi_deriv(self, z):
- (4*z**2/self.c**2) * (1-(z/self.c)**2))
+class TukeyQuartic(RobustNorm):
+ """
+
+ Varinant of Tukey's biweight function with power 4 for M-estimation.
+
+ Parameters
+ ----------
+ c : float, optional
+ The tuning constant for Tukey's Biweight. The default value is
+ c = ???.
+
+ Notes
+ -----
+ This is a variation of Tukey's biweight (bisquare) function where
+ the weight function has power 4 instead of power 2 in the inner term.
+ """
+
+ continuous = 2
+ redescending = "hard"
+
+ def __init__(self, c=3.61752, k=4):
+ # TODO: c needs to be changed if k != 4
+ # also, I think implementation assumes k is even integer
+ self.c = c
+ self.k = k
+
+ def _set_tuning_param(self, c):
+ """Set and change the tuning parameter of the Norm.
+
+ Warning: this needs to wipe cached attributes that depend on the param.
+ """
+ self.c = c
+
+ def max_rho(self):
+ return self.rho(self.c)
+
+ def _subset(self, z):
+ """
+ TukeyQuartic is defined piecewise over the range of z
+ """
+ z = np.abs(np.asarray(z))
+ return np.less_equal(z, self.c)
+
+ def rho(self, z):
+ r"""
+ The robust criterion function for TukeyQuartic norm.
+
+ Parameters
+ ----------
+ z : array_like
+ 1d array
+
+ Returns
+ -------
+ rho : ndarray
+ rho(z) = 1 / 2 * z**2 * (1 - 4 / (k + 2) * x**k +
+ 1 / (k + 1) * x**(2 * k)) for \|z\| <= c
+
+ rho(z) = 0 for \|z\| > c
+
+ where x = z / c
+ """
+ c = self.c
+ k = self.k
+ subset = self._subset(z)
+ x = z / c
+ rhoc = 1 / 2 * c**2 * (1 - 4 / (k + 2) + 1 / (k + 1))
+ # integral x (1 - x^k)^2 dx =
+ # 1/2 x^2 (x^(2 k)/(k + 1) - (4 x^k)/(k + 2) + 1) + constant
+ # integral x (1 - (x/c)^k)^2 dx =
+ # 1/2 x^2 (-(4 (x/c)^k)/(k + 2) + (x/c)^(2 k)/(k + 1) + 1) +
+ # constant
+ rh = (
+ subset * 1 / 2 * z**2 *
+ (1 - 4 / (k + 2) * x**k + 1 / (k + 1) * x**(2 * k)) + # noqa
+ (1 - subset) * rhoc
+ )
+ return rh
+
+ def psi(self, z):
+ r"""
+ The psi function of TukeyQuartic norm.
+
+ The analytic derivative of rho.
+
+ Parameters
+ ----------
+ z : array_like
+ 1d array
+
+ Returns
+ -------
+ psi : ndarray
+ psi(z) = z*(1 - (z/c)**4)**2 for \|z\| <= c
+
+ psi(z) = psi(c) for \|z\| > c
+ """
+ k = self.k
+ z = np.asarray(z)
+ subset = self._subset(z)
+ return z * (1 - (z / self.c)**k)**2 * subset
+
+ def weights(self, z):
+ r"""
+ TukeyQuartic weighting function for the IRLS algorithm.
+
+ The psi function scaled by z.
+
+ Parameters
+ ----------
+ z : array_like
+ 1d array
+
+ Returns
+ -------
+ weights : ndarray
+ psi(z) = (1 - (z/c)**4)**2 for \|z\| <= R
+
+ psi(z) = 0 for \|z\| > R
+ """
+ k = self.k
+ z = np.asarray(z)
+ subset = self._subset(z)
+ return (1 - (z / self.c)**k)**2 * subset
+
+ def psi_deriv(self, z):
+ """
+ The derivative of the TukeyQuartic psi function.
+
+ Notes
+ -----
+ Used to estimate the robust covariance matrix.
+ """
+ c = self.c
+ k = self.k
+ subset = self._subset(z)
+ x = z / c
+
+ # d/dx(x (1 - (x/c)^k)^2) = -(1 - (x/c)^k) (2 k (x/c)^k + (x/c)^k - 1)
+ return subset * (1 - x**k) * (1 - (2 * k + 1) * x**k)
+
+
+class StudentT(RobustNorm):
+ """Robust norm based on t distribution.
+
+ Rho is a rescaled version of the t-loglikelihood function after dropping
+ constant terms.
+ The norms are rescaled so that the largest weights are 1 and
+ the second derivative of the rho function at zero is equal to 1.
+
+ The maximum likelihood estimator based on the loglikelihood
+ function of the t-distribution is available in
+ ``statsmodels.miscmodels`, which can be used to also
+ estimate scale and degrees of freedom by MLE.
+
+ """
+
+ continuous = 2
+ redescending = "soft"
+
+ def __init__(self, c=2.3849, df=4):
+ self.c = c
+ self.df = df
+
+ def _set_tuning_param(self, c):
+ """Set and change the tuning parameter of the Norm.
+
+ Warning: this needs to wipe cached attributes that depend on the param.
+ """
+ self.c = c
+
+ def max_rho(self):
+ return np.inf
+
+ def rho(self, z):
+ """
+ The rho function of the StudentT norm.
+
+ Parameters
+ ----------
+ z : ndarray
+ 1d array
+
+ Returns
+ -------
+ rho : ndarray
+ rho(z) = (c**2 * df / 2.) * log(df + (z / c)**2) - const
+ The ``const`` shifts the rho function so that rho(0) = 0.
+ """
+ c = self.c
+ df = self.df
+ z = np.asarray(z)
+ const = (c**2 * df / 2.) * np.log(df) if df != 0 else 0
+ return (c**2 * df / 2.) * np.log(df + (z / c)**2) - const
+
+ def psi(self, z):
+ """
+ The psi function of the StudentT norm.
+
+ The analytic derivative of rho.
+
+ Parameters
+ ----------
+ z : array_like
+ 1d array
+
+ Returns
+ -------
+ psi : ndarray
+ psi(z) = z
+ """
+
+ c = self.c
+ df = self.df
+ z = np.asarray(z)
+ return z * df / (df + (z / c)**2)
+
+ def weights(self, z):
+ """
+ The weighting function for the IRLS algorithm of the StudentT norm.
+
+ The psi function scaled by the input z
+
+ Parameters
+ ----------
+ z : array_like
+ 1d array
+
+ Returns
+ -------
+ weights : ndarray
+ weights(z) = np.ones(z.shape)
+ """
+
+ c = self.c
+ df = self.df
+ z = np.asarray(z)
+ return df / (df + (z / c)**2)
+
+ def psi_deriv(self, z):
+ """
+ The derivative of the psi function of the StudentT norm.
+
+ Returns
+ -------
+ psi_deriv : ndarray
+ ones(z.shape)
+
+ Notes
+ -----
+ Used to estimate the robust covariance matrix.
+ """
+ c = self.c
+ df = self.df
+ x = np.asarray(z) / c
+ return - 2 * df * x**2 / (df + x**2)**2 + df / (df + x**2)
+
+
class MQuantileNorm(RobustNorm):
"""M-quantiles objective function based on a base norm
@@ -924,6 +1265,8 @@ class MQuantileNorm(RobustNorm):
doi:10.2307/1911031.
"""
+ continuous = 1
+
def __init__(self, q, base_norm):
self.q = q
self.base_norm = base_norm
diff --git a/statsmodels/robust/tools.py b/statsmodels/robust/tools.py
new file mode 100644
index 00000000000..a424d40f626
--- /dev/null
+++ b/statsmodels/robust/tools.py
@@ -0,0 +1,244 @@
+"""
+Created on Mar. 11, 2024 10:41:37 p.m.
+
+Author: Josef Perktold
+License: BSD-3
+"""
+
+import numpy as np
+from scipy import stats, integrate, optimize
+
+from statsmodels.tools.testing import Holder
+
+
+def _var_normal(norm):
+ """Variance factor for asymptotic relative efficiency of mean M-estimator.
+
+ The reference distribution is the standard normal distribution.
+ This assumes that the psi function is continuous.
+
+ Relative efficiency is 1 / var_normal
+
+ Parameters
+ ----------
+ norm : instance of a RobustNorm subclass.
+ Norm for which variance for relative efficiency is computed.
+
+ Returns
+ -------
+ Variance factor.
+
+ Notes
+ -----
+ This function does not verify that the assumption on the psi function and
+ it's derivative hold.
+
+ Examples
+ --------
+ The following computes the relative efficiency of an M-estimator for the
+ mean using HuberT norm. At the default tuning parameter, the relative
+ efficiency is 95%.
+
+ >>> import statsmodels.robust import norms
+ >>> v = _var_normal(norms.HuberT())
+ >>> eff = 1 / v
+ >>> v, eff
+ (1.0526312909084732, 0.9500002599551741)
+
+ Notes
+ -----
+ S-estimator for mean and regression also have the same variance and
+ efficiency computation as M-estimators. Therefore, this function can
+ be used also for S-estimators and other estimators that .
+
+ Reference
+ ---------
+ Menenez et al., but it's also in all text books for robust statistics.
+
+
+ """
+ num = stats.norm.expect(lambda x: norm.psi(x)**2)
+ denom = stats.norm.expect(lambda x: norm.psi_deriv(x))**2
+ return num / denom
+
+
+def _var_normal_jump(norm):
+ """Variance factor for asymptotic relative efficiency of mean M-estimator.
+
+ The reference distribution is the standard normal distribution.
+ This allows for the case when the psi function is not continuous, i.e.
+ has jumps as in TrimmedMean norm.
+
+ Relative efficiency is 1 / var_normal
+
+ Parameters
+ ----------
+ norm : instance of a RobustNorm subclass.
+ Norm for which variance for relative efficiency is computed.
+
+ Returns
+ -------
+ Variance factor.
+
+ Notes
+ -----
+ This function does not verify that the assumption on the psi function and
+ it's derivative hold.
+
+ Examples
+ --------
+
+ >>> import statsmodels.robust import norms
+ >>> v = _var_normal_jump(norms.HuberT())
+ >>> eff = 1 / v
+ >>> v, eff
+ (1.0526312908510451, 0.950000260007003)
+
+ Reference
+ ---------
+ Menenez et al., but it's also in all text books for robust statistics.
+
+
+ """
+ num = stats.norm.expect(lambda x: norm.psi(x)**2)
+
+ def func(x):
+ # derivative normal pdf
+ # d/dx(exp(-x^2/2)/sqrt(2 π)) = -(e^(-x^2/2) x)/sqrt(2 π)
+ return norm.psi(x) * (- x * np.exp(-x**2/2) / np.sqrt(2 * np.pi))
+
+ denom = integrate.quad(func, -np.inf, np.inf)[0]
+ return num / denom**2
+
+
+def _get_tuning_param(norm, eff, kwd="c", kwargs=None, use_jump=False,
+ bracket=None,
+ ):
+ """Tuning parameter for RLM norms for required relative efficiency.
+
+ Parameters
+ ----------
+ norm : instance of RobustNorm subclass
+ eff : float in (0, 1)
+ Required asymptotic relative efficiency compared to least squares
+ at the normal reference distribution. For example, ``eff=0.95`` for
+ 95% efficiency.
+ kwd : str
+ Name of keyword for tuning parameter.
+ kwargs : dict or None
+ Dict for other keyword parameters.
+ use_jump : bool
+ If False (default), then use computation that require continuous
+ psi function.
+ If True, then use computation then the psi function can have jump
+ discontinuities.
+ bracket : None or tuple
+ Bracket with lower and upper bounds to use for scipy.optimize.brentq.
+ If None, than a default bracket, currently [0.1, 10], is used.
+
+ Returns
+ -------
+ Float : Value of tuning parameter to achieve asymptotic relative
+ efficiency.
+
+ """
+ kwds = {} if kwargs is None else kwargs
+ if bracket is None:
+ bracket = [0.1, 10]
+
+ if not use_jump:
+ def func(c):
+ # kwds.update({kwd: c})
+ # return _var_normal(norm(**kwds)) - 1 / eff
+ norm._set_tuning_param(c)
+ return _var_normal(norm) - 1 / eff
+ else:
+ def func(c):
+ norm._set_tuning_param(c)
+ return _var_normal_jump(norm(**kwds) - 1 / eff)
+
+ res = optimize.brentq(func, *bracket)
+ return res
+
+
+def tuning_s_estimator_mean(norm, breakdown=None):
+ """Tuning parameter and scale bias correction for S-estimators of mean.
+
+ The reference distribution is the normal distribution.
+ This requires a (hard) redescending norm, i.e. with finite max rho.
+
+ Parameters
+ ----------
+ norm : instance of RobustNorm subclass
+ breakdown : float or iterable of float in (0, 0.5]
+ Desired breakdown point between 0 and 0.5.
+ Default if breakdown is None is a list of breakdown points.
+
+ Returns
+ -------
+ Holder instance with the following attributes :
+
+ - `breakdown` : breakdown point
+ - `eff` : relative efficiency
+ - `param` : tuning parameter for norm
+ - `scale_bias` : correction term for Fisher consistency.
+
+ Notes
+ -----
+ Based on Rousseeuw and Leroy (1987). See table 19, p. 142 that can be
+ replicated by this function for TukeyBiweight norm.
+ Note, the results of this function are based computation without rounding
+ to decimal precision, and differ in some cases in the last digit from
+ the table by Rousseeuw and Leroy.
+
+ Numerical expectation and root finding based on scipy integrate and
+ optimize.
+
+ TODO: more options for details, numeric approximation and root finding.
+ There is currently no feasibility check in functions.
+
+ Reference
+ ---------
+ Rousseeuw and Leroy book
+
+
+ """
+ if breakdown is None:
+ bps = [0.5, 0.45, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.1, 0.05]
+ else:
+ # allow for scalar bp
+ try:
+ _ = iter(breakdown)
+ bps = breakdown
+ except TypeError:
+ bps = [breakdown]
+
+ def func(c):
+ norm_ = norm
+ norm_._set_tuning_param(c)
+ bp = stats.norm.expect(lambda x : norm_.rho(x)) / norm_.max_rho()
+ return bp
+
+ res = []
+ for bp in bps:
+ c_bp = optimize.brentq(lambda c0: func(c0) - bp, 0.1, 10)
+ norm._set_tuning_param(c_bp) # inplace modification
+ eff = 1 / _var_normal(norm)
+ b = stats.norm.expect(lambda x : norm.rho(x))
+ res.append([bp, eff, c_bp, b])
+
+ if np.size(bps) > 1:
+ res = np.asarray(res).T
+ else:
+ # use one list
+ res = res[0]
+
+ res2 = Holder(
+ breakdown=res[0],
+ eff=res[1],
+ param=res[2],
+ scale_bias=res[3],
+ all=res,
+ )
+
+ return res2
| diff --git a/statsmodels/robust/tests/test_norms.py b/statsmodels/robust/tests/test_norms.py
index cb87c65368b..c626c23eb06 100644
--- a/statsmodels/robust/tests/test_norms.py
+++ b/statsmodels/robust/tests/test_norms.py
@@ -18,8 +18,13 @@
norms_other = [
(norms.LeastSquares, ()),
(norms.TrimmedMean, (1.9,)), # avoid arg at integer used in example
+ (norms.HuberT, ()),
(norms.AndrewWave, ()),
(norms.RamsayE, ()),
+ (norms.Hampel, ()),
+ (norms.TukeyBiweight, ()),
+ (norms.TukeyQuartic, ()),
+ (norms.StudentT, ()),
# norms.MQuantileNorm, # requires keywords in init
]
@@ -75,7 +80,8 @@ def test_norms_consistent(case):
# test that norm methods are consistent with each other
ncls, args = case
norm = ncls(*args)
- x = np.array([-9, -6, -2, -1, 0, 1, 2, 6, 9], dtype=float)
+ x = np.array([-9, -6, -2, -1, 0, 1, 2 - 1e-4, 6, 9], dtype=float)
+ # 2 - 1e-4 because Hample psi has discontinuity at 2, numdiff problem
weights = norm.weights(x)
rho = norm.rho(x) # not used
@@ -87,9 +93,23 @@ def test_norms_consistent(case):
assert np.all(np.diff(rho[4:]) >= 0)
assert np.all(np.diff(rho[:4]) <= 0)
+ # check weights at and around zero
+ assert_allclose(weights[4], 1, atol=1e-12)
+ assert np.all(norm.weights([-1e-6, 1e-6]) >= 1 - 1e-5)
+
# avoid zero division nan:
assert_allclose(weights, (psi + 1e-50) / (x + 1e-50), rtol=1e-6, atol=1e-8)
psid = _approx_fprime_scalar(x, norm.rho)
assert_allclose(psi, psid, rtol=1e-6, atol=1e-6)
psidd = _approx_fprime_scalar(x, norm.psi)
assert_allclose(psi_deriv, psidd, rtol=1e-6, atol=1e-8)
+
+ # attributes
+ if norm.redescending == "hard":
+ assert_allclose(norm.max_rho(), norm.rho(100), rtol=1e-12)
+ else:
+ assert np.isposinf(norm.max_rho())
+
+ if norm.redescending == "soft":
+ # we don't have info where argmax psi is, use simple values for x
+ assert norm.psi(100) < norm.psi(2)
diff --git a/statsmodels/robust/tests/test_tools.py b/statsmodels/robust/tests/test_tools.py
new file mode 100644
index 00000000000..a3adbb708b6
--- /dev/null
+++ b/statsmodels/robust/tests/test_tools.py
@@ -0,0 +1,96 @@
+"""
+Created on Mar. 12, 2024 11:10:14 a.m.
+
+Author: Josef Perktold
+License: BSD-3
+"""
+
+import pytest
+
+import numpy as np
+from numpy.testing import assert_allclose
+
+# from scipy import stats
+
+from statsmodels.robust.norms import (
+ AndrewWave,
+ TrimmedMean,
+ TukeyBiweight,
+ TukeyQuartic,
+ Hampel,
+ HuberT,
+ StudentT,
+ )
+
+from statsmodels.robust.tools import (
+ _var_normal,
+ _var_normal_jump,
+ _get_tuning_param,
+ tuning_s_estimator_mean,
+ )
+
+
+effs = [0.9, 0.95, 0.98, 0.99]
+
+results_menenez = [
+ (HuberT(), [0.9818, 1.345, 1.7459, 2.0102]),
+ (TukeyBiweight(), [3.8827, 4.6851, 5.9207, 7.0414]),
+ (TukeyQuartic(), [3.1576, 3.6175, 4.2103, 4.6664]),
+ (TukeyQuartic(k=2), [3.8827, 4.6851, 5.9207, 7.0414]), # biweight
+ (StudentT(df=1), [1.7249, 2.3849, 3.3962, 4.2904]), # Cauchy
+ (AndrewWave(), [1.1117, 1.338, 1.6930, 2.0170]),
+ # (Hampel(), [4.4209, 5.4, 7.00609, 8.0456]),
+ # rounding problem in Hampel, menenez use a as tuning parameter
+ (Hampel(), [4.4208, 5.5275, 7.006, 8.0456]),
+ (TrimmedMean(), [2.5003, 2.7955, 3.1365, 3.3682]),
+ ]
+
+
+@pytest.mark.parametrize("case", results_menenez)
+def test_eff(case):
+ norm, res2 = case
+
+ if norm.continuous == 2:
+ var_func = _var_normal
+ else:
+ var_func = _var_normal_jump
+
+ res_eff = []
+ for c in res2:
+ norm._set_tuning_param(c)
+ res_eff.append(1 / var_func(norm))
+
+ assert_allclose(res_eff, effs, atol=0.0005)
+
+ for c in res2:
+ # bp = stats.norm.expect(lambda x : norm.rho(x)) / norm.rho(norm.c)
+ norm._set_tuning_param(c)
+ eff = 1 / _var_normal(norm)
+ tune = _get_tuning_param(norm, eff)
+ assert_allclose(tune, c, rtol=1e-6, atol=5e-4)
+
+
+def test_hampel_eff():
+ # we cannot solve for multiple tuning parameters
+ eff = 0.95
+ # tuning parameters from Menezes et al 2021
+ res_eff = 1 / _var_normal_jump(Hampel(a=1.35, b=2.70, c=5.40))
+ assert_allclose(res_eff, eff, atol=0.005)
+
+
+def test_tuning_biweight():
+ # regression numbers but verified at 5 decimals
+ norm = TukeyBiweight()
+ res = tuning_s_estimator_mean(norm, breakdown=0.5)
+ res1 = [0.28682611623149523, 1.5476449837305166, 0.1996004163055662]
+ assert_allclose(res.all[1:], res1, rtol=1e-7)
+
+
+@pytest.mark.parametrize("case", results_menenez)
+def test_tuning_smoke(case):
+ # regression numbers but verified at 5 decimals
+ norm, _ = case
+ # norm = Norm()
+ if np.isfinite(norm.max_rho()):
+ res = tuning_s_estimator_mean(norm, breakdown=0.5)
+ assert res is not None
| [
{
"components": [
{
"doc": "",
"lines": [
105,
106
],
"name": "LeastSquares.max_rho",
"signature": "def max_rho(self):",
"type": "function"
},
{
"doc": "Set and change the tuning parameter of the Norm.\n\nWarning: this... | [
"statsmodels/robust/tests/test_norms.py::test_norm[case0-int]",
"statsmodels/robust/tests/test_norms.py::test_norm[case0-float64]",
"statsmodels/robust/tests/test_norms.py::test_norm[case0-complex128]",
"statsmodels/robust/tests/test_norms.py::test_norm[case1-int]",
"statsmodels/robust/tests/test_norms.py::... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH: robust: tools and more norms
related
#8808
#1372
this adds tools for asymptotic efficency, breakdown point computation and choice of tuning parameter of norms.
currently only for univariate
this adds some additional norms, TukeyQuartic and StudentT (I also have Cauchy norm, but might leave it out as special case of StudentT with df=1)
plan for next, either in this PR or next PR are more general (M-)scale estimators #9171
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in statsmodels/robust/norms.py]
(definition of LeastSquares.max_rho:)
def max_rho(self):
(definition of HuberT._set_tuning_param:)
def _set_tuning_param(self, c):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param."""
(definition of HuberT.max_rho:)
def max_rho(self):
(definition of RamsayE.max_rho:)
def max_rho(self):
(definition of AndrewWave._set_tuning_param:)
def _set_tuning_param(self, a):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param."""
(definition of AndrewWave.max_rho:)
def max_rho(self):
(definition of TrimmedMean._set_tuning_param:)
def _set_tuning_param(self, c):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param."""
(definition of TrimmedMean.max_rho:)
def max_rho(self):
(definition of Hampel._set_tuning_param:)
def _set_tuning_param(self, c):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param."""
(definition of Hampel.max_rho:)
def max_rho(self):
(definition of TukeyBiweight._set_tuning_param:)
def _set_tuning_param(self, c):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param."""
(definition of TukeyBiweight.max_rho:)
def max_rho(self):
(definition of TukeyQuartic:)
class TukeyQuartic(RobustNorm):
"""Varinant of Tukey's biweight function with power 4 for M-estimation.
Parameters
----------
c : float, optional
The tuning constant for Tukey's Biweight. The default value is
c = ???.
Notes
-----
This is a variation of Tukey's biweight (bisquare) function where
the weight function has power 4 instead of power 2 in the inner term."""
(definition of TukeyQuartic.__init__:)
def __init__(self, c=3.61752, k=4):
(definition of TukeyQuartic._set_tuning_param:)
def _set_tuning_param(self, c):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param."""
(definition of TukeyQuartic.max_rho:)
def max_rho(self):
(definition of TukeyQuartic._subset:)
def _subset(self, z):
"""TukeyQuartic is defined piecewise over the range of z"""
(definition of TukeyQuartic.rho:)
def rho(self, z):
"""The robust criterion function for TukeyQuartic norm.
Parameters
----------
z : array_like
1d array
Returns
-------
rho : ndarray
rho(z) = 1 / 2 * z**2 * (1 - 4 / (k + 2) * x**k +
1 / (k + 1) * x**(2 * k)) for \|z\| <= c
rho(z) = 0 for \|z\| > c
where x = z / c"""
(definition of TukeyQuartic.psi:)
def psi(self, z):
"""The psi function of TukeyQuartic norm.
The analytic derivative of rho.
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z*(1 - (z/c)**4)**2 for \|z\| <= c
psi(z) = psi(c) for \|z\| > c"""
(definition of TukeyQuartic.weights:)
def weights(self, z):
""" TukeyQuartic weighting function for the IRLS algorithm.
The psi function scaled by z.
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
psi(z) = (1 - (z/c)**4)**2 for \|z\| <= R
psi(z) = 0 for \|z\| > R"""
(definition of TukeyQuartic.psi_deriv:)
def psi_deriv(self, z):
"""The derivative of the TukeyQuartic psi function.
Notes
-----
Used to estimate the robust covariance matrix."""
(definition of StudentT:)
class StudentT(RobustNorm):
"""Robust norm based on t distribution.
Rho is a rescaled version of the t-loglikelihood function after dropping
constant terms.
The norms are rescaled so that the largest weights are 1 and
the second derivative of the rho function at zero is equal to 1.
The maximum likelihood estimator based on the loglikelihood
function of the t-distribution is available in
``statsmodels.miscmodels`, which can be used to also
estimate scale and degrees of freedom by MLE."""
(definition of StudentT.__init__:)
def __init__(self, c=2.3849, df=4):
(definition of StudentT._set_tuning_param:)
def _set_tuning_param(self, c):
"""Set and change the tuning parameter of the Norm.
Warning: this needs to wipe cached attributes that depend on the param."""
(definition of StudentT.max_rho:)
def max_rho(self):
(definition of StudentT.rho:)
def rho(self, z):
"""The rho function of the StudentT norm.
Parameters
----------
z : ndarray
1d array
Returns
-------
rho : ndarray
rho(z) = (c**2 * df / 2.) * log(df + (z / c)**2) - const
The ``const`` shifts the rho function so that rho(0) = 0."""
(definition of StudentT.psi:)
def psi(self, z):
"""The psi function of the StudentT norm.
The analytic derivative of rho.
Parameters
----------
z : array_like
1d array
Returns
-------
psi : ndarray
psi(z) = z"""
(definition of StudentT.weights:)
def weights(self, z):
"""The weighting function for the IRLS algorithm of the StudentT norm.
The psi function scaled by the input z
Parameters
----------
z : array_like
1d array
Returns
-------
weights : ndarray
weights(z) = np.ones(z.shape)"""
(definition of StudentT.psi_deriv:)
def psi_deriv(self, z):
"""The derivative of the psi function of the StudentT norm.
Returns
-------
psi_deriv : ndarray
ones(z.shape)
Notes
-----
Used to estimate the robust covariance matrix."""
[end of new definitions in statsmodels/robust/norms.py]
[start of new definitions in statsmodels/robust/tools.py]
(definition of _var_normal:)
def _var_normal(norm):
"""Variance factor for asymptotic relative efficiency of mean M-estimator.
The reference distribution is the standard normal distribution.
This assumes that the psi function is continuous.
Relative efficiency is 1 / var_normal
Parameters
----------
norm : instance of a RobustNorm subclass.
Norm for which variance for relative efficiency is computed.
Returns
-------
Variance factor.
Notes
-----
This function does not verify that the assumption on the psi function and
it's derivative hold.
Examples
--------
The following computes the relative efficiency of an M-estimator for the
mean using HuberT norm. At the default tuning parameter, the relative
efficiency is 95%.
>>> import statsmodels.robust import norms
>>> v = _var_normal(norms.HuberT())
>>> eff = 1 / v
>>> v, eff
(1.0526312909084732, 0.9500002599551741)
Notes
-----
S-estimator for mean and regression also have the same variance and
efficiency computation as M-estimators. Therefore, this function can
be used also for S-estimators and other estimators that .
Reference
---------
Menenez et al., but it's also in all text books for robust statistics."""
(definition of _var_normal_jump:)
def _var_normal_jump(norm):
"""Variance factor for asymptotic relative efficiency of mean M-estimator.
The reference distribution is the standard normal distribution.
This allows for the case when the psi function is not continuous, i.e.
has jumps as in TrimmedMean norm.
Relative efficiency is 1 / var_normal
Parameters
----------
norm : instance of a RobustNorm subclass.
Norm for which variance for relative efficiency is computed.
Returns
-------
Variance factor.
Notes
-----
This function does not verify that the assumption on the psi function and
it's derivative hold.
Examples
--------
>>> import statsmodels.robust import norms
>>> v = _var_normal_jump(norms.HuberT())
>>> eff = 1 / v
>>> v, eff
(1.0526312908510451, 0.950000260007003)
Reference
---------
Menenez et al., but it's also in all text books for robust statistics."""
(definition of _var_normal_jump.func:)
def func(x):
(definition of _get_tuning_param:)
def _get_tuning_param(norm, eff, kwd="c", kwargs=None, use_jump=False, bracket=None, ):
"""Tuning parameter for RLM norms for required relative efficiency.
Parameters
----------
norm : instance of RobustNorm subclass
eff : float in (0, 1)
Required asymptotic relative efficiency compared to least squares
at the normal reference distribution. For example, ``eff=0.95`` for
95% efficiency.
kwd : str
Name of keyword for tuning parameter.
kwargs : dict or None
Dict for other keyword parameters.
use_jump : bool
If False (default), then use computation that require continuous
psi function.
If True, then use computation then the psi function can have jump
discontinuities.
bracket : None or tuple
Bracket with lower and upper bounds to use for scipy.optimize.brentq.
If None, than a default bracket, currently [0.1, 10], is used.
Returns
-------
Float : Value of tuning parameter to achieve asymptotic relative
efficiency."""
(definition of _get_tuning_param.func:)
def func(c):
(definition of _get_tuning_param.func:)
def func(c):
(definition of tuning_s_estimator_mean:)
def tuning_s_estimator_mean(norm, breakdown=None):
"""Tuning parameter and scale bias correction for S-estimators of mean.
The reference distribution is the normal distribution.
This requires a (hard) redescending norm, i.e. with finite max rho.
Parameters
----------
norm : instance of RobustNorm subclass
breakdown : float or iterable of float in (0, 0.5]
Desired breakdown point between 0 and 0.5.
Default if breakdown is None is a list of breakdown points.
Returns
-------
Holder instance with the following attributes :
- `breakdown` : breakdown point
- `eff` : relative efficiency
- `param` : tuning parameter for norm
- `scale_bias` : correction term for Fisher consistency.
Notes
-----
Based on Rousseeuw and Leroy (1987). See table 19, p. 142 that can be
replicated by this function for TukeyBiweight norm.
Note, the results of this function are based computation without rounding
to decimal precision, and differ in some cases in the last digit from
the table by Rousseeuw and Leroy.
Numerical expectation and root finding based on scipy integrate and
optimize.
TODO: more options for details, numeric approximation and root finding.
There is currently no feasibility check in functions.
Reference
---------
Rousseeuw and Leroy book"""
(definition of tuning_s_estimator_mean.func:)
def func(c):
[end of new definitions in statsmodels/robust/tools.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 589f167fed77ebf6031d01ad3de1aa7b0040ced3 | ||
tobymao__sqlglot-3242 | 3,242 | tobymao/sqlglot | null | 59f1d13bc5e37ebe6636b05e0381facc9725f7b0 | 2024-03-28T22:54:17Z | diff --git a/sqlglot/dialects/sqlite.py b/sqlglot/dialects/sqlite.py
index 63540a8e02..ef7d9aa14a 100644
--- a/sqlglot/dialects/sqlite.py
+++ b/sqlglot/dialects/sqlite.py
@@ -33,6 +33,14 @@ def _json_extract_sql(self: SQLite.Generator, expression: exp.JSONExtract) -> st
return arrow_json_extract_sql(self, expression)
+def _build_strftime(args: t.List) -> exp.Anonymous | exp.TimeToStr:
+ if len(args) == 1:
+ args.append(exp.CurrentTimestamp())
+ if len(args) == 2:
+ return exp.TimeToStr(this=exp.TsOrDsToTimestamp(this=args[1]), format=args[0])
+ return exp.Anonymous(this="STRFTIME", expressions=args)
+
+
def _transform_create(expression: exp.Expression) -> exp.Expression:
"""Move primary key to a column and enforce auto_increment on primary keys."""
schema = expression.this
@@ -82,6 +90,7 @@ class Parser(parser.Parser):
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
"EDITDIST3": exp.Levenshtein.from_arg_list,
+ "STRFTIME": _build_strftime,
}
STRING_ALIASES = True
@@ -152,7 +161,9 @@ class Generator(generator.Generator):
),
exp.TableSample: no_tablesample_sql,
exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
+ exp.TimeToStr: lambda self, e: self.func("STRFTIME", e.args.get("format"), e.this),
exp.TryCast: no_trycast_sql,
+ exp.TsOrDsToTimestamp: lambda self, e: self.sql(e, "this"),
}
# SQLite doesn't generally support CREATE TABLE .. properties
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 742dfcea05..7575144414 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5687,6 +5687,10 @@ class TsOrDsToTime(Func):
pass
+class TsOrDsToTimestamp(Func):
+ pass
+
+
class TsOrDiToDi(Func):
pass
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index caec78d826..a67c700338 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -3516,6 +3516,13 @@ def tsordstotime_sql(self, expression: exp.TsOrDsToTime) -> str:
return self.sql(exp.cast(this, "time"))
+ def tsordstotimestamp_sql(self, expression: exp.TsOrDsToTimestamp) -> str:
+ this = expression.this
+ if isinstance(this, exp.TsOrDsToTimestamp) or this.is_type(exp.DataType.Type.TIMESTAMP):
+ return self.sql(this)
+
+ return self.sql(exp.cast(this, "timestamp"))
+
def tsordstodate_sql(self, expression: exp.TsOrDsToDate) -> str:
this = expression.this
time_format = self.format_time(expression)
| diff --git a/tests/dialects/test_sqlite.py b/tests/dialects/test_sqlite.py
index e935c194a2..f3cde0b650 100644
--- a/tests/dialects/test_sqlite.py
+++ b/tests/dialects/test_sqlite.py
@@ -6,62 +6,6 @@
class TestSQLite(Validator):
dialect = "sqlite"
- def test_ddl(self):
- for conflict_action in ("ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"):
- with self.subTest(f"ON CONFLICT {conflict_action}"):
- self.validate_identity("CREATE TABLE a (b, c, UNIQUE (b, c) ON CONFLICT IGNORE)")
-
- self.validate_identity("INSERT OR ABORT INTO foo (x, y) VALUES (1, 2)")
- self.validate_identity("INSERT OR FAIL INTO foo (x, y) VALUES (1, 2)")
- self.validate_identity("INSERT OR IGNORE INTO foo (x, y) VALUES (1, 2)")
- self.validate_identity("INSERT OR REPLACE INTO foo (x, y) VALUES (1, 2)")
- self.validate_identity("INSERT OR ROLLBACK INTO foo (x, y) VALUES (1, 2)")
- self.validate_identity("CREATE TABLE foo (id INTEGER PRIMARY KEY ASC)")
- self.validate_identity("CREATE TEMPORARY TABLE foo (id INTEGER)")
-
- self.validate_all(
- """
- CREATE TABLE "Track"
- (
- CONSTRAINT "PK_Track" FOREIGN KEY ("TrackId"),
- FOREIGN KEY ("AlbumId") REFERENCES "Album" (
- "AlbumId"
- ) ON DELETE NO ACTION ON UPDATE NO ACTION,
- FOREIGN KEY ("AlbumId") ON DELETE CASCADE ON UPDATE RESTRICT,
- FOREIGN KEY ("AlbumId") ON DELETE SET NULL ON UPDATE SET DEFAULT
- )
- """,
- write={
- "sqlite": """CREATE TABLE "Track" (
- CONSTRAINT "PK_Track" FOREIGN KEY ("TrackId"),
- FOREIGN KEY ("AlbumId") REFERENCES "Album" (
- "AlbumId"
- ) ON DELETE NO ACTION ON UPDATE NO ACTION,
- FOREIGN KEY ("AlbumId") ON DELETE CASCADE ON UPDATE RESTRICT,
- FOREIGN KEY ("AlbumId") ON DELETE SET NULL ON UPDATE SET DEFAULT
-)""",
- },
- pretty=True,
- )
- self.validate_all(
- "CREATE TABLE z (a INTEGER UNIQUE PRIMARY KEY AUTOINCREMENT)",
- read={
- "mysql": "CREATE TABLE z (a INT UNIQUE PRIMARY KEY AUTO_INCREMENT)",
- },
- write={
- "sqlite": "CREATE TABLE z (a INTEGER UNIQUE PRIMARY KEY AUTOINCREMENT)",
- "mysql": "CREATE TABLE z (a INT UNIQUE PRIMARY KEY AUTO_INCREMENT)",
- "postgres": "CREATE TABLE z (a INT GENERATED BY DEFAULT AS IDENTITY NOT NULL UNIQUE PRIMARY KEY)",
- },
- )
- self.validate_all(
- """CREATE TABLE "x" ("Name" NVARCHAR(200) NOT NULL)""",
- write={
- "sqlite": """CREATE TABLE "x" ("Name" TEXT(200) NOT NULL)""",
- "mysql": "CREATE TABLE `x` (`Name` VARCHAR(200) NOT NULL)",
- },
- )
-
def test_sqlite(self):
self.validate_identity("SELECT DATE()")
self.validate_identity("SELECT DATE('now', 'start of month', '+1 month', '-1 day')")
@@ -69,7 +13,6 @@ def test_sqlite(self):
self.validate_identity("SELECT DATETIME(1092941466, 'auto')")
self.validate_identity("SELECT DATETIME(1092941466, 'unixepoch', 'localtime')")
self.validate_identity("SELECT UNIXEPOCH()")
- self.validate_identity("SELECT STRFTIME('%s')")
self.validate_identity("SELECT JULIANDAY('now') - JULIANDAY('1776-07-04')")
self.validate_identity("SELECT UNIXEPOCH() - UNIXEPOCH('2004-01-01 02:34:56')")
self.validate_identity("SELECT DATE('now', 'start of year', '+9 months', 'weekday 2')")
@@ -149,6 +92,29 @@ def test_sqlite(self):
write={"snowflake": "LEAST(x, y, z)"},
)
+ def test_strftime(self):
+ self.validate_identity("SELECT STRFTIME('%Y/%m/%d', 'now')")
+ self.validate_identity("SELECT STRFTIME('%Y-%m-%d', '2016-10-16', 'start of month')")
+ self.validate_identity(
+ "SELECT STRFTIME('%s')",
+ "SELECT STRFTIME('%s', CURRENT_TIMESTAMP)",
+ )
+
+ self.validate_all(
+ "SELECT STRFTIME('%Y-%m-%d', '2020-01-01 12:05:03')",
+ write={
+ "duckdb": "SELECT STRFTIME(CAST('2020-01-01 12:05:03' AS TIMESTAMP), '%Y-%m-%d')",
+ "sqlite": "SELECT STRFTIME('%Y-%m-%d', '2020-01-01 12:05:03')",
+ },
+ )
+ self.validate_all(
+ "SELECT STRFTIME('%Y-%m-%d', CURRENT_TIMESTAMP)",
+ write={
+ "duckdb": "SELECT STRFTIME(CAST(CURRENT_TIMESTAMP AS TIMESTAMP), '%Y-%m-%d')",
+ "sqlite": "SELECT STRFTIME('%Y-%m-%d', CURRENT_TIMESTAMP)",
+ },
+ )
+
def test_datediff(self):
self.validate_all(
"DATEDIFF(a, b, 'day')",
@@ -194,3 +160,59 @@ def test_warnings(self):
)
self.assertIn("Named columns are not supported in table alias.", cm.output[0])
+
+ def test_ddl(self):
+ for conflict_action in ("ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"):
+ with self.subTest(f"ON CONFLICT {conflict_action}"):
+ self.validate_identity("CREATE TABLE a (b, c, UNIQUE (b, c) ON CONFLICT IGNORE)")
+
+ self.validate_identity("INSERT OR ABORT INTO foo (x, y) VALUES (1, 2)")
+ self.validate_identity("INSERT OR FAIL INTO foo (x, y) VALUES (1, 2)")
+ self.validate_identity("INSERT OR IGNORE INTO foo (x, y) VALUES (1, 2)")
+ self.validate_identity("INSERT OR REPLACE INTO foo (x, y) VALUES (1, 2)")
+ self.validate_identity("INSERT OR ROLLBACK INTO foo (x, y) VALUES (1, 2)")
+ self.validate_identity("CREATE TABLE foo (id INTEGER PRIMARY KEY ASC)")
+ self.validate_identity("CREATE TEMPORARY TABLE foo (id INTEGER)")
+
+ self.validate_all(
+ """
+ CREATE TABLE "Track"
+ (
+ CONSTRAINT "PK_Track" FOREIGN KEY ("TrackId"),
+ FOREIGN KEY ("AlbumId") REFERENCES "Album" (
+ "AlbumId"
+ ) ON DELETE NO ACTION ON UPDATE NO ACTION,
+ FOREIGN KEY ("AlbumId") ON DELETE CASCADE ON UPDATE RESTRICT,
+ FOREIGN KEY ("AlbumId") ON DELETE SET NULL ON UPDATE SET DEFAULT
+ )
+ """,
+ write={
+ "sqlite": """CREATE TABLE "Track" (
+ CONSTRAINT "PK_Track" FOREIGN KEY ("TrackId"),
+ FOREIGN KEY ("AlbumId") REFERENCES "Album" (
+ "AlbumId"
+ ) ON DELETE NO ACTION ON UPDATE NO ACTION,
+ FOREIGN KEY ("AlbumId") ON DELETE CASCADE ON UPDATE RESTRICT,
+ FOREIGN KEY ("AlbumId") ON DELETE SET NULL ON UPDATE SET DEFAULT
+)""",
+ },
+ pretty=True,
+ )
+ self.validate_all(
+ "CREATE TABLE z (a INTEGER UNIQUE PRIMARY KEY AUTOINCREMENT)",
+ read={
+ "mysql": "CREATE TABLE z (a INT UNIQUE PRIMARY KEY AUTO_INCREMENT)",
+ },
+ write={
+ "sqlite": "CREATE TABLE z (a INTEGER UNIQUE PRIMARY KEY AUTOINCREMENT)",
+ "mysql": "CREATE TABLE z (a INT UNIQUE PRIMARY KEY AUTO_INCREMENT)",
+ "postgres": "CREATE TABLE z (a INT GENERATED BY DEFAULT AS IDENTITY NOT NULL UNIQUE PRIMARY KEY)",
+ },
+ )
+ self.validate_all(
+ """CREATE TABLE "x" ("Name" NVARCHAR(200) NOT NULL)""",
+ write={
+ "sqlite": """CREATE TABLE "x" ("Name" TEXT(200) NOT NULL)""",
+ "mysql": "CREATE TABLE `x` (`Name` VARCHAR(200) NOT NULL)",
+ },
+ )
| [] | [
"tests/dialects/test_sqlite.py::TestSQLite::test_strftime"
] | [
"tests/dialects/test_sqlite.py::TestSQLite::test_datediff",
"tests/dialects/test_sqlite.py::TestSQLite::test_ddl",
"tests/dialects/test_sqlite.py::TestSQLite::test_hexadecimal_literal",
"tests/dialects/test_sqlite.py::TestSQLite::test_longvarchar_dtype",
"tests/dialects/test_sqlite.py::TestSQLite::test_sqli... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat: mvp for transpling sqlite's STRFTIME
Fixes #3240
References:
- https://www.sqlite.org/lang_datefunc.html
- https://duckdb.org/docs/sql/functions/dateformat.html#strftime-examples
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
DuckDB STRFTIME arguments incorrectly ordered
**Fully reproducible code snippet**
```python
import sqlglot
print(sqlglot.transpile("""
select strftime('%Y-%m-%d', current_timestamp)
""", read="sqlite", write="duckdb", pretty=True)[0])
```
produces
```
SELECT
STRFTIME('%Y-%m-%d', CURRENT_TIMESTAMP)
```
but that fails on DuckDB with
```
duckdb> SELECT
...> STRFTIME('%Y-%m-%d', CURRENT_TIMESTAMP)
...> ;
Binder Error: No function matches the given name and argument types 'strftime(STRING_LITERAL, TIMESTAMP WITH TIME ZONE)'. You might need to add explicit type casts.
Candidate functions:
strftime(DATE, VARCHAR) -> VARCHAR
strftime(TIMESTAMP, VARCHAR) -> VARCHAR
strftime(VARCHAR, DATE) -> VARCHAR
strftime(VARCHAR, TIMESTAMP) -> VARCHAR
LINE 2: STRFTIME('%Y-%m-%d', CURRENT_TIMESTAMP)
^
duckdb>
```
**Official Documentation**
- DuckDB stftime https://duckdb.org/docs/sql/functions/dateformat.html
----------
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
scikit-learn__scikit-learn-28722 | 28,722 | scikit-learn/scikit-learn | 1.5 | c799133710d518f5fba2958bb0e0765ee280df12 | 2024-03-28T20:00:02Z | diff --git a/doc/whats_new/v1.5.rst b/doc/whats_new/v1.5.rst
index 1a8c50e408a0b..01f0384af5c1d 100644
--- a/doc/whats_new/v1.5.rst
+++ b/doc/whats_new/v1.5.rst
@@ -347,6 +347,8 @@ Changelog
- |Enhancement| :term:`CV splitters <CV splitter>` that ignores the group parameter now
raises a warning when groups are passed in to :term:`split`. :pr:`28210` by
+ `Thomas Fan`_.
+
- |Fix| the ``cv_results_`` attribute (of :class:`model_selection.GridSearchCV`) now
returns masked arrays of the appropriate NumPy dtype, as opposed to always returning
dtype ``object``. :pr:`28352` by :user:`Marco Gorelli<MarcoGorelli>`.
@@ -354,12 +356,19 @@ Changelog
- |Fix| :func:`sklearn.model_selection.train_test_score` works with Array API inputs.
Previously indexing was not handled correctly leading to exceptions when using strict
implementations of the Array API like CuPY.
- :pr:`28407` by `Tim Head <betatim>`.
+ :pr:`28407` by :user:`Tim Head <betatim>`.
+
+- |Enhancement| The HTML diagram representation of
+ :class:`~model_selection.GridSearchCV`,
+ :class:`~model_selection.RandomizedSearchCV`,
+ :class:`~model_selection.HalvingGridSearchCV`, and
+ :class:`~model_selection.HalvingRandomSearchCV` will show the best estimator when
+ `refit=True`. :pr:`28722` by :user:`Yao Xiao <Charlie-XIAO>` and `Thomas Fan`_.
:mod:`sklearn.multioutput`
..........................
-- |Enhancement| `chain_method` parameter added to `:class:`multioutput.ClassifierChain`.
+- |Enhancement| `chain_method` parameter added to :class:`multioutput.ClassifierChain`.
:pr:`27700` by :user:`Lucy Liu <lucyleeow>`.
:mod:`sklearn.neighbors`
diff --git a/sklearn/model_selection/_search.py b/sklearn/model_selection/_search.py
index 9b9072f1491a2..42fde09c16bce 100644
--- a/sklearn/model_selection/_search.py
+++ b/sklearn/model_selection/_search.py
@@ -33,6 +33,7 @@
get_scorer_names,
)
from ..utils import Bunch, check_random_state
+from ..utils._estimator_html_repr import _VisualBlock
from ..utils._param_validation import HasMethods, Interval, StrOptions
from ..utils._tags import _safe_tags
from ..utils.metadata_routing import (
@@ -1153,6 +1154,19 @@ def get_metadata_routing(self):
)
return router
+ def _sk_visual_block_(self):
+ if hasattr(self, "best_estimator_"):
+ key, estimator = "best_estimator_", self.best_estimator_
+ else:
+ key, estimator = "estimator", self.estimator
+
+ return _VisualBlock(
+ "parallel",
+ [estimator],
+ names=[f"{key}: {estimator.__class__.__name__}"],
+ name_details=[str(estimator)],
+ )
+
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
| diff --git a/sklearn/model_selection/tests/test_search.py b/sklearn/model_selection/tests/test_search.py
index 1ff4520034ff0..1a9230259d22e 100644
--- a/sklearn/model_selection/tests/test_search.py
+++ b/sklearn/model_selection/tests/test_search.py
@@ -13,6 +13,7 @@
import pytest
from scipy.stats import bernoulli, expon, uniform
+from sklearn import config_context
from sklearn.base import BaseEstimator, ClassifierMixin, is_classifier
from sklearn.cluster import KMeans
from sklearn.datasets import (
@@ -20,6 +21,7 @@
make_classification,
make_multilabel_classification,
)
+from sklearn.dummy import DummyClassifier
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.exceptions import FitFailedWarning
from sklearn.experimental import enable_halving_search_cv # noqa
@@ -27,6 +29,7 @@
from sklearn.impute import SimpleImputer
from sklearn.linear_model import (
LinearRegression,
+ LogisticRegression,
Ridge,
SGDClassifier,
)
@@ -60,6 +63,7 @@
from sklearn.naive_bayes import ComplementNB
from sklearn.neighbors import KernelDensity, KNeighborsClassifier, LocalOutlierFactor
from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, LinearSVC
from sklearn.tests.metadata_routing_common import (
ConsumingScorer,
@@ -2523,6 +2527,34 @@ def test_search_with_2d_array():
np.testing.assert_array_equal(result.data, expected_data)
+def test_search_html_repr():
+ """Test different HTML representations for GridSearchCV."""
+ X, y = make_classification(random_state=42)
+
+ pipeline = Pipeline([("scale", StandardScaler()), ("clf", DummyClassifier())])
+ param_grid = {"clf": [DummyClassifier(), LogisticRegression()]}
+
+ # Unfitted shows the original pipeline
+ search_cv = GridSearchCV(pipeline, param_grid=param_grid, refit=False)
+ with config_context(display="diagram"):
+ repr_html = search_cv._repr_html_()
+ assert "<pre>DummyClassifier()</pre>" in repr_html
+
+ # Fitted with `refit=False` shows the original pipeline
+ search_cv.fit(X, y)
+ with config_context(display="diagram"):
+ repr_html = search_cv._repr_html_()
+ assert "<pre>DummyClassifier()</pre>" in repr_html
+
+ # Fitted with `refit=True` shows the best estimator
+ search_cv = GridSearchCV(pipeline, param_grid=param_grid, refit=True)
+ search_cv.fit(X, y)
+ with config_context(display="diagram"):
+ repr_html = search_cv._repr_html_()
+ assert "<pre>DummyClassifier()</pre>" not in repr_html
+ assert "<pre>LogisticRegression()</pre>" in repr_html
+
+
# Metadata Routing Tests
# ======================
| diff --git a/doc/whats_new/v1.5.rst b/doc/whats_new/v1.5.rst
index 1a8c50e408a0b..01f0384af5c1d 100644
--- a/doc/whats_new/v1.5.rst
+++ b/doc/whats_new/v1.5.rst
@@ -347,6 +347,8 @@ Changelog
- |Enhancement| :term:`CV splitters <CV splitter>` that ignores the group parameter now
raises a warning when groups are passed in to :term:`split`. :pr:`28210` by
+ `Thomas Fan`_.
+
- |Fix| the ``cv_results_`` attribute (of :class:`model_selection.GridSearchCV`) now
returns masked arrays of the appropriate NumPy dtype, as opposed to always returning
dtype ``object``. :pr:`28352` by :user:`Marco Gorelli<MarcoGorelli>`.
@@ -354,12 +356,19 @@ Changelog
- |Fix| :func:`sklearn.model_selection.train_test_score` works with Array API inputs.
Previously indexing was not handled correctly leading to exceptions when using strict
implementations of the Array API like CuPY.
- :pr:`28407` by `Tim Head <betatim>`.
+ :pr:`28407` by :user:`Tim Head <betatim>`.
+
+- |Enhancement| The HTML diagram representation of
+ :class:`~model_selection.GridSearchCV`,
+ :class:`~model_selection.RandomizedSearchCV`,
+ :class:`~model_selection.HalvingGridSearchCV`, and
+ :class:`~model_selection.HalvingRandomSearchCV` will show the best estimator when
+ `refit=True`. :pr:`28722` by :user:`Yao Xiao <Charlie-XIAO>` and `Thomas Fan`_.
:mod:`sklearn.multioutput`
..........................
-- |Enhancement| `chain_method` parameter added to `:class:`multioutput.ClassifierChain`.
+- |Enhancement| `chain_method` parameter added to :class:`multioutput.ClassifierChain`.
:pr:`27700` by :user:`Lucy Liu <lucyleeow>`.
:mod:`sklearn.neighbors`
| [
{
"components": [
{
"doc": "",
"lines": [
1157,
1167
],
"name": "BaseSearchCV._sk_visual_block_",
"signature": "def _sk_visual_block_(self):",
"type": "function"
}
],
"file": "sklearn/model_selection/_search.py"
}
] | [
"sklearn/model_selection/tests/test_search.py::test_search_html_repr"
] | [
"sklearn/model_selection/tests/test_search.py::test_validate_parameter_input[0-TypeError-Parameter",
"sklearn/model_selection/tests/test_search.py::test_validate_parameter_input[input1-TypeError-Parameter",
"sklearn/model_selection/tests/test_search.py::test_validate_parameter_input[input2-TypeError-Parameter",... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH HTML repr show best estimator in `*SearchCV` when `refit=True`
Closes #21058 (supersedes).
Closes #20971.
This PR shows the best estimator in `*SearchCV` HTML repr whenever possible with a clear label saying it is `best_estimator_`.
Setup code:
```python
from sklearn import set_config
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
set_config(display="diagram")
X, y = make_classification(random_state=42)
pipeline = Pipeline([("scale", StandardScaler()), ("clf", DummyClassifier())])
param_grid = {"clf": [DummyClassifier(), LogisticRegression()]}
```
Then try running:
| | `GridSearchCV(pipeline, param_grid)` | `GridSearchCV(pipeline, param_grid, refit=False).fit(X, y)` | `GridSearchCV(pipeline, param_grid).fit(X, y)` |
| :--: | :----------------------------------: | :--------------------------------------------------------: | :--------------------------------------------: |
| main |  |  |  |
| PR |  |  |  |
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/model_selection/_search.py]
(definition of BaseSearchCV._sk_visual_block_:)
def _sk_visual_block_(self):
[end of new definitions in sklearn/model_selection/_search.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
incompatible HTML representation for RandomizedSearchCV
### Describe the bug
I don't know if this is intended, but for now the HTML representation of a `RandomizedSearchCV` or `GridSearchCV` shows the estimators and parameters of the initialized pipeline instead of the 'optimized' one found in the search (`best_estimator_`).
### Steps/Code to Reproduce
A simple setup
```
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.svm import SVC
from sklearn.model_selection import RandomizedSearchCV
from sklearn.utils.fixes import loguniform
iris = load_iris()
pipeline = Pipeline([('clf', SVC())])
search_dists = [
{'clf': [LogisticRegression(random_state=42)],
'clf__C': loguniform(1e-2, 1e1),
'clf__penalty': ['l1', 'l2']
},
{'clf': [RidgeClassifier(random_state=42)],
'clf__alpha': loguniform(1e-2, 1e0)
},
{'clf': [SVC(random_state=42)],
'clf__C': loguniform(1e-2, 1e2),
'clf__gamma': loguniform(1e-3, 1e0),
}]
rand_search_cv = RandomizedSearchCV(pipeline, search_dists, n_jobs=-1, random_state=42)
rand_res = rand_search_cv.fit(iris.data, iris.target)
```
Try to view the HTML representation of the fitted `RandomizedSearchCV`
```
from sklearn import set_config
set_config(display='diagram')
# diplays HTML representation in a jupyter context of the initial `pipeline`
rand_res
```

### Expected Results
I expected to see the same results as
```
from sklearn import set_config
set_config(display='diagram')
# diplays HTML representation in a jupyter context of the initial `pipeline`
rand_res.best_estimator_
```

### Actual Results
I found the actual presentation irrelevant and doesn't make sense to have initial pipeline as result of visualizing a fitted GridSearch.
### Versions
System:
python: 3.7.10 | packaged by conda-forge | (default, Feb 19 2021, 16:07:37) [GCC 9.3.0]
executable: /opt/conda/bin/python
machine: Linux-5.4.120+-x86_64-with-debian-buster-sid
Python dependencies:
pip: 21.1.2
setuptools: 49.6.0.post20210108
sklearn: 0.24.2
numpy: 1.19.5
scipy: 1.6.3
Cython: 0.29.23
pandas: 1.2.4
matplotlib: 3.4.2
joblib: 1.0.1
threadpoolctl: 2.1.0
----------
The HTML representation shows the original `pipeline` before `fit`. It would make sense that if `fit` is called and `refit=True`, then the HTML representation will show the `best_estimator_` instead.
@thomasjpfan do you think this is feasible to do have a specific HTML diagram for the `SearchCV`?
Displaying the `best_estimator_` instead of the Search estimator would be potentially confusing IMHO. It could trick users into thinking that the estimator became a Pipeline object (or whatever `best_estimator_` is), when in reality it really is still a Search object.
But adding it as a field in the html repr is probably a good thing
@NicolasHug I think preserving the HTML representation title as it is now (like _'RandomizedSearchCV'_ for the above example) can help to avoid any possible confusion.
--------------------
</issues> | c799133710d518f5fba2958bb0e0765ee280df12 |
pyro-ppl__pyro-3351 | 3,351 | pyro-ppl/pyro | null | 0e08427d9719d720cc3e178cc9eab92d39cea11f | 2024-03-28T15:30:06Z | diff --git a/pyro/infer/importance.py b/pyro/infer/importance.py
index d25cf16680..ca088645cb 100644
--- a/pyro/infer/importance.py
+++ b/pyro/infer/importance.py
@@ -3,6 +3,7 @@
import math
import warnings
+from typing import List, Union
import torch
@@ -15,45 +16,12 @@
from .util import plate_log_prob_sum
-class Importance(TracePosterior):
+class LogWeightsMixin:
"""
- :param model: probabilistic model defined as a function
- :param guide: guide used for sampling defined as a function
- :param num_samples: number of samples to draw from the guide (default 10)
-
- This method performs posterior inference by importance sampling
- using the guide as the proposal distribution.
- If no guide is provided, it defaults to proposing from the model's prior.
+ Mixin class to compute analytics from a ``.log_weights`` attribute.
"""
- def __init__(self, model, guide=None, num_samples=None):
- """
- Constructor. default to num_samples = 10, guide = model
- """
- super().__init__()
- if num_samples is None:
- num_samples = 10
- warnings.warn(
- "num_samples not provided, defaulting to {}".format(num_samples)
- )
- if guide is None:
- # propose from the prior by making a guide from the model by hiding observes
- guide = poutine.block(model, hide_types=["observe"])
- self.num_samples = num_samples
- self.model = model
- self.guide = guide
-
- def _traces(self, *args, **kwargs):
- """
- Generator of weighted samples from the proposal distribution.
- """
- for i in range(self.num_samples):
- guide_trace = poutine.trace(self.guide).get_trace(*args, **kwargs)
- model_trace = poutine.trace(
- poutine.replay(self.model, trace=guide_trace)
- ).get_trace(*args, **kwargs)
- log_weight = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
- yield (model_trace, log_weight)
+ log_weights: Union[List[Union[float, torch.Tensor]], torch.Tensor]
def get_log_normalizer(self):
"""
@@ -61,9 +29,13 @@ def get_log_normalizer(self):
(mean of the unnormalized weights)
"""
# ensure list is not empty
- if self.log_weights:
- log_w = torch.tensor(self.log_weights)
- log_num_samples = torch.log(torch.tensor(self.num_samples * 1.0))
+ if len(self.log_weights) > 0:
+ log_w = (
+ self.log_weights
+ if isinstance(self.log_weights, torch.Tensor)
+ else torch.tensor(self.log_weights)
+ )
+ log_num_samples = torch.log(torch.tensor(log_w.numel() * 1.0))
return torch.logsumexp(log_w - log_num_samples, 0)
else:
warnings.warn(
@@ -74,8 +46,12 @@ def get_normalized_weights(self, log_scale=False):
"""
Compute the normalized importance weights.
"""
- if self.log_weights:
- log_w = torch.tensor(self.log_weights)
+ if len(self.log_weights) > 0:
+ log_w = (
+ self.log_weights
+ if isinstance(self.log_weights, torch.Tensor)
+ else torch.tensor(self.log_weights)
+ )
log_w_norm = log_w - torch.logsumexp(log_w, 0)
return log_w_norm if log_scale else torch.exp(log_w_norm)
else:
@@ -87,7 +63,7 @@ def get_ESS(self):
"""
Compute (Importance Sampling) Effective Sample Size (ESS).
"""
- if self.log_weights:
+ if len(self.log_weights) > 0:
log_w_norm = self.get_normalized_weights(log_scale=True)
ess = torch.exp(-torch.logsumexp(2 * log_w_norm, 0))
else:
@@ -98,6 +74,47 @@ def get_ESS(self):
return ess
+class Importance(TracePosterior, LogWeightsMixin):
+ """
+ :param model: probabilistic model defined as a function
+ :param guide: guide used for sampling defined as a function
+ :param num_samples: number of samples to draw from the guide (default 10)
+
+ This method performs posterior inference by importance sampling
+ using the guide as the proposal distribution.
+ If no guide is provided, it defaults to proposing from the model's prior.
+ """
+
+ def __init__(self, model, guide=None, num_samples=None):
+ """
+ Constructor. default to num_samples = 10, guide = model
+ """
+ super().__init__()
+ if num_samples is None:
+ num_samples = 10
+ warnings.warn(
+ "num_samples not provided, defaulting to {}".format(num_samples)
+ )
+ if guide is None:
+ # propose from the prior by making a guide from the model by hiding observes
+ guide = poutine.block(model, hide_types=["observe"])
+ self.num_samples = num_samples
+ self.model = model
+ self.guide = guide
+
+ def _traces(self, *args, **kwargs):
+ """
+ Generator of weighted samples from the proposal distribution.
+ """
+ for i in range(self.num_samples):
+ guide_trace = poutine.trace(self.guide).get_trace(*args, **kwargs)
+ model_trace = poutine.trace(
+ poutine.replay(self.model, trace=guide_trace)
+ ).get_trace(*args, **kwargs)
+ log_weight = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
+ yield (model_trace, log_weight)
+
+
def vectorized_importance_weights(model, guide, *args, **kwargs):
"""
:param model: probabilistic model defined as a function
diff --git a/pyro/infer/predictive.py b/pyro/infer/predictive.py
index 6be8b5cb5f..ea89aff5e5 100644
--- a/pyro/infer/predictive.py
+++ b/pyro/infer/predictive.py
@@ -2,13 +2,15 @@
# SPDX-License-Identifier: Apache-2.0
import warnings
+from dataclasses import dataclass
from functools import reduce
-from typing import List, NamedTuple, Union
+from typing import List, Union
import torch
import pyro
import pyro.poutine as poutine
+from pyro.infer.importance import LogWeightsMixin
from pyro.infer.util import plate_log_prob_sum
from pyro.poutine.trace_struct import Trace
from pyro.poutine.util import prune_subsample_sites
@@ -34,7 +36,8 @@ def _guess_max_plate_nesting(model, args, kwargs):
return max_plate_nesting
-class _predictiveResults(NamedTuple):
+@dataclass(frozen=True, eq=False)
+class _predictiveResults:
"""
Return value of call to ``_predictive`` and ``_predictive_sequential``.
"""
@@ -316,7 +319,8 @@ def get_vectorized_trace(self, *args, **kwargs):
).trace
-class WeighedPredictiveResults(NamedTuple):
+@dataclass(frozen=True, eq=False)
+class WeighedPredictiveResults(LogWeightsMixin):
"""
Return value of call to instance of :class:`WeighedPredictive`.
"""
| diff --git a/tests/infer/test_predictive.py b/tests/infer/test_predictive.py
index 1f28e1f05c..319a1196dd 100644
--- a/tests/infer/test_predictive.py
+++ b/tests/infer/test_predictive.py
@@ -46,6 +46,7 @@ def test_posterior_predictive_svi_manual_guide(parallel, predictive):
num_trials = (
torch.ones(5) * 400
) # Reduced to 400 from 1000 in order for guide optimization to converge
+ num_samples = 10000
num_success = dist.Binomial(num_trials, true_probs).sample()
conditioned_model = poutine.condition(model, data={"obs": num_success})
elbo = Trace_ELBO(num_particles=100, vectorize_particles=True)
@@ -57,7 +58,7 @@ def test_posterior_predictive_svi_manual_guide(parallel, predictive):
posterior_predictive = predictive(
model,
guide=beta_guide,
- num_samples=10000,
+ num_samples=num_samples,
parallel=parallel,
return_sites=["_RETURN"],
)
@@ -71,6 +72,8 @@ def test_posterior_predictive_svi_manual_guide(parallel, predictive):
assert marginal_return_vals.shape[:1] == weighed_samples.log_weights.shape
# Weights should be uniform as the guide has the same distribution as the model
assert weighed_samples.log_weights.std() < 0.6
+ # Effective sample size should be close to actual number of samples taken from the guide
+ assert weighed_samples.get_ESS() > 0.8 * num_samples
assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 280, rtol=0.1)
| [
{
"components": [
{
"doc": "Mixin class to compute analytics from a ``.log_weights`` attribute.",
"lines": [
19,
74
],
"name": "LogWeightsMixin",
"signature": "class LogWeightsMixin:",
"type": "class"
},
{
"doc": "Esti... | [
"tests/infer/test_predictive.py::test_posterior_predictive_svi_manual_guide[False-WeighedPredictive]",
"tests/infer/test_predictive.py::test_posterior_predictive_svi_manual_guide[True-WeighedPredictive]"
] | [
"tests/infer/test_predictive.py::test_posterior_predictive_svi_manual_guide[False-Predictive]",
"tests/infer/test_predictive.py::test_posterior_predictive_svi_manual_guide[True-Predictive]",
"tests/infer/test_predictive.py::test_posterior_predictive_svi_auto_delta_guide[False-Predictive]",
"tests/infer/test_p... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add effective sample size analytics to WeighedPredictive results
1. Allows calculation of the effective sample size of weighed sample results (returned by ``pyro.infer.predictive.WeighedPredictive``) by calling the ``.get_ESS`` method.
2. Code is shared with ``pyro.infer.importance.Importance``.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pyro/infer/importance.py]
(definition of LogWeightsMixin:)
class LogWeightsMixin:
"""Mixin class to compute analytics from a ``.log_weights`` attribute."""
(definition of LogWeightsMixin.get_log_normalizer:)
def get_log_normalizer(self):
"""Estimator of the normalizing constant of the target distribution.
(mean of the unnormalized weights)"""
(definition of LogWeightsMixin.get_normalized_weights:)
def get_normalized_weights(self, log_scale=False):
"""Compute the normalized importance weights."""
(definition of LogWeightsMixin.get_ESS:)
def get_ESS(self):
"""Compute (Importance Sampling) Effective Sample Size (ESS)."""
[end of new definitions in pyro/infer/importance.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 64e71eee1c14dc926d5cbc5e762b6337bb4750a6 | ||
astropy__astropy-16246 | 16,246 | astropy/astropy | v5.3 | 51839c4e81af854bfa88c26a6972b8f02031ae6b | 2024-03-27T15:40:51Z | diff --git a/astropy/coordinates/baseframe.py b/astropy/coordinates/baseframe.py
index 06ab3ce98290..fbb935a30844 100644
--- a/astropy/coordinates/baseframe.py
+++ b/astropy/coordinates/baseframe.py
@@ -28,13 +28,19 @@
from . import representation as r
from .angles import Angle, position_angle
from .attributes import Attribute
-from .transformations import TransformGraph
+from .errors import NonRotationTransformationError, NonRotationTransformationWarning
+from .transformations import (
+ DynamicMatrixTransform,
+ StaticMatrixTransform,
+ TransformGraph,
+)
if TYPE_CHECKING:
+ from typing import Literal
+
from astropy.coordinates import Latitude, Longitude, SkyCoord
from astropy.units import Unit
-
# the graph used for all transformations between frames
frame_transform_graph = TransformGraph()
@@ -1750,13 +1756,33 @@ def __ne__(self, value):
return np.logical_not(self == value)
def _prepare_unit_sphere_coords(
- self, other: BaseCoordinateFrame | SkyCoord
+ self,
+ other: BaseCoordinateFrame | SkyCoord,
+ origin_mismatch: Literal["ignore", "warn", "error"],
) -> tuple[Longitude, Latitude, Longitude, Latitude]:
+ other_frame = getattr(other, "frame", other)
+ if not (
+ origin_mismatch == "ignore"
+ or self.is_equivalent_frame(other_frame)
+ or all(
+ isinstance(comp, (StaticMatrixTransform, DynamicMatrixTransform))
+ for comp in frame_transform_graph.get_transform(
+ type(self), type(other_frame)
+ ).transforms
+ )
+ ):
+ if origin_mismatch == "warn":
+ warnings.warn(NonRotationTransformationWarning(self, other_frame))
+ elif origin_mismatch == "error":
+ raise NonRotationTransformationError(self, other_frame)
+ else:
+ raise ValueError(
+ f"{origin_mismatch=} is invalid. Allowed values are 'ignore', "
+ "'warn' or 'error'."
+ )
self_sph = self.represent_as(r.UnitSphericalRepresentation)
- other_sph = (
- getattr(other, "frame", other)
- .transform_to(self)
- .represent_as(r.UnitSphericalRepresentation)
+ other_sph = other_frame.transform_to(self).represent_as(
+ r.UnitSphericalRepresentation
)
return self_sph.lon, self_sph.lat, other_sph.lon, other_sph.lat
@@ -1791,20 +1817,17 @@ def position_angle(self, other: BaseCoordinateFrame | SkyCoord) -> Angle:
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
- return position_angle(*self._prepare_unit_sphere_coords(other))
+ return position_angle(*self._prepare_unit_sphere_coords(other, "ignore"))
- def separation(self, other):
+ def separation(
+ self,
+ other: BaseCoordinateFrame | SkyCoord,
+ *,
+ origin_mismatch: Literal["ignore", "warn", "error"] = "warn",
+ ) -> Angle:
"""
Computes on-sky separation between this coordinate and another.
- .. note::
-
- If the ``other`` coordinate object is in a different frame, it is
- first transformed to the frame of this object. This can lead to
- unintuitive behavior if not accounted for. Particularly of note is
- that ``self.separation(other)`` and ``other.separation(self)`` may
- not give the same answer in this case.
-
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
@@ -1812,6 +1835,17 @@ def separation(self, other):
----------
other : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinate to get the separation to.
+ origin_mismatch : {"warn", "ignore", "error"}, keyword-only
+ If the ``other`` coordinates are in a different frame then they
+ will have to be transformed, and if the transformation is not a
+ pure rotation then ``self.separation(other)`` can be
+ different from ``other.separation(self)``. With
+ ``origin_mismatch="warn"`` (default) the transformation is
+ always performed, but a warning is emitted if it is not a
+ pure rotation. If ``origin_mismatch="ignore"`` then the
+ required transformation is always performed without warnings.
+ If ``origin_mismatch="error"`` then only transformations
+ that are pure rotations are allowed.
Returns
-------
@@ -1829,7 +1863,10 @@ def separation(self, other):
from .angles import Angle, angular_separation
return Angle(
- angular_separation(*self._prepare_unit_sphere_coords(other)), unit=u.degree
+ angular_separation(
+ *self._prepare_unit_sphere_coords(other, origin_mismatch)
+ ),
+ unit=u.degree,
)
def separation_3d(self, other):
diff --git a/astropy/coordinates/errors.py b/astropy/coordinates/errors.py
index 821e839f304d..71abf5e656ef 100644
--- a/astropy/coordinates/errors.py
+++ b/astropy/coordinates/errors.py
@@ -2,7 +2,21 @@
"""This module defines custom errors and exceptions used in astropy.coordinates."""
-__all__ = ["ConvertError", "UnknownSiteException"]
+from __future__ import annotations
+
+__all__ = [
+ "ConvertError",
+ "NonRotationTransformationError",
+ "NonRotationTransformationWarning",
+ "UnknownSiteException",
+]
+
+from typing import TYPE_CHECKING
+
+from astropy.utils.exceptions import AstropyUserWarning
+
+if TYPE_CHECKING:
+ from astropy.coordinates import BaseCoordinateFrame
# TODO: consider if this should be used to `units`?
@@ -18,6 +32,28 @@ class ConvertError(Exception):
"""
+class NonRotationTransformationError(ValueError):
+ """
+ Raised for transformations that are not simple rotations. Such
+ transformations can change the angular separation between coordinates
+ depending on its direction.
+ """
+
+ def __init__(
+ self, frame_to: BaseCoordinateFrame, frame_from: BaseCoordinateFrame
+ ) -> None:
+ self.frame_to = frame_to
+ self.frame_from = frame_from
+
+ def __str__(self) -> str:
+ return (
+ "refusing to transform other coordinates from "
+ f"{self.frame_from.replicate_without_data()} to "
+ f"{self.frame_to.replicate_without_data()} because angular separation "
+ "can depend on the direction of the transformation"
+ )
+
+
class UnknownSiteException(KeyError):
def __init__(self, site, attribute, close_names=None):
message = (
@@ -31,3 +67,25 @@ def __init__(self, site, attribute, close_names=None):
self.attribute = attribute
self.close_names = close_names
return super().__init__(message)
+
+
+class NonRotationTransformationWarning(AstropyUserWarning):
+ """
+ Emitted for transformations that are not simple rotations. Such
+ transformations can change the angular separation between coordinates
+ depending on its direction.
+ """
+
+ def __init__(
+ self, frame_to: BaseCoordinateFrame, frame_from: BaseCoordinateFrame
+ ) -> None:
+ self.frame_to = frame_to
+ self.frame_from = frame_from
+
+ def __str__(self) -> str:
+ return (
+ "transforming other coordinates from "
+ f"{self.frame_from.replicate_without_data()} to "
+ f"{self.frame_to.replicate_without_data()}. Angular separation can depend "
+ "on the direction of the transformation."
+ )
diff --git a/docs/changes/coordinates/16246.feature.rst b/docs/changes/coordinates/16246.feature.rst
new file mode 100644
index 000000000000..a3058f14f276
--- /dev/null
+++ b/docs/changes/coordinates/16246.feature.rst
@@ -0,0 +1,10 @@
+By default the ``SkyCoord`` and ``BaseCoordinateFrame`` ``separation()``
+methods now emit a warning if they have to perform a coordinate transformation
+that is not a pure rotation to inform the user that the angular separation can
+depend on the direction of the transformation.
+It is possible to modify this behaviour with the new optional keyword-only
+``frame_origin_mismatch`` argument.
+Specifying ``frame_origin_mismatch="ignore"`` allows any transformation to
+succeed without warning, which has been the behaviour so far.
+``frame_origin_mismatch="error"`` forbids all transformations that are not
+pure rotations.
diff --git a/docs/coordinates/common_errors.rst b/docs/coordinates/common_errors.rst
index 1ff79694ddb6..c99ed07a2041 100644
--- a/docs/coordinates/common_errors.rst
+++ b/docs/coordinates/common_errors.rst
@@ -9,7 +9,7 @@ Object Separation
-----------------
When calculating the separation between objects, it is important to bear in mind that
-:meth:`~astropy.coordinates.BaseCoordinateFrame.separation` gives a different
+:meth:`~astropy.coordinates.BaseCoordinateFrame.separation` can give a different
answer depending upon the order in which is used.
For example::
@@ -20,10 +20,15 @@ For example::
>>> t = Time("2010-05-22T00:00")
>>> moon = SkyCoord(104.29*u.deg, 23.51*u.deg, 359367.3*u.km, frame=GCRS(obstime=t))
>>> star = SkyCoord(101.4*u.deg, 23.02*u.deg, frame='icrs')
- >>> star.separation(moon) # doctest: +FLOAT_CMP
+ >>> star.separation(moon) # doctest: +FLOAT_CMP, +SHOW_WARNINGS
<Angle 139.84211884 deg>
- >>> moon.separation(star) # doctest: +FLOAT_CMP
+ NonRotationTransformationWarning: transforming other coordinates from
+ <GCRS Frame (obstime=2010-05-22T00:00:00.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)> to <ICRS Frame>. Angular separation can
+ depend on the direction of the transformation.
+ >>> moon.separation(star) # doctest: +FLOAT_CMP, +SHOW_WARNINGS
<Angle 2.70390995 deg>
+ NonRotationTransformationWarning: transforming other coordinates from...
Why do these give such different answers?
@@ -33,6 +38,25 @@ So ``star.separation(moon)`` gives the angular separation in the ICRS frame.
This is the separation as it would appear from the Solar System Barycenter.
For a geocentric observer, ``moon.separation(star)`` gives the correct answer,
since ``moon`` is in a geocentric frame.
+As can be seen from the above example, by default an appropriate warning is
+emitted if the coordinate transformation can cause the angular separation value
+to be order-dependent.
+It is possible to always suppress the warning::
+
+ >>> moon.separation(star, origin_mismatch="ignore") # doctest: +FLOAT_CMP
+ <Angle 2.70390995 deg>
+
+It is also possible to forbid coordinate transformations that are not pure
+rotations::
+
+ >>> moon.separation(star, origin_mismatch="error")
+ Traceback (most recent call last):
+ ...
+ astropy.coordinates.errors.NonRotationTransformationError: refusing to
+ transform other coordinates from <ICRS Frame> to <GCRS Frame
+ (obstime=2010-05-22T00:00:00.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)> because angular separation can depend on
+ the direction of the transformation
AltAz calculations for Earth-based objects
------------------------------------------
diff --git a/docs/whatsnew/6.1.rst b/docs/whatsnew/6.1.rst
index 764fa25f8331..36b3afbc17ee 100644
--- a/docs/whatsnew/6.1.rst
+++ b/docs/whatsnew/6.1.rst
@@ -31,6 +31,69 @@ the `NumPy deprecation policy
<https://numpy.org/neps/nep-0029-deprecation_policy.html>`_.
+Order-dependent angular separations now come with warnings
+==========================================================
+
+Angular separation between two points depends on the point of view.
+For example, during a lunar eclipse and for an observer on the Earth the Sun
+and the Moon will be in (more-or-less) opposite directions, but at the same
+time for an observer at the Earth-Sun L2 point (where Gaia and James Webb Space
+Telescope are) the Sun and the Moon will be (more-or-less) in the same
+direction.
+The :meth:`~astropy.coordinates.BaseCoordinateFrame.separation` method
+automatically converts a coordinate given to it to the frame of the coordinate
+it belongs to, so the separation can be different if the coordinates are
+swapped.
+Such transformations are now accompanied by an appropriate warning::
+
+ >>> from astropy import units as u
+ >>> from astropy.coordinates import SkyCoord
+ >>> icrs = SkyCoord(0 * u.deg, 0 * u.deg, 10 * u.pc)
+ >>> gcrs = SkyCoord(0 * u.deg, 0 * u.deg, 380_000 * u.km, frame="gcrs")
+ >>> icrs.separation(gcrs) # doctest: +FLOAT_CMP +SHOW_WARNINGS
+ <Angle 100.67116925 deg>
+ NonRotationTransformationWarning: transforming other coordinates from
+ <GCRS Frame (obstime=J2000.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)> to <ICRS Frame>. Angular separation can
+ depend on the direction of the transformation.
+ >>> gcrs.separation(icrs) # doctest: +FLOAT_CMP +SHOW_WARNINGS
+ <Angle 0.0010732 deg>
+ NonRotationTransformationWarning: transforming other coordinates from
+ <ICRS Frame> to <GCRS Frame (obstime=J2000.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)>. Angular separation can depend on the
+ direction of the transformation.
+
+The warning is not emitted if the coordinate transformation is a pure rotation
+because such transformations do not change the origin of the coordinate frames,
+so the angular separation does not depend on the order of the coordinates::
+
+ >>> galactic = SkyCoord(0 * u.deg, 0 * u.deg, 10 * u.pc, frame="galactic")
+ >>> icrs.separation(galactic) # doctest: +FLOAT_CMP
+ <Angle 93.14572374 deg>
+ >>> galactic.separation(icrs) # doctest: +FLOAT_CMP
+ <Angle 93.14572374 deg>
+
+It is possible to suppress the warning::
+
+ >>> icrs.separation(gcrs, origin_mismatch="ignore") # doctest: +FLOAT_CMP +SHOW_WARNINGS
+ <Angle 100.67116925 deg>
+
+It is also possible to forbid non-rotation transformations::
+
+ >>> icrs.separation(gcrs, origin_mismatch="error") # doctest: +FLOAT_CMP
+ Traceback (most recent call last):
+ ...
+ astropy.coordinates.errors.NonRotationTransformationError: refusing to
+ transform other coordinates from <GCRS Frame (obstime=J2000.000,
+ obsgeoloc=(0., 0., 0.) m, obsgeovel=(0., 0., 0.) m / s)> to <ICRS Frame>
+ because angular separation can depend on the direction of the transformation
+
+Pure rotations will still succeed::
+
+ >>> galactic.separation(icrs, origin_mismatch="error") # doctest: +FLOAT_CMP
+ <Angle 93.14572374 deg>
+
+
.. _whatsnew-6.1-ascii-default-int-columns-as-int64:
``io.ascii`` uses 64-integers by default for integer columns
| diff --git a/astropy/coordinates/tests/test_exceptions.py b/astropy/coordinates/tests/test_exceptions.py
new file mode 100644
index 000000000000..e9cfa4d70aee
--- /dev/null
+++ b/astropy/coordinates/tests/test_exceptions.py
@@ -0,0 +1,80 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+"""Tests for custom error and warning messages in `astropy.coordinates`."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, NamedTuple
+
+import pytest
+
+from astropy import units as u
+from astropy.coordinates import (
+ GCRS,
+ ICRS,
+ Galactic,
+ NonRotationTransformationError,
+ NonRotationTransformationWarning,
+)
+
+if TYPE_CHECKING:
+ from astropy.coordinates import BaseCoordinateFrame
+
+
+class FrameDescription(NamedTuple):
+ frame: BaseCoordinateFrame
+ description: str
+ pytest_id: str
+
+
+galactic = FrameDescription(
+ Galactic(0 * u.deg, 0 * u.deg), "Galactic Frame", "Galactic"
+)
+gcrs_custom = FrameDescription(
+ GCRS(
+ 0 * u.deg,
+ 0 * u.deg,
+ obstime="J1950",
+ obsgeovel=[30, -7, 11] * u.km / u.s,
+ ),
+ (
+ "GCRS Frame (obstime=J1950.000, obsgeoloc=(0., 0., 0.) m, "
+ "obsgeovel=(30000., -7000., 11000.) m / s)"
+ ),
+ "custom_GCRS",
+)
+gcrs_default = FrameDescription(
+ GCRS(0 * u.deg, 0 * u.deg),
+ (
+ "GCRS Frame (obstime=J2000.000, obsgeoloc=(0., 0., 0.) m, "
+ "obsgeovel=(0., 0., 0.) m / s)"
+ ),
+ "default_GCRS",
+)
+icrs = FrameDescription(ICRS(0 * u.deg, 0 * u.deg), "ICRS Frame", "ICRS")
+
+
+@pytest.mark.parametrize(
+ "coord_from,coord_to",
+ [pytest.param(icrs, gcrs_custom), pytest.param(gcrs_default, galactic)],
+ ids=lambda x: x.pytest_id,
+)
+def test_NonRotationTransformationError_message(coord_from, coord_to):
+ assert str(NonRotationTransformationError(coord_to.frame, coord_from.frame)) == (
+ f"refusing to transform other coordinates from <{coord_from.description}> to "
+ f"<{coord_to.description}> because angular separation can depend on the "
+ "direction of the transformation"
+ )
+
+
+@pytest.mark.parametrize(
+ "coord_from,coord_to",
+ [pytest.param(icrs, gcrs_default), pytest.param(gcrs_custom, galactic)],
+ ids=lambda x: x.pytest_id,
+)
+def test_NonRotationTransformationWarning_message(coord_from, coord_to):
+ assert str(NonRotationTransformationWarning(coord_to.frame, coord_from.frame)) == (
+ f"transforming other coordinates from <{coord_from.description}> to "
+ f"<{coord_to.description}>. Angular separation can depend on the direction of "
+ "the transformation."
+ )
diff --git a/astropy/coordinates/tests/test_separation.py b/astropy/coordinates/tests/test_separation.py
index c35750b85e08..c98b0a2b15b5 100644
--- a/astropy/coordinates/tests/test_separation.py
+++ b/astropy/coordinates/tests/test_separation.py
@@ -6,6 +6,7 @@
instances, so they should be tested on both.
"""
+from contextlib import nullcontext
from typing import NamedTuple
import pytest
@@ -13,11 +14,14 @@
from astropy import units as u
from astropy.coordinates import (
FK5,
+ GCRS,
ICRS,
Angle,
BaseCoordinateFrame,
Distance,
Galactic,
+ NonRotationTransformationError,
+ NonRotationTransformationWarning,
SkyCoord,
)
from astropy.tests.helper import assert_quantity_allclose
@@ -351,3 +355,54 @@ def test_return_types(coord_class, method, output_type):
"""
coord = coord_class(0 * u.deg, 0 * u.deg, 1 * u.pc)
assert type(getattr(coord, method)(coord)) is output_type
+
+
+@pytest.mark.parametrize("coord_class", [SkyCoord, ICRS])
+@pytest.mark.parametrize(
+ "origin_mismatch_kwarg,expectation",
+ [
+ pytest.param({"origin_mismatch": "ignore"}, nullcontext(), id="ignore"),
+ pytest.param(
+ {"origin_mismatch": "warn"},
+ pytest.warns(
+ NonRotationTransformationWarning,
+ match="^transforming other coordinates from <GCRS Frame ",
+ ),
+ id="warn",
+ ),
+ pytest.param(
+ {"origin_mismatch": "error"},
+ pytest.raises(
+ NonRotationTransformationError,
+ match="^refusing to transform other coordinates from <GCRS Frame ",
+ ),
+ id="error",
+ ),
+ pytest.param(
+ {},
+ pytest.warns(
+ NonRotationTransformationWarning,
+ match="^transforming other coordinates from <GCRS Frame ",
+ ),
+ id="default",
+ ),
+ pytest.param(
+ {"origin_mismatch": "bad"},
+ pytest.raises(
+ ValueError,
+ match=(
+ r"^origin_mismatch='bad' is invalid\. Allowed values are 'ignore', "
+ r"'warn' or 'error'\.$"
+ ),
+ ),
+ id="invalid",
+ ),
+ ],
+)
+def test_separation_origin_mismatch_action(
+ coord_class, origin_mismatch_kwarg, expectation
+):
+ with expectation:
+ coord_class(0 * u.deg, 0 * u.deg).separation(
+ SkyCoord(0 * u.deg, 0 * u.deg, frame=GCRS), **origin_mismatch_kwarg
+ )
diff --git a/astropy/coordinates/tests/test_sky_coord.py b/astropy/coordinates/tests/test_sky_coord.py
index 99ffb28a57a3..9068a77b05b2 100644
--- a/astropy/coordinates/tests/test_sky_coord.py
+++ b/astropy/coordinates/tests/test_sky_coord.py
@@ -889,11 +889,14 @@ def test_directional_offset_by():
]:
# Find the displacement from sc1 to sc2,
posang = sc1.position_angle(sc2)
- sep = sc1.separation(sc2)
+ sep = sc1.separation(sc2, origin_mismatch="ignore")
# then do the offset from sc1 and verify that you are at sc2
sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
- assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3
+ assert (
+ np.max(np.abs(sc2.separation(sc2a, origin_mismatch="ignore").arcsec))
+ < 1e-3
+ )
# Specific test cases
# Go over the North pole a little way, and
| diff --git a/docs/changes/coordinates/16246.feature.rst b/docs/changes/coordinates/16246.feature.rst
new file mode 100644
index 000000000000..a3058f14f276
--- /dev/null
+++ b/docs/changes/coordinates/16246.feature.rst
@@ -0,0 +1,10 @@
+By default the ``SkyCoord`` and ``BaseCoordinateFrame`` ``separation()``
+methods now emit a warning if they have to perform a coordinate transformation
+that is not a pure rotation to inform the user that the angular separation can
+depend on the direction of the transformation.
+It is possible to modify this behaviour with the new optional keyword-only
+``frame_origin_mismatch`` argument.
+Specifying ``frame_origin_mismatch="ignore"`` allows any transformation to
+succeed without warning, which has been the behaviour so far.
+``frame_origin_mismatch="error"`` forbids all transformations that are not
+pure rotations.
diff --git a/docs/coordinates/common_errors.rst b/docs/coordinates/common_errors.rst
index 1ff79694ddb6..c99ed07a2041 100644
--- a/docs/coordinates/common_errors.rst
+++ b/docs/coordinates/common_errors.rst
@@ -9,7 +9,7 @@ Object Separation
-----------------
When calculating the separation between objects, it is important to bear in mind that
-:meth:`~astropy.coordinates.BaseCoordinateFrame.separation` gives a different
+:meth:`~astropy.coordinates.BaseCoordinateFrame.separation` can give a different
answer depending upon the order in which is used.
For example::
@@ -20,10 +20,15 @@ For example::
>>> t = Time("2010-05-22T00:00")
>>> moon = SkyCoord(104.29*u.deg, 23.51*u.deg, 359367.3*u.km, frame=GCRS(obstime=t))
>>> star = SkyCoord(101.4*u.deg, 23.02*u.deg, frame='icrs')
- >>> star.separation(moon) # doctest: +FLOAT_CMP
+ >>> star.separation(moon) # doctest: +FLOAT_CMP, +SHOW_WARNINGS
<Angle 139.84211884 deg>
- >>> moon.separation(star) # doctest: +FLOAT_CMP
+ NonRotationTransformationWarning: transforming other coordinates from
+ <GCRS Frame (obstime=2010-05-22T00:00:00.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)> to <ICRS Frame>. Angular separation can
+ depend on the direction of the transformation.
+ >>> moon.separation(star) # doctest: +FLOAT_CMP, +SHOW_WARNINGS
<Angle 2.70390995 deg>
+ NonRotationTransformationWarning: transforming other coordinates from...
Why do these give such different answers?
@@ -33,6 +38,25 @@ So ``star.separation(moon)`` gives the angular separation in the ICRS frame.
This is the separation as it would appear from the Solar System Barycenter.
For a geocentric observer, ``moon.separation(star)`` gives the correct answer,
since ``moon`` is in a geocentric frame.
+As can be seen from the above example, by default an appropriate warning is
+emitted if the coordinate transformation can cause the angular separation value
+to be order-dependent.
+It is possible to always suppress the warning::
+
+ >>> moon.separation(star, origin_mismatch="ignore") # doctest: +FLOAT_CMP
+ <Angle 2.70390995 deg>
+
+It is also possible to forbid coordinate transformations that are not pure
+rotations::
+
+ >>> moon.separation(star, origin_mismatch="error")
+ Traceback (most recent call last):
+ ...
+ astropy.coordinates.errors.NonRotationTransformationError: refusing to
+ transform other coordinates from <ICRS Frame> to <GCRS Frame
+ (obstime=2010-05-22T00:00:00.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)> because angular separation can depend on
+ the direction of the transformation
AltAz calculations for Earth-based objects
------------------------------------------
diff --git a/docs/whatsnew/6.1.rst b/docs/whatsnew/6.1.rst
index 764fa25f8331..36b3afbc17ee 100644
--- a/docs/whatsnew/6.1.rst
+++ b/docs/whatsnew/6.1.rst
@@ -31,6 +31,69 @@ the `NumPy deprecation policy
<https://numpy.org/neps/nep-0029-deprecation_policy.html>`_.
+Order-dependent angular separations now come with warnings
+==========================================================
+
+Angular separation between two points depends on the point of view.
+For example, during a lunar eclipse and for an observer on the Earth the Sun
+and the Moon will be in (more-or-less) opposite directions, but at the same
+time for an observer at the Earth-Sun L2 point (where Gaia and James Webb Space
+Telescope are) the Sun and the Moon will be (more-or-less) in the same
+direction.
+The :meth:`~astropy.coordinates.BaseCoordinateFrame.separation` method
+automatically converts a coordinate given to it to the frame of the coordinate
+it belongs to, so the separation can be different if the coordinates are
+swapped.
+Such transformations are now accompanied by an appropriate warning::
+
+ >>> from astropy import units as u
+ >>> from astropy.coordinates import SkyCoord
+ >>> icrs = SkyCoord(0 * u.deg, 0 * u.deg, 10 * u.pc)
+ >>> gcrs = SkyCoord(0 * u.deg, 0 * u.deg, 380_000 * u.km, frame="gcrs")
+ >>> icrs.separation(gcrs) # doctest: +FLOAT_CMP +SHOW_WARNINGS
+ <Angle 100.67116925 deg>
+ NonRotationTransformationWarning: transforming other coordinates from
+ <GCRS Frame (obstime=J2000.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)> to <ICRS Frame>. Angular separation can
+ depend on the direction of the transformation.
+ >>> gcrs.separation(icrs) # doctest: +FLOAT_CMP +SHOW_WARNINGS
+ <Angle 0.0010732 deg>
+ NonRotationTransformationWarning: transforming other coordinates from
+ <ICRS Frame> to <GCRS Frame (obstime=J2000.000, obsgeoloc=(0., 0., 0.) m,
+ obsgeovel=(0., 0., 0.) m / s)>. Angular separation can depend on the
+ direction of the transformation.
+
+The warning is not emitted if the coordinate transformation is a pure rotation
+because such transformations do not change the origin of the coordinate frames,
+so the angular separation does not depend on the order of the coordinates::
+
+ >>> galactic = SkyCoord(0 * u.deg, 0 * u.deg, 10 * u.pc, frame="galactic")
+ >>> icrs.separation(galactic) # doctest: +FLOAT_CMP
+ <Angle 93.14572374 deg>
+ >>> galactic.separation(icrs) # doctest: +FLOAT_CMP
+ <Angle 93.14572374 deg>
+
+It is possible to suppress the warning::
+
+ >>> icrs.separation(gcrs, origin_mismatch="ignore") # doctest: +FLOAT_CMP +SHOW_WARNINGS
+ <Angle 100.67116925 deg>
+
+It is also possible to forbid non-rotation transformations::
+
+ >>> icrs.separation(gcrs, origin_mismatch="error") # doctest: +FLOAT_CMP
+ Traceback (most recent call last):
+ ...
+ astropy.coordinates.errors.NonRotationTransformationError: refusing to
+ transform other coordinates from <GCRS Frame (obstime=J2000.000,
+ obsgeoloc=(0., 0., 0.) m, obsgeovel=(0., 0., 0.) m / s)> to <ICRS Frame>
+ because angular separation can depend on the direction of the transformation
+
+Pure rotations will still succeed::
+
+ >>> galactic.separation(icrs, origin_mismatch="error") # doctest: +FLOAT_CMP
+ <Angle 93.14572374 deg>
+
+
.. _whatsnew-6.1-ascii-default-int-columns-as-int64:
``io.ascii`` uses 64-integers by default for integer columns
| [
{
"components": [
{
"doc": "Raised for transformations that are not simple rotations. Such\ntransformations can change the angular separation between coordinates\ndepending on its direction.",
"lines": [
35,
52
],
"name": "NonRotationTransformationError"... | [
"astropy/coordinates/tests/test_exceptions.py::test_NonRotationTransformationError_message[ICRS-custom_GCRS]",
"astropy/coordinates/tests/test_exceptions.py::test_NonRotationTransformationError_message[default_GCRS-Galactic]",
"astropy/coordinates/tests/test_exceptions.py::test_NonRotationTransformationWarning_... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Make computing angular separations less surprising
### Description
When computing the angular separation between coordinates in different frames they first need to be transformed into a common frame. However, the resulting angular separation value can depend on the direction of the transformation, unless it is a simple rotation. Users have found this to be surprising, so by default a warning should be emitted if a non-rotation transformation is needed. It should also be possible to always suppress the warning, or to forbid non-rotation transformations.
Computing the position angle can always be done silently because its value is expected to depend on the order of the coordinates anyways.
Closes #8505, closes #12189, closes #11388, closes #14812
<!-- Optional opt-out -->
- [x] By checking this box, the PR author has requested that maintainers do **NOT** use the "Squash and Merge" button. Maintainers should respect this when possible; however, the final decision is at the discretion of the maintainer that merges the PR.
----------
Thank you for your contribution to Astropy! 🌌 This checklist is meant to remind the package maintainers who will review this pull request of some common things to look for.
- [ ] Do the proposed changes actually accomplish desired goals?
- [ ] Do the proposed changes follow the [Astropy coding guidelines](https://docs.astropy.org/en/latest/development/codeguide.html)?
- [ ] Are tests added/updated as required? If so, do they follow the [Astropy testing guidelines](https://docs.astropy.org/en/latest/development/testguide.html)?
- [ ] Are docs added/updated as required? If so, do they follow the [Astropy documentation guidelines](https://docs.astropy.org/en/latest/development/docguide.html#astropy-documentation-rules-and-guidelines)?
- [ ] Is rebase and/or squash necessary? If so, please provide the author with appropriate instructions. Also see instructions for [rebase](https://docs.astropy.org/en/latest/development/workflow/development_workflow.html#rebase-if-necessary) and [squash](https://docs.astropy.org/en/latest/development/workflow/development_workflow.html#squash-if-necessary).
- [ ] Did the CI pass? If no, are the failures related? If you need to run daily and weekly cron jobs as part of the PR, please apply the "Extra CI" label. Codestyle issues can be fixed by the [bot](https://docs.astropy.org/en/latest/development/workflow/development_workflow.html#pre-commit).
- [ ] Is a change log needed? If yes, did the change log check pass? If no, add the "no-changelog-entry-needed" label. If this is a manual backport, use the "skip-changelog-checks" label unless special changelog handling is necessary.
- [ ] Is this a big PR that makes a "What's new?" entry worthwhile and if so, is (1) a "what's new" entry included in this PR and (2) the "whatsnew-needed" label applied?
- [ ] At the time of adding the milestone, if the milestone set requires a backport to release branch(es), apply the appropriate "backport-X.Y.x" label(s) *before* merge.
I'll write the What's New entry after the code and documentation changes are reviewed.
The RTD build is failing because Sphinx can't figure out how to link to the new type alias I've introduced. This can be addressed by adding a new entry to `docs/nitpick-exceptions`.
p.s. Personally not sure this needs a what's-new, since in the end this is a relatively small change, but leave that decision up to you.
FWIW, I agree with the suggestion of a keyword-only argument. I like the principle of a `StrEnum`, but feel that if we go that route, we should do that in a separate PR for more functions (and perhaps only when our minimum python version is 3.11, since really there is no hurry).
There's a few other places in `astropy` that allow users to either suppress warnings or to raise errors instead. For example, here's a configuration item in `utils` that uses the exact same values as `frame_mismatch`: https://github.com/astropy/astropy/blob/014036d15e17f8eb8ba3e7acd21f68c33e36fd65/astropy/utils/iers/iers.py#L190-L191
I agree that using `Enum` or `StrEnum` would be good idea, but it should be done consistently throughout `astropy` so it's beyond the scope for this pull request.
> p.s. Personally not sure this needs a what's-new, since in the end this is a relatively small change
This pull request closes 4 issues, and there are several closed duplicate issues. Clearly this is something that is important to the users and deserves to be highlighted.
I'll have to push another commit with the What's New entry, but everything else can be reviewed already.
> I like the principle of a StrEnum, but feel that if we go that route, we should do that in a separate PR for more functions (and perhaps only when our minimum python version is 3.11, since really there is no hurry).
> There's a few other places in astropy that allow users to either suppress warnings or to raise errors instead.
Dully noted, thank you both. This pattern is growing more and more onto my lately so I'm sure I'll remember to try it out at scale if an opportunity arises.
Anyway, back to this PR: thanks @eerovaher for addressing my comments, I'm happy to sign this off now !
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in astropy/coordinates/errors.py]
(definition of NonRotationTransformationError:)
class NonRotationTransformationError(ValueError):
"""Raised for transformations that are not simple rotations. Such
transformations can change the angular separation between coordinates
depending on its direction."""
(definition of NonRotationTransformationError.__init__:)
def __init__( self, frame_to: BaseCoordinateFrame, frame_from: BaseCoordinateFrame ) -> None:
(definition of NonRotationTransformationError.__str__:)
def __str__(self) -> str:
(definition of NonRotationTransformationWarning:)
class NonRotationTransformationWarning(AstropyUserWarning):
"""Emitted for transformations that are not simple rotations. Such
transformations can change the angular separation between coordinates
depending on its direction."""
(definition of NonRotationTransformationWarning.__init__:)
def __init__( self, frame_to: BaseCoordinateFrame, frame_from: BaseCoordinateFrame ) -> None:
(definition of NonRotationTransformationWarning.__str__:)
def __str__(self) -> str:
[end of new definitions in astropy/coordinates/errors.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
(Another) GCRS angular separation behavior
This issue is to report another counter-intuitive behavior of angular separations in the GCRS frame. It follows on similar reports in #5185 and #6633 (and may be of interest to @StuartLittlefair and @adrn).
The basic issue is that an object at fixed ICRS position observed at two different times will have different GCRS coordinates. However, the separation between these GCRS coordinates is calculated to be zero. Here is a MWE:
```
from astropy.coordinates import SkyCoord, GCRS, ICRS
from astropy.time import Time
obj = SkyCoord(ra=37.0*u.degree,dec=-4.5*u.degree, distance=3*u.au)
obj_t0 = obj.transform_to(GCRS(obstime=Time(57370.0, format='mjd')))
obj_t1 = obj.transform_to(GCRS(obstime=Time(57380.0, format='mjd')))
print(obj_t0)
print(obj_t1)
print("Obj(t0) RA,DEC: ", obj_t0.ra.deg,obj_t0.dec.deg)
print("Obj(t1) RA,DEC: ",obj_t1.ra.deg,obj_t1.dec.deg)
print(obj_t0.separation(obj_t1))
print(obj_t1.separation(obj_t0))
```
and the output:
```
<SkyCoord (GCRS: obstime=57370.0, obsgeoloc=(0., 0., 0.) m, obsgeovel=(0., 0., 0.) m / s): (ra, dec, distance) in (deg, deg, AU)
(22.02874564, -14.46782793, 2.49351509)>
<SkyCoord (GCRS: obstime=57380.0, obsgeoloc=(0., 0., 0.) m, obsgeovel=(0., 0., 0.) m / s): (ra, dec, distance) in (deg, deg, AU)
(20.38720594, -13.68925892, 2.64894343)>
Obj(t0) RA,DEC: 22.028745635550386 -14.467827929473025
Obj(t1) RA,DEC: 20.387205935596295 -13.689258920287072
0d00m00s
0d00m00s
```
My understanding is that the frames of `obj_t0` and `obj_t1` differ (both are GCRS, but at different `obstime`). Thus, `separation` is converting the two coordinates to the same `obstime`, and thus the angular separation is calculated to be zero.
```
print("Equivalent frame?",obj_t0.is_equivalent_frame(obj_t1))
```
While I'm guessing this is the "correct" behavior, it is definitely counter-intuitive (i.e., when trying to compute parallax corrections for solar system objects).
----------
This is the best version of this general issue yet. "All parallaxes are zero!"
This is an uninformed suggestion, but I wonder if `separation` should really be a method on a Representation, rather than (or in addition to) a Frame. And if there should be an option to calculate it that way when using higher-level objects like Frame or SkyCoord.
Thanks @kadrlica
Your diagnosis is spot on in terms of what is happening, that it is correct behaviour, and that it confounds expectation. When using `transform_to` on a ```SkyCoord``` you are getting a pointer to the *same position in spacetime* but in a different reference frame. ```SkyCoord.separation``` is doing the correct thing and returning a separation of zero because all three objects do indeed reference the same point, just in different frames.
However, there are many similar issues of people finding the behaviour of ```SkyCoord.separation``` counter intuitive that we really should look at improving the documentation to make this as clear as possible. The other classic example of this is people being surprised that ```a.separation(b)``` is not the same as ```b.separation(a)```.
I think the documentation could also do with some clear examples that explain the conceptual difference between ```SkyCoord``` and ```Representation``` objects. What you want in this case is the angle between the *representation* of the same *co-ordinate* in two different frames. The code snippet below does what you want neatly, but we should improve the docs so this isn't so confusing. Any advice as to how best to do that is greatly appreciated.
```python
repr_t0 = obj_t0.cartesian
repr_t1 = obj_t1.cartesian
# find the separation between these two vectors using dot product
sep = np.arccos(repr_t0.dot(repr_t1) / repr_t0.norm() / repr_t1.norm())
```
Thanks, glad to hear that this is on the radar. Improving the documentation may be a challenging solution. This operation seems intuitive, and I don't think that users are generally going to look for documentation before they try something like the example above. One idea could be to print a warning if `a` and `b` are the same, or if they are in different reference frames. However, I don't know the astropy policy on warnings (don't want to flood the user).
With regards to getting the angular separation value that I was interested in, I chose to use the lower-level `angle_utilities.angular_separation` with the spherical representations of each coordinate (i.e., following the source code for `SkyCoord.separation`, but without transforming the frames).
@StuartLittlefair your code snippet might be a useful temporary workaround, but I hope it's not intended as the permanent standard method to compute the desired quantity here. I would much prefer to use `repr_t0.separation(repr_t1)`. Or, something like`obj_t0.separation(obj_t1, representation=True)`.
It may be good to separate needs & wishes a bit. In the above I see three (all useful!):
1. `SkyCoord.separation` probably needs better documentation, to point the reader the fact that `SkyCoord` are treated as given points in space time, and *in that context* separation is meaningful only at a given time.
2. Something like a tutorial perhaps on how to deal with multiple measurements at different times, and, say, fit a single `SkyCoord` to them (i.e., inferring position, proper motion, parallax).
3. A new method or function that calculates separation/position angle, etc. for representations.
p.s. I don't think we should change `SkyCoord.separation` itself, or add a keyword argument to let it do something quite different conceptually.
This has come up again on the main AstroPy mailing list. Would it be useful to issue an `AstropyUserWarning` whenever the frames are different? The warning could briefly indicate a potential problem and provide a link to a documentation page that explains the issue in detail with a nice example.
@eteq, @adrn and I are wondering if it wouldn't be best to just raise an exception if the frames are not identical: it can be an origin that is different at the same time or different times with the same origin. Obviously, the exception message could then clarify things. (See also #11388)
I'd be in favour of raising an exception, but we should certainly deprecate this usage first, as this will break a lot of code in the wild, and people should get some warning...
--------------------Issue warning for (mis-)use of SkyCoord.separation with mixed frames
### What is the problem this feature will solve?
The calculation of the separation of my target from the Moon was wrong despite the code looking reasonable and no warning being printed.
Here is an example of inconsistent target-moon separations being calculated ...
```
>>> from astropy.coordinates import SkyCoord, get_moon
>>> from astropy.time import Time
>>> target_coo = SkyCoord.from_name('Kelt-11')
>>> time = Time('2020-03-09T14:50:41',scale='utc',format='isot')
>>> moon_coo = get_moon(time)
>>> print(moon_coo.separation(target_coo).degree,target_coo.separation(moon_coo).degree)
20.63726192992106 15.98210433623633
```
The first value is correct, but it is far from obvious that the second why to calculate this value will not give the correct value for an observer at the geocentre.
### Describe the desired outcome
A warning should be printed if SkyCoord.separation() is doing a coordinate transformation for one of the coordinates due to inconsistent reference frames.
### Additional context
_No response_
----------
It feels like different people keep getting bitten by this. Perhaps we need a warning box in https://docs.astropy.org/en/latest/coordinates/matchsep.html#separations ? cc @StuartLittlefair @adrn @eteq @eerovaher @mhvk
I thought we had a PR to clarify the docs but I cannot find it now. Maybe I misremember.
Agreed - I think a warning in the docs is necessary but insufficient. Something along the lines of
> c1.separation(c2) will calculate the separation of the two coordinates **in the frame of c1**. This can lead to unintended consequences if you do not pay attention to the frame of c1. For example, if you want to find the separation of two targets for an Earth-bound observer, c1 **must** be in an Earth-based frame like `GCRS`. If c1 is in the `ICRS` frame c1.separation(c2) will give the separation of the two coordinates, as seen from the solar system barycentre
Since people may not read the docs too closely, I also suggest raising a warning along similar lines if the two coordinates do not share the same frame.
This is related to #8505 and #12189. Apparently this is confusing enough for the users that raising a warning is justified.
Is it worthwhile to define a new class "Separation" based on Angle but that includes information on the origin of the frame in which the separation is measured? It would be easier to spot this type of problem if this class were returned instead of an angle.
Maybe it is better if there is an exception if the frames are not the same - let the user transform if they want to -- or add a keyword argument that can be set to exception, warning, or silent.
--------------------
</issues> | 2d281019494aaebf522f6626c0dae37510c16688 |
deepset-ai__haystack-7424 | 7,424 | deepset-ai/haystack | null | 189dfaf640caf7993d4ba367d6ea3bcb1b4eca11 | 2024-03-26T10:10:20Z | diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
index 479cd50063..0da03f913c 100644
--- a/haystack/components/evaluators/__init__.py
+++ b/haystack/components/evaluators/__init__.py
@@ -2,6 +2,7 @@
from .document_map import DocumentMAPEvaluator
from .document_mrr import DocumentMRREvaluator
from .document_recall import DocumentRecallEvaluator
+from .faithfulness import FaithfulnessEvaluator
from .llm_evaluator import LLMEvaluator
from .sas_evaluator import SASEvaluator
@@ -10,6 +11,7 @@
"DocumentMAPEvaluator",
"DocumentMRREvaluator",
"DocumentRecallEvaluator",
+ "FaithfulnessEvaluator",
"LLMEvaluator",
"SASEvaluator",
]
diff --git a/haystack/components/evaluators/faithfulness.py b/haystack/components/evaluators/faithfulness.py
new file mode 100644
index 0000000000..9ceb997330
--- /dev/null
+++ b/haystack/components/evaluators/faithfulness.py
@@ -0,0 +1,161 @@
+from typing import Any, Dict, List, Optional
+
+from numpy import mean as np_mean
+
+from haystack import default_from_dict
+from haystack.components.evaluators.llm_evaluator import LLMEvaluator
+from haystack.core.component import component
+from haystack.utils import Secret, deserialize_secrets_inplace
+
+
+class FaithfulnessEvaluator(LLMEvaluator):
+ """
+ Evaluator that checks if a generated answer can be inferred from the provided contexts.
+
+ An LLM separates the answer into multiple statements and checks whether the statement can be inferred from the
+ context or not. The final score for the full answer is a number from 0.0 to 1.0. It represents the proportion of
+ statements that can be inferred from the provided contexts.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import FaithfulnessEvaluator
+
+ questions = ["Who created the Python language?"]
+ contexts = [
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ responses = ["Python is a high-level general-purpose programming language that was created by George Lucas."]
+ evaluator = FaithfulnessEvaluator()
+ result = evaluator.run(questions=questions, contexts=contexts, responses=responses)
+ print(results["evaluator"])
+ # {'results': [{'statements': ['Python is a high-level general-purpose programming language.',
+ # 'Python was created by George Lucas.'], 'statement_scores':
+ # [1, 0], 'score': 0.5}], 'score': 0.5, 'individual_scores': [0.5]}
+
+ ```
+ """
+
+ def __init__(
+ self,
+ examples: Optional[List[Dict[str, Any]]] = None,
+ api: str = "openai",
+ api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
+ ):
+ """
+ Creates an instance of LLMEvaluator.
+
+ :param examples:
+ Few-shot examples conforming to the expected input and output format of FaithfulnessEvaluator.
+ Each example must be a dictionary with keys "inputs" and "outputs".
+ "inputs" must be a dictionary with keys "questions", "contexts", and "responses".
+ "outputs" must be a dictionary with "statements" and "statement_scores".
+ Expected format:
+ [{
+ "inputs": {
+ "questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
+ "responses": "Rome is the capital of Italy with more than 4 million inhabitants.",
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
+ "statement_scores": [1, 0],
+ },
+ }]
+ :param api:
+ The API to use for calling an LLM through a Generator.
+ Supported APIs: "openai".
+ :param api_key:
+ The API key.
+
+ """
+ self.instructions = (
+ "Your task is to judge the faithfulness or groundedness of statements based "
+ "on context information. First, please extract statements from a provided "
+ "response to a question. Second, calculate a faithfulness score for each "
+ "statement made in the response. The score is 1 if the statement can be "
+ "inferred from the provided context or 0 if it cannot be inferred."
+ )
+ self.inputs = [("questions", List[str]), ("contexts", List[List[str]]), ("responses", List[str])]
+ self.outputs = ["statements", "statement_scores"]
+ self.examples = examples or [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany and when was it founded?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ "responses": "The capital of Germany, Berlin, was founded in the 13th century.",
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 1],
+ },
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": ["Berlin is the capital of Germany."],
+ "responses": "Paris",
+ },
+ "outputs": {"statements": ["Paris is the capital of France."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of Italy?",
+ "contexts": ["Rome is the capital of Italy."],
+ "responses": "Rome is the capital of Italy with more than 4 million inhabitants.",
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
+ "statement_scores": [1, 0],
+ },
+ },
+ ]
+ self.api = api
+ self.api_key = api_key
+
+ super().__init__(
+ instructions=self.instructions,
+ inputs=self.inputs,
+ outputs=self.outputs,
+ examples=self.examples,
+ api=self.api,
+ api_key=self.api_key,
+ )
+
+ @component.output_types(results=List[Dict[str, Any]])
+ def run(self, **inputs) -> Dict[str, Any]:
+ """
+ Run the LLM evaluator.
+
+ :param inputs:
+ The input values to evaluate. The keys are the input names and the values are lists of input values.
+ :returns:
+ A dictionary with the following outputs:
+ - `score`: Mean faithfulness score over all the provided input answers.
+ - `individual_scores`: A list of faithfulness scores for each input answer.
+ - `results`: A list of dictionaries with `statements` and `statement_scores` for each input answer.
+ """
+ result = super().run(**inputs)
+
+ # calculate average statement faithfulness score per query
+ for res in result["results"]:
+ res["score"] = np_mean(res["statement_scores"])
+
+ # calculate average answer faithfulness score over all queries
+ result["score"] = np_mean([res["score"] for res in result["results"]])
+ result["individual_scores"] = [res["score"] for res in result["results"]]
+
+ return result
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "FaithfulnessEvaluator":
+ """
+ Deserialize this component from a dictionary.
+
+ :param data:
+ The dictionary representation of this component.
+ :returns:
+ The deserialized component instance.
+ """
+ deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
+ return default_from_dict(cls, data)
diff --git a/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml b/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
new file mode 100644
index 0000000000..5279d0d9c8
--- /dev/null
+++ b/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new FaithfulnessEvaluator component that can be used to evaluate faithfulness / groundedness / hallucinations of LLMs in a RAG pipeline.
+ Given a question, a list of retrieved document contents (contexts), and a predicted answer, FaithfulnessEvaluator returns a score ranging from 0 (poor faithfulness) to 1 (perfect faithfulness).
+ The score is the proportion of statements in the predicted answer that could by inferred from the documents.
| diff --git a/test/components/evaluators/test_faithfulness_evaluator.py b/test/components/evaluators/test_faithfulness_evaluator.py
new file mode 100644
index 0000000000..5776437366
--- /dev/null
+++ b/test/components/evaluators/test_faithfulness_evaluator.py
@@ -0,0 +1,129 @@
+from typing import List
+
+import pytest
+
+from haystack.components.evaluators import FaithfulnessEvaluator
+from haystack.utils.auth import Secret
+
+
+class TestFaithfulnessEvaluator:
+ def test_init_default(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = FaithfulnessEvaluator()
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.instructions == (
+ "Your task is to judge the faithfulness or groundedness of statements based "
+ "on context information. First, please extract statements from a provided "
+ "response to a question. Second, calculate a faithfulness score for each "
+ "statement made in the response. The score is 1 if the statement can be "
+ "inferred from the provided context or 0 if it cannot be inferred."
+ )
+ assert component.inputs == [("questions", List[str]), ("contexts", List[List[str]]), ("responses", List[str])]
+ assert component.outputs == ["statements", "statement_scores"]
+ assert component.examples == [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany and when was it founded?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ "responses": "The capital of Germany, Berlin, was founded in the 13th century.",
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 1],
+ },
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": ["Berlin is the capital of Germany."],
+ "responses": "Paris",
+ },
+ "outputs": {"statements": ["Paris is the capital of France."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of Italy?",
+ "contexts": ["Rome is the capital of Italy."],
+ "responses": "Rome is the capital of Italy with more than 4 million inhabitants.",
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
+ "statement_scores": [1, 0],
+ },
+ },
+ ]
+
+ def test_init_fail_wo_openai_api_key(self, monkeypatch):
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+ with pytest.raises(ValueError, match="None of the .* environment variables are set"):
+ FaithfulnessEvaluator()
+
+ def test_init_with_parameters(self):
+ component = FaithfulnessEvaluator(
+ api_key=Secret.from_token("test-api-key"),
+ api="openai",
+ examples=[
+ {"inputs": {"responses": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ],
+ )
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.api == "openai"
+ assert component.examples == [
+ {"inputs": {"responses": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ]
+
+ def test_from_dict(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
+ data = {
+ "type": "haystack.components.evaluators.faithfulness.FaithfulnessEvaluator",
+ "init_parameters": {
+ "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
+ "api": "openai",
+ "examples": [{"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"score": 0}}],
+ },
+ }
+ component = FaithfulnessEvaluator.from_dict(data)
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.examples == [
+ {"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"score": 0}}
+ ]
+
+ def test_run_calculates_mean_score(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = FaithfulnessEvaluator()
+
+ def generator_run(self, *args, **kwargs):
+ if "Football" in kwargs["prompt"]:
+ return {"replies": ['{"statements": ["a", "b"], "statement_scores": [1, 0]}']}
+ else:
+ return {"replies": ['{"statements": ["c", "d"], "statement_scores": [1, 1]}']}
+
+ monkeypatch.setattr("haystack.components.generators.openai.OpenAIGenerator.run", generator_run)
+
+ questions = ["Which is the most popular global sport?", "Who created the Python language?"]
+ contexts = [
+ [
+ "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people."
+ ],
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ responses = [
+ "Football is the most popular sport with around 4 billion followers worldwide.",
+ "Python is a high-level general-purpose programming language that was created by George Lucas.",
+ ]
+ results = component.run(questions=questions, contexts=contexts, responses=responses)
+ assert results == {
+ "individual_scores": [0.5, 1],
+ "results": [
+ {"score": 0.5, "statement_scores": [1, 0], "statements": ["a", "b"]},
+ {"score": 1, "statement_scores": [1, 1], "statements": ["c", "d"]},
+ ],
+ "score": 0.75,
+ }
| diff --git a/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml b/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
new file mode 100644
index 0000000000..5279d0d9c8
--- /dev/null
+++ b/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new FaithfulnessEvaluator component that can be used to evaluate faithfulness / groundedness / hallucinations of LLMs in a RAG pipeline.
+ Given a question, a list of retrieved document contents (contexts), and a predicted answer, FaithfulnessEvaluator returns a score ranging from 0 (poor faithfulness) to 1 (perfect faithfulness).
+ The score is the proportion of statements in the predicted answer that could by inferred from the documents.
| [
{
"components": [
{
"doc": "Evaluator that checks if a generated answer can be inferred from the provided contexts.\n\nAn LLM separates the answer into multiple statements and checks whether the statement can be inferred from the\ncontext or not. The final score for the full answer is a number fro... | [
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluator::test_init_default",
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluator::test_init_fail_wo_openai_api_key",
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluat... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add FaithfulnessEvaluator component
### Related Issues
- fixes #7024
### Proposed Changes:
- Added a new component `FaithfulnessEvaluator` that returns one aggregated faithfulness score and individual faithfulness scores for inputs of queries, contexts and responses. Uses `LLMEvaluator` under the hood.
### How did you test it?
New unit tests and the following local example:
```python
from haystack import Pipeline
from haystack.components.evaluators import FaithfulnessEvaluator
QUESTIONS = ["Which is the most popular global sport?", "Who created the Python language?"]
CONTEXTS = [
[
"The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people."
],
[
"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
],
]
RESPONSES = [
"Football is the most popular sport with around 4 billion followers worldwide.",
"Python is a high-level general-purpose programming language that was created by George Lucas.",
]
pipeline = Pipeline()
evaluator = FaithfulnessEvaluator()
pipeline.add_component("evaluator", evaluator)
results = pipeline.run({"evaluator": {"questions": QUESTIONS, "contexts": CONTEXTS, "responses": RESPONSES}})
print(results["evaluator"])
# {'results': [{'statements': ["Football is undoubtedly the world's most popular sport.", 'Football has around 4
# billion followers worldwide.'], 'statement_scores': [1, 1], 'name': 'llm', 'score': 1.0}, {'statements': ['Python
# is a high-level general-purpose programming language.', 'Python was created by George Lucas.'], 'statement_scores':
# [1, 0], 'score': 0.5}], 'score': 0.75, 'individual_scores': [1.0, 0.5]}
```
### Notes for the reviewer
We can discuss a good name separately. `FaithfulnessEvaluator`, `GroundednessEvaluator` or `HallucinationEvaluator` are good candidates. deepset Cloud has a groundedness metric already: https://docs.cloud.deepset.ai/docs/use-groundedness-observability
In contrast to the original issue description this PR doesn't calculate a binary score per answer but per statement in answer. This calculation is more complex but it's also more meaningful and standard in other eval frameworks.
Other frameworks do the splitting of an answer into statements in separate prompts. Here are examples:
- https://github.com/explodinggradients/ragas/blob/e9418237d0dd06f640b0f6bbae7a5a3f9c1029ea/src/ragas/metrics/_faithfulness.py#L91
- https://github.com/confident-ai/deepeval/blob/d5aa87b65308a0d683f09dafdb020715838c4e27/deepeval/metrics/faithfulness/template.py#L4
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/faithfulness.py]
(definition of FaithfulnessEvaluator:)
class FaithfulnessEvaluator(LLMEvaluator):
"""Evaluator that checks if a generated answer can be inferred from the provided contexts.
An LLM separates the answer into multiple statements and checks whether the statement can be inferred from the
context or not. The final score for the full answer is a number from 0.0 to 1.0. It represents the proportion of
statements that can be inferred from the provided contexts.
Usage example:
```python
from haystack.components.evaluators import FaithfulnessEvaluator
questions = ["Who created the Python language?"]
contexts = [
[
"Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
],
]
responses = ["Python is a high-level general-purpose programming language that was created by George Lucas."]
evaluator = FaithfulnessEvaluator()
result = evaluator.run(questions=questions, contexts=contexts, responses=responses)
print(results["evaluator"])
# {'results': [{'statements': ['Python is a high-level general-purpose programming language.',
# 'Python was created by George Lucas.'], 'statement_scores':
# [1, 0], 'score': 0.5}], 'score': 0.5, 'individual_scores': [0.5]}
```"""
(definition of FaithfulnessEvaluator.__init__:)
def __init__( self, examples: Optional[List[Dict[str, Any]]] = None, api: str = "openai", api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"), ):
"""Creates an instance of LLMEvaluator.
:param examples:
Few-shot examples conforming to the expected input and output format of FaithfulnessEvaluator.
Each example must be a dictionary with keys "inputs" and "outputs".
"inputs" must be a dictionary with keys "questions", "contexts", and "responses".
"outputs" must be a dictionary with "statements" and "statement_scores".
Expected format:
[{
"inputs": {
"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
"responses": "Rome is the capital of Italy with more than 4 million inhabitants.",
},
"outputs": {
"statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
"statement_scores": [1, 0],
},
}]
:param api:
The API to use for calling an LLM through a Generator.
Supported APIs: "openai".
:param api_key:
The API key."""
(definition of FaithfulnessEvaluator.run:)
def run(self, **inputs) -> Dict[str, Any]:
"""Run the LLM evaluator.
:param inputs:
The input values to evaluate. The keys are the input names and the values are lists of input values.
:returns:
A dictionary with the following outputs:
- `score`: Mean faithfulness score over all the provided input answers.
- `individual_scores`: A list of faithfulness scores for each input answer.
- `results`: A list of dictionaries with `statements` and `statement_scores` for each input answer."""
(definition of FaithfulnessEvaluator.from_dict:)
def from_dict(cls, data: Dict[str, Any]) -> "FaithfulnessEvaluator":
"""Deserialize this component from a dictionary.
:param data:
The dictionary representation of this component.
:returns:
The deserialized component instance."""
[end of new definitions in haystack/components/evaluators/faithfulness.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
LLM Eval - Implement Faithfulness/Factual Accuracy metric
Depends on https://github.com/deepset-ai/haystack/issues/7022.
Wrap `LLMEvaluator` to provide a component that calculates the "Faithfulness" or "Factual accuracy" based on the following inputs:
- Questions
- Contexts
- Responses
This component is meant to be plug-n-play, meaning it will provide a good enough starting prompt and examples. These should also be customizable by the user.
A requirement for this component is that the LLM is expected to return a binary value for each input tuple. This will let us calculate a final score for the dataset ourselves.
----------
Packages might call it "Answer relevance":
- https://docs.ragas.io/en/stable/concepts/metrics/answer_relevance.html
- https://docs.confident-ai.com/docs/metrics-answer-relevancy
The point of this metric is to assess the quality of your answer when you don't have labels. You need to look at 2 things in that case:
- how well the output answers the input (this metric)?
- how relevant is the output given the retrieved contexts ([context relevance](https://github.com/deepset-ai/haystack/issues/7025))?
**EDIT**
Made mistake about metrics, see comment below
@mrm1001 I think we're mixing up two metrics.
The issue is about faithfulness and that's what we have in dC as groundedness. We check whether the answer to the question can be inferred from the retrieved documents.
In your comment you mention answer relevance. That would be a different metric, which requires only question and answer. It checks whether the answer addresses the question by measuring vector similarity.
Sorry, you're right! This is faithfulness:
- according to [ragas](https://docs.ragas.io/en/latest/concepts/metrics/faithfulness.html): measures the factual consistency of the generated answer against the given context
- according to [deepval](https://docs.confident-ai.com/docs/metrics-faithfulness): measures the quality of your RAG pipeline's generator by evaluating whether the actual_output factually aligns with the contents of your retrieval_context.
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
pyro-ppl__pyro-3345 | 3,345 | pyro-ppl/pyro | null | 81def9c535113d2affd3d4a111a96d5a89fec52f | 2024-03-22T14:42:01Z | diff --git a/pyro/infer/__init__.py b/pyro/infer/__init__.py
index c0f3a26c3f..6934bd29fe 100644
--- a/pyro/infer/__init__.py
+++ b/pyro/infer/__init__.py
@@ -12,7 +12,7 @@
from pyro.infer.mcmc.hmc import HMC
from pyro.infer.mcmc.nuts import NUTS
from pyro.infer.mcmc.rwkernel import RandomWalkKernel
-from pyro.infer.predictive import Predictive
+from pyro.infer.predictive import Predictive, WeighedPredictive
from pyro.infer.renyi_elbo import RenyiELBO
from pyro.infer.rws import ReweightedWakeSleep
from pyro.infer.smcfilter import SMCFilter
@@ -62,4 +62,5 @@
"TraceTailAdaptive_ELBO",
"Trace_ELBO",
"Trace_MMD",
+ "WeighedPredictive",
]
diff --git a/pyro/infer/importance.py b/pyro/infer/importance.py
index d7c25a843d..d25cf16680 100644
--- a/pyro/infer/importance.py
+++ b/pyro/infer/importance.py
@@ -12,6 +12,7 @@
from .abstract_infer import TracePosterior
from .enum import get_importance_trace
+from .util import plate_log_prob_sum
class Importance(TracePosterior):
@@ -143,22 +144,9 @@ def _fn(*args, **kwargs):
log_weights = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
else:
wd = guide_trace.plate_to_symbol["num_particles_vectorized"]
- log_weights = 0.0
- for site in model_trace.nodes.values():
- if site["type"] != "sample":
- continue
- log_weights += torch.einsum(
- site["packed"]["log_prob"]._pyro_dims + "->" + wd,
- [site["packed"]["log_prob"]],
- )
-
- for site in guide_trace.nodes.values():
- if site["type"] != "sample":
- continue
- log_weights -= torch.einsum(
- site["packed"]["log_prob"]._pyro_dims + "->" + wd,
- [site["packed"]["log_prob"]],
- )
+ log_weights = plate_log_prob_sum(model_trace, wd) - plate_log_prob_sum(
+ guide_trace, wd
+ )
if normalized:
log_weights = log_weights - torch.logsumexp(log_weights)
diff --git a/pyro/infer/predictive.py b/pyro/infer/predictive.py
index 9d8b1c7f76..6be8b5cb5f 100644
--- a/pyro/infer/predictive.py
+++ b/pyro/infer/predictive.py
@@ -3,11 +3,14 @@
import warnings
from functools import reduce
+from typing import List, NamedTuple, Union
import torch
import pyro
import pyro.poutine as poutine
+from pyro.infer.util import plate_log_prob_sum
+from pyro.poutine.trace_struct import Trace
from pyro.poutine.util import prune_subsample_sites
@@ -31,16 +34,20 @@ def _guess_max_plate_nesting(model, args, kwargs):
return max_plate_nesting
+class _predictiveResults(NamedTuple):
+ """
+ Return value of call to ``_predictive`` and ``_predictive_sequential``.
+ """
+
+ samples: dict
+ trace: Union[Trace, List[Trace]]
+
+
def _predictive_sequential(
- model,
- posterior_samples,
- model_args,
- model_kwargs,
- num_samples,
- return_site_shapes,
- return_trace=False,
+ model, posterior_samples, model_args, model_kwargs, num_samples, return_site_shapes
):
- collected = []
+ collected_samples = []
+ collected_trace = []
samples = [
{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)
]
@@ -48,20 +55,21 @@ def _predictive_sequential(
trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(
*model_args, **model_kwargs
)
- if return_trace:
- collected.append(trace)
- else:
- collected.append(
- {site: trace.nodes[site]["value"] for site in return_site_shapes}
- )
+ collected_trace.append(trace)
+ collected_samples.append(
+ {site: trace.nodes[site]["value"] for site in return_site_shapes}
+ )
- if return_trace:
- return collected
- else:
- return {
- site: torch.stack([s[site] for s in collected]).reshape(shape)
+ return _predictiveResults(
+ trace=collected_trace,
+ samples={
+ site: torch.stack([s[site] for s in collected_samples]).reshape(shape)
for site, shape in return_site_shapes.items()
- }
+ },
+ )
+
+
+_predictive_vectorize_plate_name = "_num_predictive_samples"
def _predictive(
@@ -69,15 +77,15 @@ def _predictive(
posterior_samples,
num_samples,
return_sites=(),
- return_trace=False,
parallel=False,
model_args=(),
model_kwargs={},
+ mask=True,
):
- model = torch.no_grad()(poutine.mask(model, mask=False))
+ model = torch.no_grad()(poutine.mask(model, mask=False) if mask else model)
max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
vectorize = pyro.plate(
- "_num_predictive_samples", num_samples, dim=-max_plate_nesting - 1
+ _predictive_vectorize_plate_name, num_samples, dim=-max_plate_nesting - 1
)
model_trace = prune_subsample_sites(
poutine.trace(model).get_trace(*model_args, **model_kwargs)
@@ -93,12 +101,6 @@ def _predictive(
)
reshaped_samples[name] = sample
- if return_trace:
- trace = poutine.trace(
- poutine.condition(vectorize(model), reshaped_samples)
- ).get_trace(*model_args, **model_kwargs)
- return trace
-
return_site_shapes = {}
for site in model_trace.stochastic_nodes + model_trace.observation_nodes:
append_ndim = max_plate_nesting - len(model_trace.nodes[site]["fn"].batch_shape)
@@ -131,7 +133,6 @@ def _predictive(
model_kwargs,
num_samples,
return_site_shapes,
- return_trace=False,
)
trace = poutine.trace(
@@ -148,7 +149,7 @@ def _predictive(
else:
predictions[site] = value.reshape(shape)
- return predictions
+ return _predictiveResults(trace=trace, samples=predictions)
class Predictive(torch.nn.Module):
@@ -269,7 +270,7 @@ def forward(self, *args, **kwargs):
parallel=self.parallel,
model_args=args,
model_kwargs=kwargs,
- )
+ ).samples
return _predictive(
self.model,
posterior_samples,
@@ -278,7 +279,7 @@ def forward(self, *args, **kwargs):
parallel=self.parallel,
model_args=args,
model_kwargs=kwargs,
- )
+ ).samples
def get_samples(self, *args, **kwargs):
warnings.warn(
@@ -304,12 +305,144 @@ def get_vectorized_trace(self, *args, **kwargs):
parallel=self.parallel,
model_args=args,
model_kwargs=kwargs,
- )
+ ).samples
return _predictive(
self.model,
posterior_samples,
self.num_samples,
- return_trace=True,
+ parallel=True,
model_args=args,
model_kwargs=kwargs,
+ ).trace
+
+
+class WeighedPredictiveResults(NamedTuple):
+ """
+ Return value of call to instance of :class:`WeighedPredictive`.
+ """
+
+ samples: Union[dict, tuple]
+ log_weights: torch.Tensor
+ guide_log_prob: torch.Tensor
+ model_log_prob: torch.Tensor
+
+
+class WeighedPredictive(Predictive):
+ """
+ Class used to construct a weighed predictive distribution that is based
+ on the same initialization interface as :class:`Predictive`.
+
+ The methods `.forward` and `.call` can be called with an additional keyword argument
+ ``model_guide`` which is the model used to create and optimize the guide (if not
+ provided ``model_guide`` defaults to ``self.model``), and they return both samples and log_weights.
+
+ The weights are calculated as the per sample gap between the model_guide log-probability
+ and the guide log-probability (a guide must always be provided).
+
+ A typical use case would be based on a ``model`` :math:`p(x,z)=p(x|z)p(z)` and ``guide`` :math:`q(z)`
+ that has already been fitted to the model given observations :math:`p(X_{obs},z)`, both of which
+ are provided at itialization of :class:`WeighedPredictive` (same as you would do with :class:`Predictive`).
+ When calling an instance of :class:`WeighedPredictive` we provide the model given observations :math:`p(X_{obs},z)`
+ as the keyword argument ``model_guide``.
+ The resulting output would be the usual samples :math:`p(x|z)q(z)` returned by :class:`Predictive`,
+ along with per sample weights :math:`p(X_{obs},z)/q(z)`. The samples and weights can be fed into
+ :any:`weighed_quantile` in order to obtain the true quantiles of the resulting distribution.
+
+ Note that the ``model`` can be more elaborate with sample sites :math:`y` that are not observed
+ and are not part of the guide, if the samples sites :math:`y` are sampled after the observations
+ and the latent variables sampled by the guide, such that :math:`p(x,y,z)=p(y|x,z)p(x|z)p(z)` where
+ each element in the product represents a set of ``pyro.sample`` statements.
+ """
+
+ def call(self, *args, **kwargs):
+ """
+ Method `.call` that is backwards compatible with the same method found in :class:`Predictive`
+ but can be called with an additional keyword argument `model_guide`
+ which is the model used to create and optimize the guide.
+
+ Returns :class:`WeighedPredictiveResults` which has attributes ``.samples`` and per sample
+ weights ``.log_weights``.
+ """
+ result = self.forward(*args, **kwargs)
+ return WeighedPredictiveResults(
+ samples=tuple(v for _, v in sorted(result.items())),
+ log_weights=result.log_weights,
+ guide_log_prob=result.guide_log_prob,
+ model_log_prob=result.model_log_prob,
+ )
+
+ def forward(self, *args, **kwargs):
+ """
+ Method `.forward` that is backwards compatible with the same method found in :class:`Predictive`
+ but can be called with an additional keyword argument `model_guide`
+ which is the model used to create and optimize the guide.
+
+ Returns :class:`WeighedPredictiveResults` which has attributes ``.samples`` and per sample
+ weights ``.log_weights``.
+ """
+ model_guide = kwargs.pop("model_guide", self.model)
+ return_sites = self.return_sites
+ # return all sites by default if a guide is provided.
+ return_sites = None if not return_sites else return_sites
+ guide_predictive = _predictive(
+ self.guide,
+ self.posterior_samples,
+ self.num_samples,
+ return_sites=None,
+ parallel=self.parallel,
+ model_args=args,
+ model_kwargs=kwargs,
+ mask=False,
+ )
+ posterior_samples = guide_predictive.samples
+ model_predictive = _predictive(
+ model_guide,
+ posterior_samples,
+ self.num_samples,
+ return_sites=return_sites,
+ parallel=self.parallel,
+ model_args=args,
+ model_kwargs=kwargs,
+ mask=False,
+ )
+ if not isinstance(guide_predictive.trace, list):
+ guide_trace = prune_subsample_sites(guide_predictive.trace)
+ model_trace = prune_subsample_sites(model_predictive.trace)
+ guide_trace.compute_score_parts()
+ model_trace.compute_log_prob()
+ guide_trace.pack_tensors()
+ model_trace.pack_tensors(guide_trace.plate_to_symbol)
+ plate_symbol = guide_trace.plate_to_symbol[_predictive_vectorize_plate_name]
+ guide_log_prob = plate_log_prob_sum(guide_trace, plate_symbol)
+ model_log_prob = plate_log_prob_sum(model_trace, plate_symbol)
+ else:
+ guide_log_prob = torch.Tensor(
+ [
+ trace_element.log_prob_sum()
+ for trace_element in guide_predictive.trace
+ ]
+ )
+ model_log_prob = torch.Tensor(
+ [
+ trace_element.log_prob_sum()
+ for trace_element in model_predictive.trace
+ ]
+ )
+ return WeighedPredictiveResults(
+ samples=(
+ _predictive(
+ self.model,
+ posterior_samples,
+ self.num_samples,
+ return_sites=return_sites,
+ parallel=self.parallel,
+ model_args=args,
+ model_kwargs=kwargs,
+ ).samples
+ if model_guide is not self.model
+ else model_predictive.samples
+ ),
+ log_weights=model_log_prob - guide_log_prob,
+ guide_log_prob=guide_log_prob,
+ model_log_prob=model_log_prob,
)
diff --git a/pyro/infer/util.py b/pyro/infer/util.py
index 7ea460c1ec..13e1d9e12f 100644
--- a/pyro/infer/util.py
+++ b/pyro/infer/util.py
@@ -14,6 +14,7 @@
from pyro.ops import packed
from pyro.ops.einsum.adjoint import require_backward
from pyro.ops.rings import MarginalRing
+from pyro.poutine.trace_struct import Trace
from pyro.poutine.util import site_is_subsample
from .. import settings
@@ -342,3 +343,18 @@ def check_fully_reparametrized(guide_site):
raise NotImplementedError(
"All distributions in the guide must be fully reparameterized."
)
+
+
+def plate_log_prob_sum(trace: Trace, plate_symbol: str) -> torch.Tensor:
+ """
+ Get log probability sum from trace while keeping indexing over the specified plate.
+ """
+ log_prob_sum = 0.0
+ for site in trace.nodes.values():
+ if site["type"] != "sample":
+ continue
+ log_prob_sum += torch.einsum(
+ site["packed"]["log_prob"]._pyro_dims + "->" + plate_symbol,
+ [site["packed"]["log_prob"]],
+ )
+ return log_prob_sum
diff --git a/pyro/ops/stats.py b/pyro/ops/stats.py
index 2ec57d4784..8e0bd2631f 100644
--- a/pyro/ops/stats.py
+++ b/pyro/ops/stats.py
@@ -277,14 +277,17 @@ def weighed_quantile(
:param int dim: dimension to take quantiles from ``input``.
:returns torch.Tensor: quantiles of ``input`` at ``probs``.
- Example:
- >>> from pyro.ops.stats import weighed_quantile
- >>> import torch
- >>> input = torch.Tensor([[10, 50, 40], [20, 30, 0]])
- >>> probs = torch.Tensor([0.2, 0.8])
- >>> log_weights = torch.Tensor([0.4, 0.5, 0.1]).log()
- >>> result = weighed_quantile(input, probs, log_weights, -1)
- >>> torch.testing.assert_close(result, torch.Tensor([[40.4, 47.6], [9.0, 26.4]]))
+ **Example:**
+
+ .. doctest::
+
+ >>> from pyro.ops.stats import weighed_quantile
+ >>> import torch
+ >>> input = torch.Tensor([[10, 50, 40], [20, 30, 0]])
+ >>> probs = torch.Tensor([0.2, 0.8])
+ >>> log_weights = torch.Tensor([0.4, 0.5, 0.1]).log()
+ >>> result = weighed_quantile(input, probs, log_weights, -1)
+ >>> torch.testing.assert_close(result, torch.Tensor([[40.4, 47.6], [9.0, 26.4]]))
"""
dim = dim if dim >= 0 else (len(input.shape) + dim)
if isinstance(probs, (list, tuple)):
| diff --git a/tests/infer/test_predictive.py b/tests/infer/test_predictive.py
index fc6f63fa37..1f28e1f05c 100644
--- a/tests/infer/test_predictive.py
+++ b/tests/infer/test_predictive.py
@@ -8,7 +8,7 @@
import pyro.distributions as dist
import pyro.optim as optim
import pyro.poutine as poutine
-from pyro.infer import SVI, Predictive, Trace_ELBO
+from pyro.infer import SVI, Predictive, Trace_ELBO, WeighedPredictive
from pyro.infer.autoguide import AutoDelta, AutoDiagonalNormal
from tests.common import assert_close
@@ -39,29 +39,44 @@ def beta_guide(num_trials):
pyro.sample("phi", phi_posterior)
+@pytest.mark.parametrize("predictive", [Predictive, WeighedPredictive])
@pytest.mark.parametrize("parallel", [False, True])
-def test_posterior_predictive_svi_manual_guide(parallel):
+def test_posterior_predictive_svi_manual_guide(parallel, predictive):
true_probs = torch.ones(5) * 0.7
- num_trials = torch.ones(5) * 1000
+ num_trials = (
+ torch.ones(5) * 400
+ ) # Reduced to 400 from 1000 in order for guide optimization to converge
num_success = dist.Binomial(num_trials, true_probs).sample()
conditioned_model = poutine.condition(model, data={"obs": num_success})
elbo = Trace_ELBO(num_particles=100, vectorize_particles=True)
- svi = SVI(conditioned_model, beta_guide, optim.Adam(dict(lr=1.0)), elbo)
- for i in range(1000):
+ svi = SVI(conditioned_model, beta_guide, optim.Adam(dict(lr=3.0)), elbo)
+ for i in range(
+ 5000
+ ): # Increased to 5000 from 1000 in order for guide optimization to converge
svi.step(num_trials)
- posterior_predictive = Predictive(
+ posterior_predictive = predictive(
model,
guide=beta_guide,
num_samples=10000,
parallel=parallel,
return_sites=["_RETURN"],
)
- marginal_return_vals = posterior_predictive(num_trials)["_RETURN"]
- assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
+ if predictive is Predictive:
+ marginal_return_vals = posterior_predictive(num_trials)["_RETURN"]
+ else:
+ weighed_samples = posterior_predictive(
+ num_trials, model_guide=conditioned_model
+ )
+ marginal_return_vals = weighed_samples.samples["_RETURN"]
+ assert marginal_return_vals.shape[:1] == weighed_samples.log_weights.shape
+ # Weights should be uniform as the guide has the same distribution as the model
+ assert weighed_samples.log_weights.std() < 0.6
+ assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 280, rtol=0.1)
+@pytest.mark.parametrize("predictive", [Predictive, WeighedPredictive])
@pytest.mark.parametrize("parallel", [False, True])
-def test_posterior_predictive_svi_auto_delta_guide(parallel):
+def test_posterior_predictive_svi_auto_delta_guide(parallel, predictive):
true_probs = torch.ones(5) * 0.7
num_trials = torch.ones(5) * 1000
num_success = dist.Binomial(num_trials, true_probs).sample()
@@ -70,15 +85,23 @@ def test_posterior_predictive_svi_auto_delta_guide(parallel):
svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=1.0)), Trace_ELBO())
for i in range(1000):
svi.step(num_trials)
- posterior_predictive = Predictive(
+ posterior_predictive = predictive(
model, guide=guide, num_samples=10000, parallel=parallel
)
- marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
+ if predictive is Predictive:
+ marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
+ else:
+ weighed_samples = posterior_predictive.get_samples(
+ num_trials, model_guide=conditioned_model
+ )
+ marginal_return_vals = weighed_samples.samples["obs"]
+ assert marginal_return_vals.shape[:1] == weighed_samples.log_weights.shape
assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
+@pytest.mark.parametrize("predictive", [Predictive, WeighedPredictive])
@pytest.mark.parametrize("return_trace", [False, True])
-def test_posterior_predictive_svi_auto_diag_normal_guide(return_trace):
+def test_posterior_predictive_svi_auto_diag_normal_guide(return_trace, predictive):
true_probs = torch.ones(5) * 0.7
num_trials = torch.ones(5) * 1000
num_success = dist.Binomial(num_trials, true_probs).sample()
@@ -87,7 +110,7 @@ def test_posterior_predictive_svi_auto_diag_normal_guide(return_trace):
svi = SVI(conditioned_model, guide, optim.Adam(dict(lr=0.1)), Trace_ELBO())
for i in range(1000):
svi.step(num_trials)
- posterior_predictive = Predictive(
+ posterior_predictive = predictive(
model, guide=guide, num_samples=10000, parallel=True
)
if return_trace:
@@ -95,7 +118,14 @@ def test_posterior_predictive_svi_auto_diag_normal_guide(return_trace):
num_trials
).nodes["obs"]["value"]
else:
- marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
+ if predictive is Predictive:
+ marginal_return_vals = posterior_predictive.get_samples(num_trials)["obs"]
+ else:
+ weighed_samples = posterior_predictive.get_samples(
+ num_trials, model_guide=conditioned_model
+ )
+ marginal_return_vals = weighed_samples.samples["obs"]
+ assert marginal_return_vals.shape[:1] == weighed_samples.log_weights.shape
assert_close(marginal_return_vals.mean(dim=0), torch.ones(5) * 700, rtol=0.05)
@@ -113,8 +143,9 @@ def test_posterior_predictive_svi_one_hot():
assert_close(marginal_return_vals.mean(dim=0), true_probs.unsqueeze(0), rtol=0.1)
+@pytest.mark.parametrize("predictive", [Predictive, WeighedPredictive])
@pytest.mark.parametrize("parallel", [False, True])
-def test_shapes(parallel):
+def test_shapes(parallel, predictive):
num_samples = 10
def model():
@@ -132,22 +163,26 @@ def model():
expected = poutine.replay(vectorize(model), trace)()
# Use Predictive.
- predictive = Predictive(
+ actual = predictive(
model,
guide=guide,
return_sites=["x", "y"],
num_samples=num_samples,
parallel=parallel,
- )
- actual = predictive()
+ )()
+ if predictive is WeighedPredictive:
+ assert actual.samples["x"].shape[:1] == actual.log_weights.shape
+ assert actual.samples["y"].shape[:1] == actual.log_weights.shape
+ actual = actual.samples
assert set(actual) == set(expected)
assert actual["x"].shape == expected["x"].shape
assert actual["y"].shape == expected["y"].shape
+@pytest.mark.parametrize("predictive", [Predictive, WeighedPredictive])
@pytest.mark.parametrize("with_plate", [True, False])
@pytest.mark.parametrize("event_shape", [(), (2,)])
-def test_deterministic(with_plate, event_shape):
+def test_deterministic(with_plate, event_shape, predictive):
def model(y=None):
with pyro.util.optional(pyro.plate("plate", 3), with_plate):
x = pyro.sample("x", dist.Normal(0, 1).expand(event_shape).to_event())
@@ -162,9 +197,13 @@ def model(y=None):
for i in range(100):
svi.step(y)
- actual = Predictive(
+ actual = predictive(
model, guide=guide, return_sites=["x2", "x3"], num_samples=1000
)()
+ if predictive is WeighedPredictive:
+ assert actual.samples["x2"].shape[:1] == actual.log_weights.shape
+ assert actual.samples["x3"].shape[:1] == actual.log_weights.shape
+ actual = actual.samples
x2_batch_shape = (3,) if with_plate else ()
assert actual["x2"].shape == (1000,) + x2_batch_shape + event_shape
# x3 shape is prepended 1 to match Pyro shape semantics
| [
{
"components": [
{
"doc": "Return value of call to ``_predictive`` and ``_predictive_sequential``.",
"lines": [
37,
43
],
"name": "_predictiveResults",
"signature": "class _predictiveResults(NamedTuple):",
"type": "class"
},
... | [
"tests/infer/test_predictive.py::test_posterior_predictive_svi_manual_guide[False-Predictive]",
"tests/infer/test_predictive.py::test_posterior_predictive_svi_manual_guide[False-WeighedPredictive]",
"tests/infer/test_predictive.py::test_posterior_predictive_svi_manual_guide[True-Predictive]",
"tests/infer/tes... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Introducing pyro.infer.predictive.WeighedPredictive which reports weights along with predicted samples
# The Problem
When sampling from the posterior predictive distribution we are often using a guide as an approximation for the posterior. As mentioned in #3340 it is often desirable to correct for the non-uniform per sample gap between the model log-probability and the guide log-probability. This gap is essentially the weight that should be assigned to each sample.
The current implementation of `pyro.infer.predictive.Predictive` does not support calculation of these weights.
# The Proposed Solution
Add `pyro.infer.predictive.WeighedPredictive` which supports calculation of per sample weights.
The implementation relies on three objects:
- Model which samples from priors and observations (same as in instantiation of `pyro.infer.predictive.Predictive`).
- Guide which approximates the posterior given observations (same as in instantiation of `pyro.infer.predictive.Predictive`).
- Model with observations constrained to be the actual observations. This model was used in creating the guide and is provided to instances of `pyro.infer.predictive.WeighedPredictive` when called as the keyword argument `model_guide` (as in the model that was used when creating the guide).
The `model_guide` is what enables calculation of the weights. If not provided we use the model provided at instantiation of `pyro.infer.predictive.WeighedPredictive` as the `model_guide` (in this case the model provided at instantiation is usually already with observations constrained to be the actual observations).
# Design Considerations
- Maintain backwards compatibility of `pyro.infer.predictive.Predictive`.
- Reuse as much as possible from `pyro.infer.predictive.Predictive` when implementing `pyro.infer.predictive.WeighedPredictive`.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pyro/infer/predictive.py]
(definition of _predictiveResults:)
class _predictiveResults(NamedTuple):
"""Return value of call to ``_predictive`` and ``_predictive_sequential``."""
(definition of WeighedPredictiveResults:)
class WeighedPredictiveResults(NamedTuple):
"""Return value of call to instance of :class:`WeighedPredictive`."""
(definition of WeighedPredictive:)
class WeighedPredictive(Predictive):
"""Class used to construct a weighed predictive distribution that is based
on the same initialization interface as :class:`Predictive`.
The methods `.forward` and `.call` can be called with an additional keyword argument
``model_guide`` which is the model used to create and optimize the guide (if not
provided ``model_guide`` defaults to ``self.model``), and they return both samples and log_weights.
The weights are calculated as the per sample gap between the model_guide log-probability
and the guide log-probability (a guide must always be provided).
A typical use case would be based on a ``model`` :math:`p(x,z)=p(x|z)p(z)` and ``guide`` :math:`q(z)`
that has already been fitted to the model given observations :math:`p(X_{obs},z)`, both of which
are provided at itialization of :class:`WeighedPredictive` (same as you would do with :class:`Predictive`).
When calling an instance of :class:`WeighedPredictive` we provide the model given observations :math:`p(X_{obs},z)`
as the keyword argument ``model_guide``.
The resulting output would be the usual samples :math:`p(x|z)q(z)` returned by :class:`Predictive`,
along with per sample weights :math:`p(X_{obs},z)/q(z)`. The samples and weights can be fed into
:any:`weighed_quantile` in order to obtain the true quantiles of the resulting distribution.
Note that the ``model`` can be more elaborate with sample sites :math:`y` that are not observed
and are not part of the guide, if the samples sites :math:`y` are sampled after the observations
and the latent variables sampled by the guide, such that :math:`p(x,y,z)=p(y|x,z)p(x|z)p(z)` where
each element in the product represents a set of ``pyro.sample`` statements."""
(definition of WeighedPredictive.call:)
def call(self, *args, **kwargs):
"""Method `.call` that is backwards compatible with the same method found in :class:`Predictive`
but can be called with an additional keyword argument `model_guide`
which is the model used to create and optimize the guide.
Returns :class:`WeighedPredictiveResults` which has attributes ``.samples`` and per sample
weights ``.log_weights``."""
(definition of WeighedPredictive.forward:)
def forward(self, *args, **kwargs):
"""Method `.forward` that is backwards compatible with the same method found in :class:`Predictive`
but can be called with an additional keyword argument `model_guide`
which is the model used to create and optimize the guide.
Returns :class:`WeighedPredictiveResults` which has attributes ``.samples`` and per sample
weights ``.log_weights``."""
[end of new definitions in pyro/infer/predictive.py]
[start of new definitions in pyro/infer/util.py]
(definition of plate_log_prob_sum:)
def plate_log_prob_sum(trace: Trace, plate_symbol: str) -> torch.Tensor:
"""Get log probability sum from trace while keeping indexing over the specified plate."""
[end of new definitions in pyro/infer/util.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 64e71eee1c14dc926d5cbc5e762b6337bb4750a6 | ||
falconry__falcon-2217 | 2,217 | falconry/falcon | null | a78cfb38a0c0f6031cc3ff39ff8bf4afd03ef008 | 2024-03-21T20:58:42Z | diff --git a/README.rst b/README.rst
index 26105b0dd..738b2f2b8 100644
--- a/README.rst
+++ b/README.rst
@@ -1027,7 +1027,7 @@ See also: `CONTRIBUTING.md <https://github.com/falconry/falcon/blob/master/CONTR
Legal
-----
-Copyright 2013-2023 by Individual and corporate contributors as
+Copyright 2013-2024 by Individual and corporate contributors as
noted in the individual source files.
Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/docs/_newsfragments/2066.newandimproved.rst b/docs/_newsfragments/2066.newandimproved.rst
new file mode 100644
index 000000000..8bbee4797
--- /dev/null
+++ b/docs/_newsfragments/2066.newandimproved.rst
@@ -0,0 +1,4 @@
+In Python 3.13, the ``cgi`` module is removed entirely from the stdlib,
+including its ``parse_header()`` method. Falcon addresses the issue by shipping
+an own implementation; :func:`falcon.parse_header` can also be used in your projects
+affected by the removal.
diff --git a/docs/api/util.rst b/docs/api/util.rst
index d46f8b6f8..2c645fede 100644
--- a/docs/api/util.rst
+++ b/docs/api/util.rst
@@ -34,6 +34,11 @@ HTTP Status
.. autofunction:: falcon.code_to_http_status
.. autofunction:: falcon.get_http_status
+Media types
+-----------
+
+.. autofunction:: falcon.parse_header
+
Async
-----
diff --git a/docs/user/recipes/pretty-json.rst b/docs/user/recipes/pretty-json.rst
index b6e5e4dc0..5faf59e22 100644
--- a/docs/user/recipes/pretty-json.rst
+++ b/docs/user/recipes/pretty-json.rst
@@ -52,7 +52,6 @@ implemented with a :ref:`custom media handler <custom-media-handler-type>`:
.. code:: python
- import cgi
import json
import falcon
@@ -66,7 +65,7 @@ implemented with a :ref:`custom media handler <custom-media-handler-type>`:
return json.loads(data.decode())
def serialize(self, media, content_type):
- _, params = cgi.parse_header(content_type)
+ _, params = falcon.parse_header(content_type)
indent = params.get('indent')
if indent is not None:
try:
diff --git a/falcon/__init__.py b/falcon/__init__.py
index 1d33539b3..745856058 100644
--- a/falcon/__init__.py
+++ b/falcon/__init__.py
@@ -77,6 +77,7 @@
from falcon.util import IS_64_BITS
from falcon.util import is_python_func
from falcon.util import misc
+from falcon.util import parse_header
from falcon.util import reader
from falcon.util import runs_sync
from falcon.util import secure_filename
diff --git a/falcon/asgi/multipart.py b/falcon/asgi/multipart.py
index b268c2b5a..52cb0505e 100644
--- a/falcon/asgi/multipart.py
+++ b/falcon/asgi/multipart.py
@@ -14,11 +14,10 @@
"""ASGI multipart form media handler components."""
-import cgi
-
from falcon.asgi.reader import BufferedReader
from falcon.errors import DelimiterError
from falcon.media import multipart
+from falcon.util.mediatypes import parse_header
_ALLOWED_CONTENT_HEADERS = multipart._ALLOWED_CONTENT_HEADERS
_CRLF = multipart._CRLF
@@ -54,7 +53,7 @@ async def get_media(self):
return self._media
async def get_text(self):
- content_type, options = cgi.parse_header(self.content_type)
+ content_type, options = parse_header(self.content_type)
if content_type != 'text/plain':
return None
diff --git a/falcon/media/multipart.py b/falcon/media/multipart.py
index c3fc37d56..5b55d4b4f 100644
--- a/falcon/media/multipart.py
+++ b/falcon/media/multipart.py
@@ -14,7 +14,6 @@
"""Multipart form media handler."""
-import cgi
import re
from urllib.parse import unquote_to_bytes
@@ -24,6 +23,7 @@
from falcon.stream import BoundedStream
from falcon.util import BufferedReader
from falcon.util import misc
+from falcon.util.mediatypes import parse_header
# TODO(vytas):
@@ -249,7 +249,7 @@ def get_text(self):
str: The part decoded as a text string provided the part is
encoded as ``text/plain``, ``None`` otherwise.
"""
- content_type, options = cgi.parse_header(self.content_type)
+ content_type, options = parse_header(self.content_type)
if content_type != 'text/plain':
return None
@@ -275,7 +275,7 @@ def filename(self):
if self._content_disposition is None:
value = self._headers.get(b'content-disposition', b'')
- self._content_disposition = cgi.parse_header(value.decode())
+ self._content_disposition = parse_header(value.decode())
_, params = self._content_disposition
@@ -311,7 +311,7 @@ def name(self):
if self._content_disposition is None:
value = self._headers.get(b'content-disposition', b'')
- self._content_disposition = cgi.parse_header(value.decode())
+ self._content_disposition = parse_header(value.decode())
_, params = self._content_disposition
self._name = params.get('name')
@@ -493,7 +493,7 @@ def __init__(self, parse_options=None):
def _deserialize_form(
self, stream, content_type, content_length, form_cls=MultipartForm
):
- _, options = cgi.parse_header(content_type)
+ _, options = parse_header(content_type)
try:
boundary = options['boundary']
except KeyError:
diff --git a/falcon/util/__init__.py b/falcon/util/__init__.py
index 3fec8b06e..1cead07e8 100644
--- a/falcon/util/__init__.py
+++ b/falcon/util/__init__.py
@@ -29,6 +29,7 @@
from falcon.util.deprecation import deprecated
from falcon.util.deprecation import deprecated_args
from falcon.util.deprecation import DeprecatedWarning
+from falcon.util.mediatypes import parse_header
from falcon.util.misc import code_to_http_status
from falcon.util.misc import dt_to_http
from falcon.util.misc import get_argnames
diff --git a/falcon/util/mediatypes.py b/falcon/util/mediatypes.py
new file mode 100644
index 000000000..c0dca5121
--- /dev/null
+++ b/falcon/util/mediatypes.py
@@ -0,0 +1,89 @@
+# Copyright 2023-2024 by Vytautas Liuolia.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Media (aka MIME) type parsing and matching utilities."""
+
+import typing
+
+
+def _parse_param_old_stdlib(s): # type: ignore
+ while s[:1] == ';':
+ s = s[1:]
+ end = s.find(';')
+ while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
+ end = s.find(';', end + 1)
+ if end < 0:
+ end = len(s)
+ f = s[:end]
+ yield f.strip()
+ s = s[end:]
+
+
+def _parse_header_old_stdlib(line): # type: ignore
+ """Parse a Content-type like header.
+
+ Return the main content-type and a dictionary of options.
+
+ Note:
+ This method has been copied (almost) verbatim from CPython 3.8 stdlib.
+ It is slated for removal from the stdlib in 3.13.
+ """
+ parts = _parse_param_old_stdlib(';' + line)
+ key = parts.__next__()
+ pdict = {}
+ for p in parts:
+ i = p.find('=')
+ if i >= 0:
+ name = p[:i].strip().lower()
+ value = p[i + 1 :].strip()
+ if len(value) >= 2 and value[0] == value[-1] == '"':
+ value = value[1:-1]
+ value = value.replace('\\\\', '\\').replace('\\"', '"')
+ pdict[name] = value
+ return key, pdict
+
+
+def parse_header(line: str) -> typing.Tuple[str, dict]:
+ """Parse a Content-type like header.
+
+ Return the main content-type and a dictionary of options.
+
+ Args:
+ line: A header value to parse.
+
+ Returns:
+ tuple: (the main content-type, dictionary of options).
+
+ Note:
+ This function replaces an equivalent method previously available in the
+ stdlib as ``cgi.parse_header()``.
+ It was removed from the stdlib in Python 3.13.
+ """
+ if '"' not in line and '\\' not in line:
+ key, semicolon, parts = line.partition(';')
+ if not semicolon:
+ return (key.strip(), {})
+
+ pdict = {}
+ for part in parts.split(';'):
+ name, equals, value = part.partition('=')
+ if equals:
+ pdict[name.strip().lower()] = value.strip()
+
+ return (key.strip(), pdict)
+
+ return _parse_header_old_stdlib(line)
+
+
+__all__ = ['parse_header']
diff --git a/falcon/vendor/mimeparse/mimeparse.py b/falcon/vendor/mimeparse/mimeparse.py
index 0218553cf..f96e63384 100755
--- a/falcon/vendor/mimeparse/mimeparse.py
+++ b/falcon/vendor/mimeparse/mimeparse.py
@@ -1,4 +1,4 @@
-import cgi
+from falcon.util.mediatypes import parse_header
__version__ = '1.6.0'
__author__ = 'Joe Gregorio'
@@ -23,7 +23,7 @@ def parse_mime_type(mime_type):
:rtype: (str,str,dict)
"""
- full_type, params = cgi.parse_header(mime_type)
+ full_type, params = parse_header(mime_type)
# Java URLConnection class sends an Accept header that includes a
# single '*'. Turn it into a legal wildcard.
if full_type == '*':
diff --git a/pyproject.toml b/pyproject.toml
index 5ed0c5fab..44a829feb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -94,7 +94,6 @@ filterwarnings = [
"ignore:Using or importing the ABCs:DeprecationWarning",
"ignore:cannot collect test class 'TestClient':pytest.PytestCollectionWarning",
"ignore:inspect.getargspec\\(\\) is deprecated:DeprecationWarning",
- "ignore:.cgi. is deprecated and slated for removal:DeprecationWarning",
"ignore:path is deprecated\\. Use files\\(\\) instead:DeprecationWarning",
"ignore:This process \\(.+\\) is multi-threaded",
]
| diff --git a/falcon/testing/helpers.py b/falcon/testing/helpers.py
index 00959495a..39e8c12f8 100644
--- a/falcon/testing/helpers.py
+++ b/falcon/testing/helpers.py
@@ -23,7 +23,6 @@
"""
import asyncio
-import cgi
from collections import defaultdict
from collections import deque
import contextlib
@@ -51,6 +50,7 @@
from falcon.constants import SINGLETON_HEADERS
import falcon.request
from falcon.util import uri
+from falcon.util.mediatypes import parse_header
# NOTE(kgriffs): Changed in 3.0 from 'curl/7.24.0 (x86_64-apple-darwin12.0)'
DEFAULT_UA = 'falcon-client/' + falcon.__version__
@@ -802,7 +802,7 @@ def get_encoding_from_headers(headers):
if not content_type:
return None
- content_type, params = cgi.parse_header(content_type)
+ content_type, params = parse_header(content_type)
if 'charset' in params:
return params['charset'].strip('\'"')
diff --git a/tests/test_mediatypes.py b/tests/test_mediatypes.py
new file mode 100644
index 000000000..0fae79b43
--- /dev/null
+++ b/tests/test_mediatypes.py
@@ -0,0 +1,41 @@
+import pytest
+
+from falcon.util import mediatypes
+
+
+@pytest.mark.parametrize(
+ 'value,expected',
+ [
+ ('', ('', {})),
+ ('strange', ('strange', {})),
+ ('text/plain', ('text/plain', {})),
+ ('text/plain ', ('text/plain', {})),
+ (' text/plain', ('text/plain', {})),
+ (' text/plain ', ('text/plain', {})),
+ (' text/plain ', ('text/plain', {})),
+ (
+ 'falcon/peregrine; key1; key2=value; key3',
+ ('falcon/peregrine', {'key2': 'value'}),
+ ),
+ (
+ 'audio/pcm;rate=48000;encoding=float;bits=32',
+ ('audio/pcm', {'bits': '32', 'encoding': 'float', 'rate': '48000'}),
+ ),
+ (
+ 'falcon/*; genus=falco; family=falconidae; class=aves; ',
+ ('falcon/*', {'class': 'aves', 'family': 'falconidae', 'genus': 'falco'}),
+ ),
+ ('"falcon/peregrine" ; key="value"', ('"falcon/peregrine"', {'key': 'value'})),
+ ('falcon/peregrine; empty=""', ('falcon/peregrine', {'empty': ''})),
+ ('falcon/peregrine; quote="', ('falcon/peregrine', {'quote': '"'})),
+ ('text/plain; charset=utf-8', ('text/plain', {'charset': 'utf-8'})),
+ ('stuff/strange; missing-value; missing-another', ('stuff/strange', {})),
+ ('stuff/strange; missing-value\\missing-another', ('stuff/strange', {})),
+ (
+ 'application/falcon; P1 = "key; value"; P2="\\""',
+ ('application/falcon', {'p1': 'key; value', 'p2': '"'}),
+ ),
+ ],
+)
+def test_parse_header(value, expected):
+ assert mediatypes.parse_header(value) == expected
| diff --git a/README.rst b/README.rst
index 26105b0dd..738b2f2b8 100644
--- a/README.rst
+++ b/README.rst
@@ -1027,7 +1027,7 @@ See also: `CONTRIBUTING.md <https://github.com/falconry/falcon/blob/master/CONTR
Legal
-----
-Copyright 2013-2023 by Individual and corporate contributors as
+Copyright 2013-2024 by Individual and corporate contributors as
noted in the individual source files.
Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/docs/_newsfragments/2066.newandimproved.rst b/docs/_newsfragments/2066.newandimproved.rst
new file mode 100644
index 000000000..8bbee4797
--- /dev/null
+++ b/docs/_newsfragments/2066.newandimproved.rst
@@ -0,0 +1,4 @@
+In Python 3.13, the ``cgi`` module is removed entirely from the stdlib,
+including its ``parse_header()`` method. Falcon addresses the issue by shipping
+an own implementation; :func:`falcon.parse_header` can also be used in your projects
+affected by the removal.
diff --git a/docs/api/util.rst b/docs/api/util.rst
index d46f8b6f8..2c645fede 100644
--- a/docs/api/util.rst
+++ b/docs/api/util.rst
@@ -34,6 +34,11 @@ HTTP Status
.. autofunction:: falcon.code_to_http_status
.. autofunction:: falcon.get_http_status
+Media types
+-----------
+
+.. autofunction:: falcon.parse_header
+
Async
-----
diff --git a/docs/user/recipes/pretty-json.rst b/docs/user/recipes/pretty-json.rst
index b6e5e4dc0..5faf59e22 100644
--- a/docs/user/recipes/pretty-json.rst
+++ b/docs/user/recipes/pretty-json.rst
@@ -52,7 +52,6 @@ implemented with a :ref:`custom media handler <custom-media-handler-type>`:
.. code:: python
- import cgi
import json
import falcon
@@ -66,7 +65,7 @@ implemented with a :ref:`custom media handler <custom-media-handler-type>`:
return json.loads(data.decode())
def serialize(self, media, content_type):
- _, params = cgi.parse_header(content_type)
+ _, params = falcon.parse_header(content_type)
indent = params.get('indent')
if indent is not None:
try:
diff --git a/pyproject.toml b/pyproject.toml
index 5ed0c5fab..44a829feb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -94,7 +94,6 @@ filterwarnings = [
"ignore:Using or importing the ABCs:DeprecationWarning",
"ignore:cannot collect test class 'TestClient':pytest.PytestCollectionWarning",
"ignore:inspect.getargspec\\(\\) is deprecated:DeprecationWarning",
- "ignore:.cgi. is deprecated and slated for removal:DeprecationWarning",
"ignore:path is deprecated\\. Use files\\(\\) instead:DeprecationWarning",
"ignore:This process \\(.+\\) is multi-threaded",
]
| [
{
"components": [
{
"doc": "",
"lines": [
20,
30
],
"name": "_parse_param_old_stdlib",
"signature": "def _parse_param_old_stdlib(s):",
"type": "function"
},
{
"doc": "Parse a Content-type like header.\n\nReturn the mai... | [
"tests/test_mediatypes.py::test_parse_header[-expected0]",
"tests/test_mediatypes.py::test_parse_header[strange-expected1]",
"tests/test_mediatypes.py::test_parse_header[text/plain-expected2]",
"tests/test_mediatypes.py::test_parse_header[text/plain",
"tests/test_mediatypes.py::test_parse_header[",
"tests... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(parse_header): provide our own implementation of `parse_header()`
I have another branch which also introduces Cython's "pure Python mode" in order to speed up `parse_header()`, but we can circle back on that optimization in Falcon 4.1.
I'm also planning to reimplement the parts of `mimeparse` that we actually use, and unvendor it completely.
Fixes #2066
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in falcon/util/mediatypes.py]
(definition of _parse_param_old_stdlib:)
def _parse_param_old_stdlib(s):
(definition of _parse_header_old_stdlib:)
def _parse_header_old_stdlib(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
Note:
This method has been copied (almost) verbatim from CPython 3.8 stdlib.
It is slated for removal from the stdlib in 3.13."""
(definition of parse_header:)
def parse_header(line: str) -> typing.Tuple[str, dict]:
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
Args:
line: A header value to parse.
Returns:
tuple: (the main content-type, dictionary of options).
Note:
This function replaces an equivalent method previously available in the
stdlib as ``cgi.parse_header()``.
It was removed from the stdlib in Python 3.13."""
[end of new definitions in falcon/util/mediatypes.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Replace usage of `cgi.parse_header()` (slated for removal in 3.13)
[PEP 594](https://peps.python.org/pep-0594) specifies removal of "dead batteries" in CPython 3.13, with deprecation announced in 3.11.
Apparently, [cgi](https://peps.python.org/pep-0594/#cgi) is also on the chopping block, so we'll have to replace `cgi.parse_header()`. At first glance, the suggested replacement (instantiating an `email.message.Message`) looks clunky and likely to perform worse. Maybe we can reimplement it in Cython instead?
----------
As an alternative why not just use the cgi sourcecode. It seems quite standalone?
https://github.com/python/cpython/blob/3.11/Lib/cgi.py#L238
```python
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
```
Hi @mikael-epigram, and thanks for reaching out!
At a glance this doesn't look very performant, we'll try to write a faster version in both Python (for PyPy) and Cython. But yes, in a pinch, just vendoring this snippet would do.
I agree that it does not seem that performant, but that is what you are already using on CPython
--------------------
</issues> | 77d5e6394a88ead151c9469494749f95f06b24bf |
conan-io__conan-15914 | 15,914 | conan-io/conan | null | 59029095e86eacd402083fca7c308b06889f6301 | 2024-03-21T17:12:38Z | diff --git a/conan/tools/cmake/toolchain/blocks.py b/conan/tools/cmake/toolchain/blocks.py
index 36f725ae7c9..63181139a99 100644
--- a/conan/tools/cmake/toolchain/blocks.py
+++ b/conan/tools/cmake/toolchain/blocks.py
@@ -154,7 +154,7 @@ def context(self):
if not config_dict:
return None
-
+
vs_debugger_path = ""
for config, value in config_dict.items():
vs_debugger_path += f"$<$<CONFIG:{config}>:{value}>"
@@ -480,6 +480,9 @@ class FindFiles(Block):
{% if cmake_include_path %}
list(PREPEND CMAKE_INCLUDE_PATH {{ cmake_include_path }})
{% endif %}
+ {% if host_runtime_dirs %}
+ set(CONAN_RUNTIME_LIB_DIRS {{ host_runtime_dirs }} )
+ {% endif %}
{% if cross_building %}
if(NOT DEFINED CMAKE_FIND_ROOT_PATH_MODE_PACKAGE OR CMAKE_FIND_ROOT_PATH_MODE_PACKAGE STREQUAL "ONLY")
@@ -502,6 +505,40 @@ class FindFiles(Block):
{% endif %}
""")
+ def _runtime_dirs_value(self, dirs):
+ if is_multi_configuration(self._toolchain.generator):
+ return ' '.join(f'"$<$<CONFIG:{c}>:{i}>"' for c, v in dirs.items() for i in v)
+ else:
+ return ' '.join(f'"{item}"' for _, items in dirs.items() for item in items)
+
+ def _get_host_runtime_dirs(self, host_req):
+ settings = self._conanfile.settings
+ host_runtime_dirs = {}
+ is_win = self._conanfile.settings.get_safe("os") == "Windows"
+
+ # Get the previous configuration
+ if is_multi_configuration(self._toolchain.generator) and os.path.exists(CONAN_TOOLCHAIN_FILENAME):
+ existing_toolchain = load(CONAN_TOOLCHAIN_FILENAME)
+ pattern_lib_dirs = r"set\(CONAN_RUNTIME_LIB_DIRS ([^)]*)\)"
+ variable_match = re.search(pattern_lib_dirs, existing_toolchain)
+ if variable_match:
+ capture = variable_match.group(1)
+ matches = re.findall(r'"\$<\$<CONFIG:([A-Za-z]*)>:([^>]*)>"', capture)
+ host_runtime_dirs = {}
+ for k, v in matches:
+ host_runtime_dirs.setdefault(k, []).append(v)
+
+ # Calculate the dirs for the current build_type
+ runtime_dirs = []
+ for req in host_req:
+ cppinfo = req.cpp_info.aggregated_components()
+ runtime_dirs.extend(cppinfo.bindirs if is_win else cppinfo.libdirs)
+
+ build_type = settings.get_safe("build_type")
+ host_runtime_dirs[build_type] = [s.replace("\\", "/") for s in runtime_dirs]
+
+ return host_runtime_dirs
+
@staticmethod
def _join_paths(paths):
return " ".join(['"{}"'.format(p.replace('\\', '/')
@@ -524,6 +561,7 @@ def context(self):
host_req = self._conanfile.dependencies.filter({"build": False}).values()
build_paths = []
host_lib_paths = []
+ host_runtime_dirs = self._get_host_runtime_dirs(host_req)
host_framework_paths = []
host_include_paths = []
for req in host_req:
@@ -552,6 +590,7 @@ def context(self):
"cmake_include_path": self._join_paths(host_include_paths),
"is_apple": is_apple_,
"cross_building": cross_building(self._conanfile),
+ "host_runtime_dirs": self._runtime_dirs_value(host_runtime_dirs)
}
| diff --git a/conans/test/integration/lockfile/test_lock_requires_revisions.py b/conans/test/integration/lockfile/test_lock_requires_revisions.py
index e84680c0839..1f6e16ab7ad 100644
--- a/conans/test/integration/lockfile/test_lock_requires_revisions.py
+++ b/conans/test/integration/lockfile/test_lock_requires_revisions.py
@@ -132,20 +132,20 @@ def requirements(self):
client.run("install consumer --lockfile=consumer.lock -s os=Windows -s:b os=Windows")
assert "REV1!!!" in client.out
assert "REV2!!!" not in client.out
- assert "nix" not in client.out
+ assert "nix/0.1" not in client.out
client.run("install consumer -s os=Windows -s:b os=Windows")
assert "REV2!!!" in client.out
assert "REV1!!!" not in client.out
- assert "nix" not in client.out
+ assert "nix/0.1" not in client.out
client.run("install consumer --lockfile=consumer.lock -s os=Linux -s:b os=Linux")
assert "REV1!!!" in client.out
assert "REV2!!!" not in client.out
- assert "win" not in client.out
+ assert "win/0.1" not in client.out
client.run("install consumer -s os=Linux -s:b os=Linux")
assert "REV2!!!" in client.out
assert "REV1!!!" not in client.out
- assert "win" not in client.out
+ assert "win/0.1" not in client.out
@pytest.mark.parametrize("requires", ["requires", "tool_requires"])
diff --git a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
index ebaf4164929..9b2ecd90551 100644
--- a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
+++ b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
@@ -1,6 +1,7 @@
import json
import os
import platform
+import re
import textwrap
import pytest
@@ -352,6 +353,67 @@ def generate(self):
assert "/path/to/builddir" in contents
+@pytest.fixture
+def lib_dir_setup():
+ client = TestClient()
+ client.save({"conanfile.py": GenConanfile().with_generator("CMakeToolchain")})
+ client.run("create . --name=onelib --version=1.0")
+ client.run("create . --name=twolib --version=1.0")
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+
+ class Conan(ConanFile):
+ requires = "onelib/1.0", "twolib/1.0"
+
+ """)
+ client.save({"conanfile.py": conanfile})
+ client.run("create . --name=dep --version=1.0")
+
+ conanfile = (GenConanfile().with_requires("dep/1.0").with_generator("CMakeToolchain")
+ .with_settings("os", "arch", "compiler", "build_type"))
+
+ client.save({"conanfile.py": conanfile})
+ return client
+
+def test_runtime_lib_dirs_single_conf(lib_dir_setup):
+ client = lib_dir_setup
+ generator = ""
+ is_windows = platform.system() == "Windows"
+ if is_windows:
+ generator = '-c tools.cmake.cmaketoolchain:generator=Ninja'
+
+ client.run(f'install . -s build_type=Release {generator}')
+ contents = client.load("conan_toolchain.cmake")
+ pattern_lib_path = r'list\(PREPEND CMAKE_LIBRARY_PATH (.*)\)'
+ pattern_lib_dirs = r'set\(CONAN_RUNTIME_LIB_DIRS (.*) \)'
+
+ # On *nix platforms: the list in `CMAKE_LIBRARY_PATH`
+ # is the same as `CONAN_RUNTIME_LIB_DIRS`
+ # On windows, it's the same but with `bin` instead of `lib`
+ cmake_library_path = re.search(pattern_lib_path, contents).group(1)
+ conan_runtime_lib_dirs = re.search(pattern_lib_dirs, contents).group(1)
+ lib_path = cmake_library_path.replace("/p/lib", "/p/bin") if is_windows else cmake_library_path
+
+ assert lib_path == conan_runtime_lib_dirs
+
+
+def test_runtime_lib_dirs_multiconf(lib_dir_setup):
+ client = lib_dir_setup
+ generator = ""
+ if platform.system() != "Windows":
+ generator = '-c tools.cmake.cmaketoolchain:generator="Ninja Multi-Config"'
+
+ client.run(f'install . -s build_type=Release {generator}')
+ client.run(f'install . -s build_type=Debug {generator}')
+
+ contents = client.load("conan_toolchain.cmake")
+ pattern_lib_dirs = r"set\(CONAN_RUNTIME_LIB_DIRS ([^)]*)\)"
+ runtime_lib_dirs = re.search(pattern_lib_dirs, contents).group(1)
+
+ assert "<CONFIG:Release>" in runtime_lib_dirs
+ assert "<CONFIG:Debug>" in runtime_lib_dirs
+
+
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
def test_cmaketoolchain_cmake_system_processor_cross_apple():
"""
| [
{
"components": [
{
"doc": "",
"lines": [
508,
512
],
"name": "FindFiles._runtime_dirs_value",
"signature": "def _runtime_dirs_value(self, dirs):",
"type": "function"
},
{
"doc": "",
"lines": [
514,
... | [
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_runtime_lib_dirs_single_conf",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_runtime_lib_dirs_multiconf"
] | [
"conans/test/integration/lockfile/test_lock_requires_revisions.py::test_conanfile_txt_deps_revisions[requires]",
"conans/test/integration/lockfile/test_lock_requires_revisions.py::test_conanfile_txt_deps_revisions[tool_requires]",
"conans/test/integration/lockfile/test_lock_requires_revisions.py::test_conanfile... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add CONAN_RUNTIME_LIB_DIRS to the conan_toolchain.cmake
Changelog: Feature: Add `CONAN_RUNTIME_LIB_DIRS` variable to the `conan_toolchain.cmake`.
Docs: https://github.com/conan-io/docs/pull/3698
Close: #15810
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/cmake/toolchain/blocks.py]
(definition of FindFiles._runtime_dirs_value:)
def _runtime_dirs_value(self, dirs):
(definition of FindFiles._get_host_runtime_dirs:)
def _get_host_runtime_dirs(self, host_req):
[end of new definitions in conan/tools/cmake/toolchain/blocks.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
deepset-ai__haystack-7399 | 7,399 | deepset-ai/haystack | null | e779d4338498a528fb7cee9cc090b3aea70c8cb5 | 2024-03-21T14:59:28Z | diff --git a/haystack/components/evaluators/document_recall.py b/haystack/components/evaluators/document_recall.py
new file mode 100644
index 0000000000..0aaa2bd179
--- /dev/null
+++ b/haystack/components/evaluators/document_recall.py
@@ -0,0 +1,112 @@
+from enum import Enum
+from typing import Any, Dict, List, Union
+
+from haystack.core.component import component
+from haystack.dataclasses import Document
+
+
+class RecallMode(Enum):
+ """
+ Enum for the mode to use for calculating the recall score.
+ """
+
+ # Score is based on whether any document is retrieved.
+ SINGLE_HIT = "single_hit"
+ # Score is based on how many documents were retrieved.
+ MULTI_HIT = "multi_hit"
+
+ def __str__(self):
+ return self.value
+
+ @staticmethod
+ def from_str(string: str) -> "RecallMode":
+ enum_map = {e.value: e for e in RecallMode}
+ mode = enum_map.get(string)
+ if mode is None:
+ msg = f"Unknown recall mode '{string}'. Supported modes are: {list(enum_map.keys())}"
+ raise ValueError(msg)
+ return mode
+
+
+@component
+class DocumentRecallEvaluator:
+ """
+ Evaluator that calculates the Recall score for a list of questions.
+ Returns both a list of scores for each question and the average.
+ Each question can have multiple ground truth documents and multiple predicted documents.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import DocumentRecallEvaluator
+ evaluator = DocumentRecallEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Paris"], ["London"]],
+ )
+ print(result["individual_scores"])
+ # [0.0, 0.0]
+ print(result["score"])
+ # 0.0
+ ```
+ """
+
+ def __init__(self, mode: Union[str, RecallMode] = RecallMode.SINGLE_HIT):
+ """
+ Create a DocumentRecallEvaluator component.
+
+ :param mode:
+ Mode to use for calculating the recall score.
+ """
+ if isinstance(mode, str):
+ mode = RecallMode.from_str(mode)
+
+ mode_functions = {RecallMode.SINGLE_HIT: self._recall_single_hit, RecallMode.MULTI_HIT: self._recall_multi_hit}
+ self.mode_function = mode_functions[mode]
+
+ def _recall_single_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> bool:
+ unique_truths = {g.content for g in ground_truth_documents}
+ unique_retrievals = {p.content for p in retrieved_documents}
+ retrieved_ground_truths = unique_truths.intersection(unique_retrievals)
+
+ return len(retrieved_ground_truths) > 0
+
+ def _recall_multi_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> float:
+ unique_truths = {g.content for g in ground_truth_documents}
+ unique_retrievals = {p.content for p in retrieved_documents}
+ retrieved_ground_truths = unique_truths.intersection(unique_retrievals)
+
+ return len(retrieved_ground_truths) / len(ground_truth_documents)
+
+ @component.output_types(score=float, individual_scores=List[float])
+ def run(
+ self,
+ questions: List[str],
+ ground_truth_documents: List[List[Document]],
+ retrieved_documents: List[List[Document]],
+ ) -> Dict[str, Any]:
+ """
+ Run the DocumentRecallEvaluator on the given inputs.
+ All lists must have the same length.
+
+ :param questions:
+ A list of questions.
+ :param ground_truth_documents:
+ A list of expected documents for each question.
+ :param retrieved_documents:
+ A list of retrieved documents for each question.
+ A dictionary with the following outputs:
+ - `score` - The average of calculated scores.
+ - `invididual_scores` - A list of numbers from 0.0 to 1.0 that represents the proportion of matching documents retrieved.
+ If the mode is `single_hit`, the individual scores are True or False.
+ """
+ if not len(questions) == len(ground_truth_documents) == len(retrieved_documents):
+ msg = "The length of questions, ground_truth_documents, and predicted_documents must be the same."
+ raise ValueError(msg)
+
+ scores = []
+ for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
+ score = self.mode_function(ground_truth, retrieved)
+ scores.append(score)
+
+ return {"score": sum(scores) / len(questions), "individual_scores": scores}
diff --git a/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml b/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml
new file mode 100644
index 0000000000..128b13e95d
--- /dev/null
+++ b/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `DocumentRecallEvaluator`, a Component that can be used to calculate the Recall single-hit or multi-hit
+ metric given a list of questions, a list of expected documents for each question and the list of predicted
+ documents for each question.
| diff --git a/test/components/evaluators/test_document_recall.py b/test/components/evaluators/test_document_recall.py
new file mode 100644
index 0000000000..d73406df07
--- /dev/null
+++ b/test/components/evaluators/test_document_recall.py
@@ -0,0 +1,192 @@
+import pytest
+
+from haystack.components.evaluators.document_recall import DocumentRecallEvaluator, RecallMode
+from haystack.dataclasses import Document
+
+
+def test_init_with_unknown_mode_string():
+ with pytest.raises(ValueError):
+ DocumentRecallEvaluator(mode="unknown_mode")
+
+
+class TestDocumentRecallEvaluatorSingleHit:
+ @pytest.fixture
+ def evaluator(self):
+ return DocumentRecallEvaluator(mode=RecallMode.SINGLE_HIT)
+
+ def test_run_with_all_matching(self, evaluator):
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 1.0], "score": 1.0}
+
+ def test_run_with_no_matching(self, evaluator):
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Paris")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [0.0, 0.0], "score": 0.0}
+
+ def test_run_with_partial_matching(self, evaluator):
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 0.0], "score": 0.5}
+
+ def test_run_with_complex_data(self, evaluator):
+ result = evaluator.run(
+ questions=[
+ "In what country is Normandy located?",
+ "When was the Latin version of the word Norman first recorded?",
+ "What developed in Normandy during the 1100s?",
+ "In what century did important classical music developments occur in Normandy?",
+ "From which countries did the Norse originate?",
+ "What century did the Normans first gain their separate identity?",
+ ],
+ ground_truth_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="9th")],
+ [Document(content="classical music"), Document(content="classical")],
+ [Document(content="11th century"), Document(content="the 11th")],
+ [Document(content="Denmark, Iceland and Norway")],
+ [Document(content="10th century"), Document(content="10th")],
+ ],
+ retrieved_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
+ [Document(content="classical"), Document(content="rock music"), Document(content="dubstep")],
+ [Document(content="11th"), Document(content="the 11th"), Document(content="11th century")],
+ [Document(content="Denmark"), Document(content="Norway"), Document(content="Iceland")],
+ [
+ Document(content="10th century"),
+ Document(content="the first half of the 10th century"),
+ Document(content="10th"),
+ Document(content="10th"),
+ ],
+ ],
+ )
+ assert result == {"individual_scores": [True, True, True, True, False, True], "score": 0.8333333333333334}
+
+ def test_run_with_different_lengths(self, evaluator):
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")]],
+ )
+
+
+class TestDocumentRecallEvaluatorMultiHit:
+ @pytest.fixture
+ def evaluator(self):
+ return DocumentRecallEvaluator(mode=RecallMode.MULTI_HIT)
+
+ def test_run_with_all_matching(self, evaluator):
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 1.0], "score": 1.0}
+
+ def test_run_with_no_matching(self, evaluator):
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Paris")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [0.0, 0.0], "score": 0.0}
+
+ def test_run_with_partial_matching(self, evaluator):
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ assert result == {"individual_scores": [1.0, 0.0], "score": 0.5}
+
+ def test_run_with_complex_data(self, evaluator):
+ result = evaluator.run(
+ questions=[
+ "In what country is Normandy located?",
+ "When was the Latin version of the word Norman first recorded?",
+ "What developed in Normandy during the 1100s?",
+ "In what century did important classical music developments occur in Normandy?",
+ "From which countries did the Norse originate?",
+ "What century did the Normans first gain their separate identity?",
+ ],
+ ground_truth_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="9th")],
+ [Document(content="classical music"), Document(content="classical")],
+ [Document(content="11th century"), Document(content="the 11th")],
+ [
+ Document(content="Denmark"),
+ Document(content="Iceland"),
+ Document(content="Norway"),
+ Document(content="Denmark, Iceland and Norway"),
+ ],
+ [Document(content="10th century"), Document(content="10th")],
+ ],
+ retrieved_documents=[
+ [Document(content="France")],
+ [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
+ [Document(content="classical"), Document(content="rock music"), Document(content="dubstep")],
+ [Document(content="11th"), Document(content="the 11th"), Document(content="11th century")],
+ [Document(content="Denmark"), Document(content="Norway"), Document(content="Iceland")],
+ [
+ Document(content="10th century"),
+ Document(content="the first half of the 10th century"),
+ Document(content="10th"),
+ Document(content="10th"),
+ ],
+ ],
+ )
+ assert result == {"individual_scores": [1.0, 1.0, 0.5, 1.0, 0.75, 1.0], "score": 0.875}
+
+ def test_run_with_different_lengths(self, evaluator):
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")]],
+ retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
+ retrieved_documents=[[Document(content="Berlin")]],
+ )
| diff --git a/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml b/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml
new file mode 100644
index 0000000000..128b13e95d
--- /dev/null
+++ b/releasenotes/notes/recall-evaluator-5595470406e93ad2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `DocumentRecallEvaluator`, a Component that can be used to calculate the Recall single-hit or multi-hit
+ metric given a list of questions, a list of expected documents for each question and the list of predicted
+ documents for each question.
| [
{
"components": [
{
"doc": "Enum for the mode to use for calculating the recall score.",
"lines": [
8,
28
],
"name": "RecallMode",
"signature": "class RecallMode(Enum):",
"type": "class"
},
{
"doc": "",
"lines"... | [
"test/components/evaluators/test_document_recall.py::test_init_with_unknown_mode_string",
"test/components/evaluators/test_document_recall.py::TestDocumentRecallEvaluatorSingleHit::test_run_with_all_matching",
"test/components/evaluators/test_document_recall.py::TestDocumentRecallEvaluatorSingleHit::test_run_wi... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add `DocumentRecallEvaluator`
### Related Issues
- fixes #6064
### Proposed Changes:
Add `DocumentRecallEvaluator` Component. It can ben used to calculate Recall single-hit or multi-hit metric.
### How did you test it?
I added unit tests.
### Notes for the reviewer
I didn't add the component in the package `__init__.py` on purpose to avoid conflicts with future PRs.
When all the evaluators are done I'll update it.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/document_recall.py]
(definition of RecallMode:)
class RecallMode(Enum):
"""Enum for the mode to use for calculating the recall score."""
(definition of RecallMode.__str__:)
def __str__(self):
(definition of RecallMode.from_str:)
def from_str(string: str) -> "RecallMode":
(definition of DocumentRecallEvaluator:)
class DocumentRecallEvaluator:
"""Evaluator that calculates the Recall score for a list of questions.
Returns both a list of scores for each question and the average.
Each question can have multiple ground truth documents and multiple predicted documents.
Usage example:
```python
from haystack.components.evaluators import DocumentRecallEvaluator
evaluator = DocumentRecallEvaluator()
result = evaluator.run(
questions=["What is the capital of Germany?", "What is the capital of France?"],
ground_truth_answers=[["Berlin"], ["Paris"]],
predicted_answers=[["Paris"], ["London"]],
)
print(result["individual_scores"])
# [0.0, 0.0]
print(result["score"])
# 0.0
```"""
(definition of DocumentRecallEvaluator.__init__:)
def __init__(self, mode: Union[str, RecallMode] = RecallMode.SINGLE_HIT):
"""Create a DocumentRecallEvaluator component.
:param mode:
Mode to use for calculating the recall score."""
(definition of DocumentRecallEvaluator._recall_single_hit:)
def _recall_single_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> bool:
(definition of DocumentRecallEvaluator._recall_multi_hit:)
def _recall_multi_hit(self, ground_truth_documents: List[Document], retrieved_documents: List[Document]) -> float:
(definition of DocumentRecallEvaluator.run:)
def run( self, questions: List[str], ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]], ) -> Dict[str, Any]:
"""Run the DocumentRecallEvaluator on the given inputs.
All lists must have the same length.
:param questions:
A list of questions.
:param ground_truth_documents:
A list of expected documents for each question.
:param retrieved_documents:
A list of retrieved documents for each question.
A dictionary with the following outputs:
- `score` - The average of calculated scores.
- `invididual_scores` - A list of numbers from 0.0 to 1.0 that represents the proportion of matching documents retrieved.
If the mode is `single_hit`, the individual scores are True or False."""
[end of new definitions in haystack/components/evaluators/document_recall.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement function to calculate Recall metric
As specified in proposal #5794 we need to implement a function to calculate the Recall metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_recall()` could be a nice name.
For more detailed information check out the original proposal.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
tobymao__sqlglot-3180 | 3,180 | tobymao/sqlglot | null | a452276da4daaa436a9ac95566bcbb2954d149e3 | 2024-03-20T18:02:02Z | diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 9837025ace..470fd7c314 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -38,10 +38,12 @@ def _ts_or_ds_add_sql(self: DuckDB.Generator, expression: exp.TsOrDsAdd) -> str:
return f"CAST({this} AS {self.sql(expression.return_type)}) + {interval}"
-def _date_delta_sql(self: DuckDB.Generator, expression: exp.DateAdd | exp.DateSub) -> str:
+def _date_delta_sql(
+ self: DuckDB.Generator, expression: exp.DateAdd | exp.DateSub | exp.TimeAdd
+) -> str:
this = self.sql(expression, "this")
unit = self.sql(expression, "unit").strip("'") or "DAY"
- op = "+" if isinstance(expression, exp.DateAdd) else "-"
+ op = "+" if isinstance(expression, (exp.DateAdd, exp.TimeAdd)) else "-"
return f"{this} {op} {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
@@ -427,6 +429,7 @@ class Generator(generator.Generator):
"EPOCH", self.func("STRPTIME", e.this, self.format_time(e))
),
exp.Struct: _struct_sql,
+ exp.TimeAdd: _date_delta_sql,
exp.Timestamp: no_timestamp_sql,
exp.TimestampDiff: lambda self, e: self.func(
"DATE_DIFF", exp.Literal.string(e.unit), e.expression, e.this
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 0ffe4cf0a3..c13c15ab20 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -78,6 +78,17 @@ def _build_datediff(args: t.List) -> exp.DateDiff:
)
+def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
+ def _builder(args: t.List) -> E:
+ return expr_type(
+ this=seq_get(args, 2),
+ expression=seq_get(args, 1),
+ unit=_map_date_part(seq_get(args, 0)),
+ )
+
+ return _builder
+
+
# https://docs.snowflake.com/en/sql-reference/functions/div0
def _build_if_from_div0(args: t.List) -> exp.If:
cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
@@ -345,11 +356,7 @@ class Parser(parser.Parser):
"CONVERT_TIMEZONE": _build_convert_timezone,
"DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
"DATE_TRUNC": _date_trunc_to_time,
- "DATEADD": lambda args: exp.DateAdd(
- this=seq_get(args, 2),
- expression=seq_get(args, 1),
- unit=_map_date_part(seq_get(args, 0)),
- ),
+ "DATEADD": _build_date_time_add(exp.DateAdd),
"DATEDIFF": _build_datediff,
"DIV0": _build_if_from_div0,
"FLATTEN": exp.Explode.from_arg_list,
@@ -367,7 +374,9 @@ class Parser(parser.Parser):
"REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
"RLIKE": exp.RegexpLike.from_arg_list,
"SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
+ "TIMEADD": _build_date_time_add(exp.TimeAdd),
"TIMEDIFF": _build_datediff,
+ "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
"TIMESTAMPDIFF": _build_datediff,
"TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
"TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
@@ -792,6 +801,7 @@ class Generator(generator.Generator):
),
exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
exp.Stuff: rename_func("INSERT"),
+ exp.TimeAdd: date_delta_sql("TIMEADD"),
exp.TimestampDiff: lambda self, e: self.func(
"TIMESTAMPDIFF", e.unit, e.expression, e.this
),
| diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 917230b22f..5cfcfea2ce 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -15,6 +15,17 @@ def test_duckdb(self):
"WITH _data AS (SELECT [STRUCT(1 AS a, 2 AS b), STRUCT(2 AS a, 3 AS b)] AS col) SELECT col.b FROM _data, UNNEST(_data.col) AS col WHERE col.a = 1",
)
+ self.validate_all(
+ "SELECT CAST('09:05:03' AS TIME) + INTERVAL 2 HOUR",
+ read={
+ "bigquery": "SELECT TIME_ADD(CAST('09:05:03' AS TIME), INTERVAL 2 HOUR)",
+ "snowflake": "SELECT TIMEADD(HOUR, 2, TO_TIME('09:05:03'))",
+ },
+ write={
+ "duckdb": "SELECT CAST('09:05:03' AS TIME) + INTERVAL '2' HOUR",
+ "snowflake": "SELECT CAST('09:05:03' AS TIME) + INTERVAL '2 HOUR'",
+ },
+ )
self.validate_all(
'STRUCT_PACK("a b" := 1)',
write={
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 00e6169ac0..4d7d97c5a5 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -40,6 +40,7 @@ def test_snowflake(self):
)""",
)
+ self.validate_identity("SELECT TIMEADD(HOUR, 2, CAST('09:05:03' AS TIME))")
self.validate_identity("SELECT CAST(OBJECT_CONSTRUCT('a', 1) AS MAP(VARCHAR, INT))")
self.validate_identity("SELECT CAST(OBJECT_CONSTRUCT('a', 1) AS OBJECT(a CHAR NOT NULL))")
self.validate_identity("SELECT CAST([1, 2, 3] AS ARRAY(INT))")
@@ -956,6 +957,9 @@ def test_timestamps(self):
)
self.validate_all(
"DATEADD(DAY, 5, CAST('2008-12-25' AS DATE))",
+ read={
+ "snowflake": "TIMESTAMPADD(DAY, 5, CAST('2008-12-25' AS DATE))",
+ },
write={
"bigquery": "DATE_ADD(CAST('2008-12-25' AS DATE), INTERVAL 5 DAY)",
"snowflake": "DATEADD(DAY, 5, CAST('2008-12-25' AS DATE))",
| [] | [
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps"
] | [
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array_index",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat: transpile Snowflake's ADDTIME
References:
- https://duckdb.org/docs/sql/functions/time
- https://docs.snowflake.com/en/sql-reference/functions/timeadd
- https://cloud.google.com/bigquery/docs/reference/standard-sql/time_functions#time_add
- https://docs.snowflake.com/en/sql-reference/functions/timestampadd
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
boto__botocore-3144 | 3,144 | boto/botocore | null | 0d6ed4aabb04edbab3e9552fc3651456b9e960a5 | 2024-03-19T17:09:43Z | diff --git a/botocore/client.py b/botocore/client.py
index 85b6045036..1e36232834 100644
--- a/botocore/client.py
+++ b/botocore/client.py
@@ -400,6 +400,9 @@ def _register_s3_events(
self._set_s3_presign_signature_version(
client.meta, client_config, scoped_config
)
+ client.meta.events.register(
+ 'before-parameter-build.s3', self._inject_s3_input_parameters
+ )
def _register_s3_control_events(
self,
@@ -456,6 +459,15 @@ def _set_s3_presign_signature_version(
'choose-signer.s3', self._default_s3_presign_to_sigv2
)
+ def _inject_s3_input_parameters(self, params, context, **kwargs):
+ context['input_params'] = {}
+ inject_parameters = ('Bucket', 'Delete', 'Key', 'Prefix')
+ for inject_parameter in inject_parameters:
+ if inject_parameter in params:
+ context['input_params'][inject_parameter] = params[
+ inject_parameter
+ ]
+
def _default_s3_presign_to_sigv2(self, signature_version, **kwargs):
"""
Returns the 's3' (sigv2) signer if presigning an s3 request. This is
diff --git a/botocore/utils.py b/botocore/utils.py
index 923217ead9..e2c5c17a00 100644
--- a/botocore/utils.py
+++ b/botocore/utils.py
@@ -1668,22 +1668,15 @@ def __init__(self, client, credential_cls, cache=None):
def register(self, event_emitter=None):
logger.debug('Registering S3Express Identity Resolver')
emitter = event_emitter or self._client.meta.events
- emitter.register(
- 'before-parameter-build.s3', self.inject_signing_cache_key
- )
emitter.register('before-call.s3', self.apply_signing_cache_key)
emitter.register('before-sign.s3', self.resolve_s3express_identity)
- def inject_signing_cache_key(self, params, context, **kwargs):
- if 'Bucket' in params:
- context['S3Express'] = {'bucket_name': params['Bucket']}
-
def apply_signing_cache_key(self, params, context, **kwargs):
endpoint_properties = context.get('endpoint_properties', {})
backend = endpoint_properties.get('backend', None)
# Add cache key if Bucket supplied for s3express request
- bucket_name = context.get('S3Express', {}).get('bucket_name')
+ bucket_name = context.get('input_params', {}).get('Bucket')
if backend == 'S3Express' and bucket_name is not None:
context.setdefault('signing', {})
context['signing']['cache_key'] = bucket_name
| diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py
index 0a081532a1..e4f99ac97f 100644
--- a/tests/functional/test_s3.py
+++ b/tests/functional/test_s3.py
@@ -3664,3 +3664,25 @@ def test_does_not_set_100_continute_with_empty_body(self):
s3.put_object(**op_kwargs)
expect_header = http_stubber.requests[-1].headers.get("Expect")
self.assertIsNone(expect_header)
+
+
+class TestParameterInjection(BaseS3OperationTest):
+ BUCKET = "foo"
+ KEY = "bar"
+
+ def test_parameter_injection(self):
+ self.http_stubber.add_response()
+ self.client.meta.events.register(
+ 'before-sign.s3', self._verify_bucket_and_key_in_context
+ )
+ with self.http_stubber:
+ self.client.put_object(
+ Bucket=self.BUCKET,
+ Key=self.KEY,
+ )
+
+ def _verify_bucket_and_key_in_context(self, request, **kwargs):
+ self.assertEqual(
+ request.context['input_params']['Bucket'], self.BUCKET
+ )
+ self.assertEqual(request.context['input_params']['Key'], self.KEY)
| [
{
"components": [
{
"doc": "",
"lines": [
462,
468
],
"name": "ClientCreator._inject_s3_input_parameters",
"signature": "def _inject_s3_input_parameters(self, params, context, **kwargs):",
"type": "function"
}
],
"file": "bo... | [
"tests/functional/test_s3.py::TestParameterInjection::test_parameter_injection"
] | [
"tests/functional/test_s3.py::TestS3BucketValidation::test_invalid_bucket_name_raises_error",
"tests/functional/test_s3.py::TestS3ClientConfigResolution::test_client_config_us_east_1_regional_overrides_config_var",
"tests/functional/test_s3.py::TestS3ClientConfigResolution::test_client_config_us_east_1_regional... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Make bucket name injection for S3Express more generic
Abstracts the s3 Express bucket name from the context to be generically usable as well as adding additional keys to be stored
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in botocore/client.py]
(definition of ClientCreator._inject_s3_input_parameters:)
def _inject_s3_input_parameters(self, params, context, **kwargs):
[end of new definitions in botocore/client.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 5e4b564dd0f9aab16a404251ebd3e675c9681492 | ||
deepset-ai__haystack-7382 | 7,382 | deepset-ai/haystack | null | f69c3e5cd26046b826927a39cad02af93b2ccbbf | 2024-03-19T14:39:52Z | diff --git a/docs/pydoc/config/data_classess_api.yml b/docs/pydoc/config/data_classess_api.yml
index 3d807eb61e..a67f28db9d 100644
--- a/docs/pydoc/config/data_classess_api.yml
+++ b/docs/pydoc/config/data_classess_api.yml
@@ -2,7 +2,7 @@ loaders:
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
search_path: [../../../haystack/dataclasses]
modules:
- ["answer", "byte_stream", "chat_message", "document", "streaming_chunk"]
+ ["answer", "byte_stream", "chat_message", "document", "streaming_chunk", "sparse_embedding"]
ignore_when_discovered: ["__init__"]
processors:
- type: filter
diff --git a/haystack/dataclasses/__init__.py b/haystack/dataclasses/__init__.py
index 695f8c47fd..e665d0c9ef 100644
--- a/haystack/dataclasses/__init__.py
+++ b/haystack/dataclasses/__init__.py
@@ -2,6 +2,7 @@
from haystack.dataclasses.byte_stream import ByteStream
from haystack.dataclasses.chat_message import ChatMessage, ChatRole
from haystack.dataclasses.document import Document
+from haystack.dataclasses.sparse_embedding import SparseEmbedding
from haystack.dataclasses.streaming_chunk import StreamingChunk
__all__ = [
@@ -13,4 +14,5 @@
"ChatMessage",
"ChatRole",
"StreamingChunk",
+ "SparseEmbedding",
]
diff --git a/haystack/dataclasses/document.py b/haystack/dataclasses/document.py
index bfdf69dde1..ddf762bf45 100644
--- a/haystack/dataclasses/document.py
+++ b/haystack/dataclasses/document.py
@@ -8,6 +8,7 @@
from haystack import logging
from haystack.dataclasses.byte_stream import ByteStream
+from haystack.dataclasses.sparse_embedding import SparseEmbedding
logger = logging.getLogger(__name__)
@@ -57,7 +58,8 @@ class Document(metaclass=_BackwardCompatible):
:param blob: Binary data associated with the document, if the document has any binary data associated with it.
:param meta: Additional custom metadata for the document. Must be JSON-serializable.
:param score: Score of the document. Used for ranking, usually assigned by retrievers.
- :param embedding: Vector representation of the document.
+ :param embedding: dense vector representation of the document.
+ :param sparse_embedding: sparse vector representation of the document.
"""
id: str = field(default="")
@@ -67,6 +69,7 @@ class Document(metaclass=_BackwardCompatible):
meta: Dict[str, Any] = field(default_factory=dict)
score: Optional[float] = field(default=None)
embedding: Optional[List[float]] = field(default=None)
+ sparse_embedding: Optional[SparseEmbedding] = field(default=None)
def __repr__(self):
fields = []
@@ -84,6 +87,8 @@ def __repr__(self):
fields.append(f"score: {self.score}")
if self.embedding is not None:
fields.append(f"embedding: vector of size {len(self.embedding)}")
+ if self.sparse_embedding is not None:
+ fields.append(f"sparse_embedding: vector with {len(self.sparse_embedding.indices)} non-zero elements")
fields_str = ", ".join(fields)
return f"{self.__class__.__name__}(id={self.id}, {fields_str})"
@@ -114,7 +119,8 @@ def _create_id(self):
mime_type = self.blob.mime_type if self.blob is not None else None
meta = self.meta or {}
embedding = self.embedding if self.embedding is not None else None
- data = f"{text}{dataframe}{blob}{mime_type}{meta}{embedding}"
+ sparse_embedding = self.sparse_embedding.to_dict() if self.sparse_embedding is not None else ""
+ data = f"{text}{dataframe}{blob}{mime_type}{meta}{embedding}{sparse_embedding}"
return hashlib.sha256(data.encode("utf-8")).hexdigest()
def to_dict(self, flatten=True) -> Dict[str, Any]:
@@ -132,6 +138,9 @@ def to_dict(self, flatten=True) -> Dict[str, Any]:
if (blob := data.get("blob")) is not None:
data["blob"] = {"data": list(blob["data"]), "mime_type": blob["mime_type"]}
+ if (sparse_embedding := data.get("sparse_embedding")) is not None:
+ data["sparse_embedding"] = sparse_embedding.to_dict()
+
if flatten:
meta = data.pop("meta")
return {**data, **meta}
@@ -149,6 +158,9 @@ def from_dict(cls, data: Dict[str, Any]) -> "Document":
data["dataframe"] = read_json(io.StringIO(dataframe))
if blob := data.get("blob"):
data["blob"] = ByteStream(data=bytes(blob["data"]), mime_type=blob["mime_type"])
+ if sparse_embedding := data.get("sparse_embedding"):
+ data["sparse_embedding"] = SparseEmbedding.from_dict(sparse_embedding)
+
# Store metadata for a moment while we try un-flattening allegedly flatten metadata.
# We don't expect both a `meta=` keyword and flatten metadata keys so we'll raise a
# ValueError later if this is the case.
diff --git a/haystack/dataclasses/sparse_embedding.py b/haystack/dataclasses/sparse_embedding.py
new file mode 100644
index 0000000000..40b59369dd
--- /dev/null
+++ b/haystack/dataclasses/sparse_embedding.py
@@ -0,0 +1,26 @@
+from typing import List
+
+
+class SparseEmbedding:
+ """
+ Class representing a sparse embedding.
+ """
+
+ def __init__(self, indices: List[int], values: List[float]):
+ """
+ :param indices: List of indices of non-zero elements in the embedding.
+ :param values: List of values of non-zero elements in the embedding.
+
+ :raises ValueError: If the indices and values lists are not of the same length.
+ """
+ if len(indices) != len(values):
+ raise ValueError("Length of indices and values must be the same.")
+ self.indices = indices
+ self.values = values
+
+ def to_dict(self):
+ return {"indices": self.indices, "values": self.values}
+
+ @classmethod
+ def from_dict(cls, sparse_embedding_dict):
+ return cls(indices=sparse_embedding_dict["indices"], values=sparse_embedding_dict["values"])
diff --git a/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml b/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml
new file mode 100644
index 0000000000..455647b550
--- /dev/null
+++ b/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Introduce a new `SparseEmbedding` class which can be used to store a sparse
+ vector representation of a Document.
+ It will be instrumental to support Sparse Embedding Retrieval with
+ the subsequent introduction of Sparse Embedders and Sparse Embedding Retrievers.
| diff --git a/test/dataclasses/test_document.py b/test/dataclasses/test_document.py
index 72e45a4b6b..afe2327179 100644
--- a/test/dataclasses/test_document.py
+++ b/test/dataclasses/test_document.py
@@ -3,6 +3,7 @@
from haystack import Document
from haystack.dataclasses.byte_stream import ByteStream
+from haystack.dataclasses.sparse_embedding import SparseEmbedding
@pytest.mark.parametrize(
@@ -37,6 +38,7 @@ def test_init():
assert doc.meta == {}
assert doc.score == None
assert doc.embedding == None
+ assert doc.sparse_embedding == None
def test_init_with_wrong_parameters():
@@ -46,6 +48,7 @@ def test_init_with_wrong_parameters():
def test_init_with_parameters():
blob_data = b"some bytes"
+ sparse_embedding = SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3])
doc = Document(
content="test text",
dataframe=pd.DataFrame([0]),
@@ -53,8 +56,9 @@ def test_init_with_parameters():
meta={"text": "test text"},
score=0.812,
embedding=[0.1, 0.2, 0.3],
+ sparse_embedding=sparse_embedding,
)
- assert doc.id == "ec92455f3f4576d40031163c89b1b4210b34ea1426ee0ff68ebed86cb7ba13f8"
+ assert doc.id == "967b7bd4a21861ad9e863f638cefcbdd6bf6306bebdd30aa3fedf0c26bc636ed"
assert doc.content == "test text"
assert doc.dataframe is not None
assert doc.dataframe.equals(pd.DataFrame([0]))
@@ -63,6 +67,7 @@ def test_init_with_parameters():
assert doc.meta == {"text": "test text"}
assert doc.score == 0.812
assert doc.embedding == [0.1, 0.2, 0.3]
+ assert doc.sparse_embedding == sparse_embedding
def test_init_with_legacy_fields():
@@ -76,6 +81,7 @@ def test_init_with_legacy_fields():
assert doc.meta == {}
assert doc.score == 0.812
assert doc.embedding == [0.1, 0.2, 0.3]
+ assert doc.sparse_embedding == None
def test_init_with_legacy_field():
@@ -93,6 +99,7 @@ def test_init_with_legacy_field():
assert doc.meta == {"date": "10-10-2023", "type": "article"}
assert doc.score == 0.812
assert doc.embedding == [0.1, 0.2, 0.3]
+ assert doc.sparse_embedding == None
def test_basic_equality_type_mismatch():
@@ -121,6 +128,7 @@ def test_to_dict():
"blob": None,
"score": None,
"embedding": None,
+ "sparse_embedding": None,
}
@@ -134,6 +142,7 @@ def test_to_dict_without_flattening():
"meta": {},
"score": None,
"embedding": None,
+ "sparse_embedding": None,
}
@@ -145,6 +154,7 @@ def test_to_dict_with_custom_parameters():
meta={"some": "values", "test": 10},
score=0.99,
embedding=[10.0, 10.0],
+ sparse_embedding=SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3]),
)
assert doc.to_dict() == {
@@ -156,6 +166,7 @@ def test_to_dict_with_custom_parameters():
"test": 10,
"score": 0.99,
"embedding": [10.0, 10.0],
+ "sparse_embedding": {"indices": [0, 2, 4], "values": [0.1, 0.2, 0.3]},
}
@@ -167,6 +178,7 @@ def test_to_dict_with_custom_parameters_without_flattening():
meta={"some": "values", "test": 10},
score=0.99,
embedding=[10.0, 10.0],
+ sparse_embedding=SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3]),
)
assert doc.to_dict(flatten=False) == {
@@ -177,6 +189,7 @@ def test_to_dict_with_custom_parameters_without_flattening():
"meta": {"some": "values", "test": 10},
"score": 0.99,
"embedding": [10, 10],
+ "sparse_embedding": {"indices": [0, 2, 4], "values": [0.1, 0.2, 0.3]},
}
@@ -194,6 +207,7 @@ def from_from_dict_with_parameters():
"meta": {"text": "test text"},
"score": 0.812,
"embedding": [0.1, 0.2, 0.3],
+ "sparse_embedding": {"indices": [0, 2, 4], "values": [0.1, 0.2, 0.3]},
}
) == Document(
content="test text",
@@ -202,6 +216,7 @@ def from_from_dict_with_parameters():
meta={"text": "test text"},
score=0.812,
embedding=[0.1, 0.2, 0.3],
+ sparse_embedding=SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3]),
)
@@ -249,6 +264,7 @@ def test_from_dict_with_flat_meta():
"blob": {"data": list(blob_data), "mime_type": "text/markdown"},
"score": 0.812,
"embedding": [0.1, 0.2, 0.3],
+ "sparse_embedding": {"indices": [0, 2, 4], "values": [0.1, 0.2, 0.3]},
"date": "10-10-2023",
"type": "article",
}
@@ -258,6 +274,7 @@ def test_from_dict_with_flat_meta():
blob=ByteStream(blob_data, mime_type="text/markdown"),
score=0.812,
embedding=[0.1, 0.2, 0.3],
+ sparse_embedding=SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3]),
meta={"date": "10-10-2023", "type": "article"},
)
diff --git a/test/dataclasses/test_sparse_embedding.py b/test/dataclasses/test_sparse_embedding.py
new file mode 100644
index 0000000000..f3fc889aa5
--- /dev/null
+++ b/test/dataclasses/test_sparse_embedding.py
@@ -0,0 +1,23 @@
+import pytest
+
+from haystack.dataclasses.sparse_embedding import SparseEmbedding
+
+
+class TestSparseEmbedding:
+ def test_init(self):
+ se = SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3])
+ assert se.indices == [0, 2, 4]
+ assert se.values == [0.1, 0.2, 0.3]
+
+ def test_init_with_wrong_parameters(self):
+ with pytest.raises(ValueError):
+ SparseEmbedding(indices=[0, 2], values=[0.1, 0.2, 0.3, 0.4])
+
+ def test_to_dict(self):
+ se = SparseEmbedding(indices=[0, 2, 4], values=[0.1, 0.2, 0.3])
+ assert se.to_dict() == {"indices": [0, 2, 4], "values": [0.1, 0.2, 0.3]}
+
+ def test_from_dict(self):
+ se = SparseEmbedding.from_dict({"indices": [0, 2, 4], "values": [0.1, 0.2, 0.3]})
+ assert se.indices == [0, 2, 4]
+ assert se.values == [0.1, 0.2, 0.3]
diff --git a/test/tracing/test_utils.py b/test/tracing/test_utils.py
index a28cb6c6d3..a8f0cb9409 100644
--- a/test/tracing/test_utils.py
+++ b/test/tracing/test_utils.py
@@ -25,15 +25,15 @@ class TestTypeCoercion:
(NonSerializableClass(), "NonSerializableClass"),
(
Document(id="1", content="text"),
- '{"id": "1", "content": "text", "dataframe": null, "blob": null, "score": null, "embedding": null}',
+ '{"id": "1", "content": "text", "dataframe": null, "blob": null, "score": null, "embedding": null, "sparse_embedding": null}',
),
(
[Document(id="1", content="text")],
- '[{"id": "1", "content": "text", "dataframe": null, "blob": null, "score": null, "embedding": null}]',
+ '[{"id": "1", "content": "text", "dataframe": null, "blob": null, "score": null, "embedding": null, "sparse_embedding": null}]',
),
(
{"key": Document(id="1", content="text")},
- '{"key": {"id": "1", "content": "text", "dataframe": null, "blob": null, "score": null, "embedding": null}}',
+ '{"key": {"id": "1", "content": "text", "dataframe": null, "blob": null, "score": null, "embedding": null, "sparse_embedding": null}}',
),
],
)
| diff --git a/docs/pydoc/config/data_classess_api.yml b/docs/pydoc/config/data_classess_api.yml
index 3d807eb61e..a67f28db9d 100644
--- a/docs/pydoc/config/data_classess_api.yml
+++ b/docs/pydoc/config/data_classess_api.yml
@@ -2,7 +2,7 @@ loaders:
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
search_path: [../../../haystack/dataclasses]
modules:
- ["answer", "byte_stream", "chat_message", "document", "streaming_chunk"]
+ ["answer", "byte_stream", "chat_message", "document", "streaming_chunk", "sparse_embedding"]
ignore_when_discovered: ["__init__"]
processors:
- type: filter
diff --git a/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml b/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml
new file mode 100644
index 0000000000..455647b550
--- /dev/null
+++ b/releasenotes/notes/sparse-embedding-fd55b670437492be.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Introduce a new `SparseEmbedding` class which can be used to store a sparse
+ vector representation of a Document.
+ It will be instrumental to support Sparse Embedding Retrieval with
+ the subsequent introduction of Sparse Embedders and Sparse Embedding Retrievers.
| [
{
"components": [
{
"doc": "Class representing a sparse embedding.",
"lines": [
4,
26
],
"name": "SparseEmbedding",
"signature": "class SparseEmbedding:",
"type": "class"
},
{
"doc": ":param indices: List of indices of... | [
"test/dataclasses/test_document.py::test_document_str[doc0-content:",
"test/dataclasses/test_document.py::test_document_str[doc1-dataframe:",
"test/dataclasses/test_document.py::test_document_str[doc2-blob:",
"test/dataclasses/test_document.py::test_document_str[doc3-content:",
"test/dataclasses/test_docume... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: introduce `SparseEmbedding`
### Related Issues
- part of #7355
### Proposed Changes:
Introduce a new class to store sparse embeddings.
Contains two fields: `indices` and `values`, which must have the same length.
### How did you test it?
New unit tests, change some existing tests, CI.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/dataclasses/sparse_embedding.py]
(definition of SparseEmbedding:)
class SparseEmbedding:
"""Class representing a sparse embedding."""
(definition of SparseEmbedding.__init__:)
def __init__(self, indices: List[int], values: List[float]):
""":param indices: List of indices of non-zero elements in the embedding.
:param values: List of values of non-zero elements in the embedding.
:raises ValueError: If the indices and values lists are not of the same length."""
(definition of SparseEmbedding.to_dict:)
def to_dict(self):
(definition of SparseEmbedding.from_dict:)
def from_dict(cls, sparse_embedding_dict):
[end of new definitions in haystack/dataclasses/sparse_embedding.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
deepset-ai__haystack-7381 | 7,381 | deepset-ai/haystack | null | f69c3e5cd26046b826927a39cad02af93b2ccbbf | 2024-03-19T14:33:17Z | diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
new file mode 100644
index 0000000000..9550a5f42d
--- /dev/null
+++ b/haystack/components/evaluators/__init__.py
@@ -0,0 +1,3 @@
+from .answer_exact_match import AnswerExactMatchEvaluator
+
+__all__ = ["AnswerExactMatchEvaluator"]
diff --git a/haystack/components/evaluators/answer_exact_match.py b/haystack/components/evaluators/answer_exact_match.py
new file mode 100644
index 0000000000..4927f4e142
--- /dev/null
+++ b/haystack/components/evaluators/answer_exact_match.py
@@ -0,0 +1,59 @@
+from typing import Dict, List
+
+from haystack.core.component import component
+
+
+@component
+class AnswerExactMatchEvaluator:
+ """
+ Evaluator that checks if the predicted answers matches any of the ground truth answers exactly.
+ The result is a number from 0.0 to 1.0, it represents the proportion of questions where any predicted answer
+ matched one of the ground truth answers.
+ Each question can have multiple ground truth answers and multiple predicted answers.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import AnswerExactMatchEvaluator
+
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["Paris"]],
+ )
+ print(result["result"])
+ # 1.0
+ ```
+ """
+
+ @component.output_types(result=float)
+ def run(
+ self, questions: List[str], ground_truth_answers: List[List[str]], predicted_answers: List[List[str]]
+ ) -> Dict[str, float]:
+ """
+ Run the AnswerExactMatchEvaluator on the given inputs.
+ All lists must have the same length.
+
+ :param questions:
+ A list of questions.
+ :param ground_truth_answers:
+ A list of expected answers for each question.
+ :param predicted_answers:
+ A list of predicted answers for each question.
+ :returns:
+ A dictionary with the following outputs:
+ - `result` - A number from 0.0 to 1.0 that represents the proportion of questions where any predicted
+ answer matched one of the ground truth answers.
+ """
+ if not len(questions) == len(ground_truth_answers) == len(predicted_answers):
+ raise ValueError("The length of questions, ground_truth_answers, and predicted_answers must be the same.")
+
+ matches = 0
+ for truths, extracted in zip(ground_truth_answers, predicted_answers):
+ if set(truths) & set(extracted):
+ matches += 1
+
+ # The proportion of questions where any predicted answer matched one of the ground truth answers
+ result = matches / len(questions)
+
+ return {"result": result}
diff --git a/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
new file mode 100644
index 0000000000..ad380617d9
--- /dev/null
+++ b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `AnswerExactMatchEvaluator`, a Component that can be used to calculate the Exact Match metric
+ given a list of questions, a list of expected answers for each question and the list of predicted
+ answers for each question.
| diff --git a/test/components/evaluators/test_answer_exact_match.py b/test/components/evaluators/test_answer_exact_match.py
new file mode 100644
index 0000000000..c179c74a25
--- /dev/null
+++ b/test/components/evaluators/test_answer_exact_match.py
@@ -0,0 +1,61 @@
+import pytest
+
+from haystack.components.evaluators import AnswerExactMatchEvaluator
+
+
+def test_run_with_all_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["Paris"]],
+ )
+
+ assert result["result"] == 1.0
+
+
+def test_run_with_no_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Paris"], ["London"]],
+ )
+
+ assert result["result"] == 0.0
+
+
+def test_run_with_partial_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ assert result["result"] == 0.5
+
+
+def test_run_with_different_lengths():
+ evaluator = AnswerExactMatchEvaluator()
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"]],
+ )
| diff --git a/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
new file mode 100644
index 0000000000..ad380617d9
--- /dev/null
+++ b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `AnswerExactMatchEvaluator`, a Component that can be used to calculate the Exact Match metric
+ given a list of questions, a list of expected answers for each question and the list of predicted
+ answers for each question.
| [
{
"components": [
{
"doc": "Evaluator that checks if the predicted answers matches any of the ground truth answers exactly.\nThe result is a number from 0.0 to 1.0, it represents the proportion of questions where any predicted answer\nmatched one of the ground truth answers.\nEach question can hav... | [
"test/components/evaluators/test_answer_exact_match.py::test_run_with_all_matching",
"test/components/evaluators/test_answer_exact_match.py::test_run_with_no_matching",
"test/components/evaluators/test_answer_exact_match.py::test_run_with_partial_matching",
"test/components/evaluators/test_answer_exact_match.... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `AnswerExactMatchEvaluator`
### Related Issues
- fixes #6067
### Proposed Changes:
Add `AnswerExactMatchEvaluator`. This Component calculates the Exact Match metrics given a list of questions, a list of expected answers for each question and the list of predicted answers for each question.
### How did you test it?
I added unit tests.
### Notes for the reviewer
N/A
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/answer_exact_match.py]
(definition of AnswerExactMatchEvaluator:)
class AnswerExactMatchEvaluator:
"""Evaluator that checks if the predicted answers matches any of the ground truth answers exactly.
The result is a number from 0.0 to 1.0, it represents the proportion of questions where any predicted answer
matched one of the ground truth answers.
Each question can have multiple ground truth answers and multiple predicted answers.
Usage example:
```python
from haystack.components.evaluators import AnswerExactMatchEvaluator
evaluator = AnswerExactMatchEvaluator()
result = evaluator.run(
questions=["What is the capital of Germany?", "What is the capital of France?"],
ground_truth_answers=[["Berlin"], ["Paris"]],
predicted_answers=[["Berlin"], ["Paris"]],
)
print(result["result"])
# 1.0
```"""
(definition of AnswerExactMatchEvaluator.run:)
def run( self, questions: List[str], ground_truth_answers: List[List[str]], predicted_answers: List[List[str]] ) -> Dict[str, float]:
"""Run the AnswerExactMatchEvaluator on the given inputs.
All lists must have the same length.
:param questions:
A list of questions.
:param ground_truth_answers:
A list of expected answers for each question.
:param predicted_answers:
A list of predicted answers for each question.
:returns:
A dictionary with the following outputs:
- `result` - A number from 0.0 to 1.0 that represents the proportion of questions where any predicted
answer matched one of the ground truth answers."""
[end of new definitions in haystack/components/evaluators/answer_exact_match.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement function to calculate Exact Match metric
As specified in proposal #5794 we need to implement a function to calculate the Exact Match metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_em()` could be a nice name.
For more detailed information check out the original proposal.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
sphinx-doc__sphinx-12131 | 12,131 | sphinx-doc/sphinx | 7.3 | aaecc9376d0662aeca5d3bd7c9d9fa36d6398478 | 2024-03-18T19:49:32Z | diff --git a/CHANGES.rst b/CHANGES.rst
index 8ffe9365d67..04733657806 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -22,6 +22,9 @@ Deprecated
Features added
--------------
+* #12131: Added :confval:`show_warning_types` configuration option.
+ Patch by Chris Sewell.
+
* #11701: HTML Search: Adopt the new `<search>`_ element.
Patch by Bénédikt Tran.
diff --git a/doc/conf.py b/doc/conf.py
index 37f86232dce..2816935e6c0 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -23,6 +23,7 @@
release = version
show_authors = True
nitpicky = True
+show_warning_types = True
html_theme = 'sphinx13'
html_theme_path = ['_themes']
diff --git a/doc/usage/configuration.rst b/doc/usage/configuration.rst
index c7b30d054af..b5a2b84934a 100644
--- a/doc/usage/configuration.rst
+++ b/doc/usage/configuration.rst
@@ -326,11 +326,19 @@ General configuration
.. versionadded:: 0.5
+.. confval:: show_warning_types
+
+ If ``True``, the type of each warning is added as a suffix to the warning message,
+ e.g., ``WARNING: [...] [index]`` or ``WARNING: [...] [toc.circular]``.
+ The default is ``False``.
+
+ .. versionadded:: 7.3.0
+
.. confval:: suppress_warnings
A list of warning types to suppress arbitrary warning messages.
- Sphinx supports following warning types:
+ Sphinx core supports following warning types:
* ``app.add_node``
* ``app.add_directive``
@@ -359,11 +367,11 @@ General configuration
* ``toc.not_readable``
* ``toc.secnum``
+ Then extensions can also define their own warning types.
+
You can choose from these types. You can also give only the first
component to exclude all warnings attached to it.
- Now, this option should be considered *experimental*.
-
.. versionadded:: 1.4
.. versionchanged:: 1.5
diff --git a/sphinx/config.py b/sphinx/config.py
index e90c7ebfcb8..dc8c1c11d72 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -227,6 +227,7 @@ class Config:
'template_bridge': _Opt(None, 'html', frozenset((str,))),
'keep_warnings': _Opt(False, 'env', ()),
'suppress_warnings': _Opt([], 'env', ()),
+ 'show_warning_types': _Opt(False, 'env', frozenset((bool,))),
'modindex_common_prefix': _Opt([], 'html', ()),
'rst_epilog': _Opt(None, 'env', frozenset((str,))),
'rst_prolog': _Opt(None, 'env', frozenset((str,))),
diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py
index baff073253c..a74369862ba 100644
--- a/sphinx/util/logging.py
+++ b/sphinx/util/logging.py
@@ -480,6 +480,7 @@ class SphinxLogRecordTranslator(logging.Filter):
* Make a instance of SphinxLogRecord
* docname to path if location given
+ * append warning type/subtype to message if :confval:`show_warning_types` is ``True``
"""
LogRecordClass: type[logging.LogRecord]
@@ -522,6 +523,23 @@ class WarningLogRecordTranslator(SphinxLogRecordTranslator):
LogRecordClass = SphinxWarningLogRecord
+ def filter(self, record: SphinxWarningLogRecord) -> bool: # type: ignore[override]
+ ret = super().filter(record)
+
+ try:
+ show_warning_types = self.app.config.show_warning_types
+ except AttributeError:
+ # config is not initialized yet (ex. in conf.py)
+ show_warning_types = False
+ if show_warning_types:
+ if log_type := getattr(record, 'type', ''):
+ if log_subtype := getattr(record, 'subtype', ''):
+ record.msg += f' [{log_type}.{log_subtype}]'
+ else:
+ record.msg += f' [{log_type}]'
+
+ return ret
+
def get_node_location(node: Node) -> str | None:
source, line = get_source_line(node)
| diff --git a/tests/test_util/test_util_logging.py b/tests/test_util/test_util_logging.py
index 4d506a8a862..8c621880313 100644
--- a/tests/test_util/test_util_logging.py
+++ b/tests/test_util/test_util_logging.py
@@ -10,7 +10,7 @@
from sphinx.errors import SphinxWarning
from sphinx.testing.util import strip_escseq
from sphinx.util import logging, osutil
-from sphinx.util.console import colorize
+from sphinx.util.console import colorize, strip_colors
from sphinx.util.logging import is_suppressed_warning, prefixed_warnings
from sphinx.util.parallel import ParallelTasks
@@ -396,3 +396,20 @@ def test_get_node_location_abspath():
location = logging.get_node_location(n)
assert location == absolute_filename + ':'
+
+
+@pytest.mark.sphinx(confoverrides={'show_warning_types': True})
+def test_show_warning_types(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+ logger.warning('message2')
+ logger.warning('message3', type='test')
+ logger.warning('message4', type='test', subtype='logging')
+
+ warnings = strip_colors(warning.getvalue()).splitlines()
+
+ assert warnings == [
+ 'WARNING: message2',
+ 'WARNING: message3 [test]',
+ 'WARNING: message4 [test.logging]',
+ ]
| diff --git a/CHANGES.rst b/CHANGES.rst
index 8ffe9365d67..04733657806 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -22,6 +22,9 @@ Deprecated
Features added
--------------
+* #12131: Added :confval:`show_warning_types` configuration option.
+ Patch by Chris Sewell.
+
* #11701: HTML Search: Adopt the new `<search>`_ element.
Patch by Bénédikt Tran.
diff --git a/doc/usage/configuration.rst b/doc/usage/configuration.rst
index c7b30d054af..b5a2b84934a 100644
--- a/doc/usage/configuration.rst
+++ b/doc/usage/configuration.rst
@@ -326,11 +326,19 @@ General configuration
.. versionadded:: 0.5
+.. confval:: show_warning_types
+
+ If ``True``, the type of each warning is added as a suffix to the warning message,
+ e.g., ``WARNING: [...] [index]`` or ``WARNING: [...] [toc.circular]``.
+ The default is ``False``.
+
+ .. versionadded:: 7.3.0
+
.. confval:: suppress_warnings
A list of warning types to suppress arbitrary warning messages.
- Sphinx supports following warning types:
+ Sphinx core supports following warning types:
* ``app.add_node``
* ``app.add_directive``
@@ -359,11 +367,11 @@ General configuration
* ``toc.not_readable``
* ``toc.secnum``
+ Then extensions can also define their own warning types.
+
You can choose from these types. You can also give only the first
component to exclude all warnings attached to it.
- Now, this option should be considered *experimental*.
-
.. versionadded:: 1.4
.. versionchanged:: 1.5
| [
{
"components": [
{
"doc": "",
"lines": [
526,
541
],
"name": "WarningLogRecordTranslator.filter",
"signature": "def filter(self, record: SphinxWarningLogRecord) -> bool:",
"type": "function"
}
],
"file": "sphinx/util/loggin... | [
"tests/test_util/test_util_logging.py::test_show_warning_types"
] | [
"tests/test_util/test_util_logging.py::test_info_and_warning",
"tests/test_util/test_util_logging.py::test_Exception",
"tests/test_util/test_util_logging.py::test_verbosity_filter",
"tests/test_util/test_util_logging.py::test_nonl_info_log",
"tests/test_util/test_util_logging.py::test_once_warning_log",
"... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
✨ Add `show_warning_types` configuration variable
This PR add the `show_warning_types` config variable which when set to `True`, prepends the type and subtype (if set) to warning messages.
For example:
```
WARNING: py:class reference target not found: TextElement [ref.class]
```
This follows the best practices of other tools, such as mypy and ruff to provide the user greater information on the warning:
1. To provide greater specificity for the origin of the warning, particularly for extensions which utilise warning types. Currently these extensions have to provide bespoke solutions for showing the warning origin (see e.g. https://myst-parser.readthedocs.io/en/latest/configuration.html#build-warnings)
2. To encourage users to deal with warnings. I strongly recommend for users to run `sphinx-build -nW --keep-going`, then fix all warnings. Practically this is not always possible and so the use of `show_warning_types` and `suppress_warnings`, provides a way for users to deal with most warnings, whilst being able to suppress "less important" ones
Note currently, the default is `False`, i.e. they will still not be shown, which is done in order to introduce any breaking changes.
But, like https://github.com/python/mypy/pull/13542, I would strongly encourage this eventualy defaulting to `True`.
closes #8845
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sphinx/util/logging.py]
(definition of WarningLogRecordTranslator.filter:)
def filter(self, record: SphinxWarningLogRecord) -> bool:
[end of new definitions in sphinx/util/logging.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Show warnings with their "type" and "subtype"?
Similar to #8813, just for warnings instead of exceptions:
Currently, warning messages are shown like this:
```
WARNING: Some warning message
```
If a warning can be disabled, I think it would be helpful to display the warning `type` and `subtype`, maybe like:
```
WARNING (some_type.some_subtype): Some warning message
```
----------
-0: IMO, it's better to resolve the root reason for the warning instead of suppressing. I feel it's noisy...
+1
For [myst-parser](https://github.com/executablebooks/MyST-Parser) and [myst-nb](https://github.com/ExecutableBookProject/MyST-NB) I have had numerous people ask to silence a specific warning.
Although I encourage them to fix the warning, it is true that some warnings are more important than others. Also, as already mentioned, displaying the warning type helps to identify its origin, and thus how to fix it.
Displaying warning types has plenty of precedence in other packages, such as:
- https://mypy.readthedocs.io/en/stable/error_codes.html?highlight=show#silencing-errors-based-on-error-codes
- http://pylint.pycqa.org/en/latest/user_guide/message-control.html#block-disables
In myst-parser I now append the warning type to the end of the message: https://myst-parser.readthedocs.io/en/latest/using/howto.html#suppress-warnings
```
WARNING: Non-consecutive header level increase; 1 to 3 [myst.header]
```
> I feel it's noisy...
You could simply add a configuration option to control whether they are displayed, e.g. `show_warning_type`
Also to note, in https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-suppress_warnings it says "Now, this option should be considered experimental.".
I assume this is no longer the case, since this option has been around for a long time now?
I would consider creating a PR, but obviously would want to know first it was not likely to be rejected 😬
@tk0miya
> IMO, it's better to resolve the root reason for the warning instead of suppressing.
I agree, most of the times it's better to fix the root reason.
But sometimes it's not, and then it would be nice to have the additional information in order to silence the right warnings.
If not, people might just silence *all* warnings, which might be worse!
So I would add to your argument:
* most of the time, it's better to resolve the root reason for the warning instead of suppressing
* if not, it's better to suppress only certain warnings explicitly instead of suppressing all of them
> I feel it's noisy...
This would certainly add a little bit of noise, but I think it would be worth it.
For me, the main usage is in different CI tasks. I might run Sphinx in multiple CI tasks, but for certain builders (e.g. `linkcheck`) I might want to disable some warnings, which I want to keep enabled for the main `html` builder task.
Does this make sense?
@chrisjsewell
> You could simply add a configuration option to control whether they are displayed, e.g. `show_warning_type`
This is certainly a possibility, but I feel that this would never actually be used (assuming it has the correct default value).
People who care about warning messages will keep them to a minimum (ideally 0) anyway. And people who don't care, won't care about the exact display of the warnings anyway.
> -0: IMO, it's better to resolve the root reason for the warning instead of suppressing. I feel it's noisy...
every C++ compiler gives an exact warning number, by which you can also disable the given warning type - if done right, the "noisiness" can be kept low and the output made really universal.
@tk0miya I agree that it is better to fix the underlying issue. But to present another situation where it could be useful: I'm developing an extension that parses code blocks and links the code with corresponding autodoc entries. I intend to issue warnings when those entries are not found, but the code example might still be valid. So a user might want to ignore the warning because there's nothing they can do until the entry is added (perhaps in a 3rd party library). They *can* look at the extension documentation, but it would be much faster to see the exact warning type in the message.
It is a bit noisy though 😄 and I think more often it's a valid warning that can be fixed. So if not default behavior, could this at least be available via configuration?
--------------------
</issues> | aaecc9376d0662aeca5d3bd7c9d9fa36d6398478 |
conan-io__conan-15888 | 15,888 | conan-io/conan | null | bdcc67e6055e923d79e3a95767f7027e84ac29ec | 2024-03-18T11:32:44Z | diff --git a/conans/client/conanfile/configure.py b/conans/client/conanfile/configure.py
index 210e0f5f1bd..116a879a820 100644
--- a/conans/client/conanfile/configure.py
+++ b/conans/client/conanfile/configure.py
@@ -7,6 +7,8 @@
def run_configure_method(conanfile, down_options, profile_options, ref):
""" Run all the config-related functions for the given conanfile object """
+ initial_requires_count = len(conanfile.requires)
+
if hasattr(conanfile, "config_options"):
with conanfile_exception_formatter(conanfile, "config_options"):
conanfile.config_options()
@@ -23,6 +25,9 @@ def run_configure_method(conanfile, down_options, profile_options, ref):
elif "auto_shared_fpic" in conanfile.implements:
auto_shared_fpic_configure(conanfile)
+ if initial_requires_count != len(conanfile.requires):
+ conanfile.output.warning("Requirements should only be added in the requirements()/build_requirements() methods, not configure()/config_options(), which might raise errors in the future.", warn_tag="deprecated")
+
result = conanfile.options.get_upstream_options(down_options, ref, is_consumer)
self_options, up_options, private_up_options = result
# self_options are the minimum to reproduce state, as defined from downstream (not profile)
diff --git a/conans/model/requires.py b/conans/model/requires.py
index 50b7739f8dc..428bb596863 100644
--- a/conans/model/requires.py
+++ b/conans/model/requires.py
@@ -574,3 +574,6 @@ def __repr__(self):
def serialize(self):
return [v.serialize() for v in self._requires.values()]
+
+ def __len__(self):
+ return len(self._requires)
| diff --git a/conans/test/integration/build_requires/build_requires_test.py b/conans/test/integration/build_requires/build_requires_test.py
index da3a5af71e4..b11bda489e8 100644
--- a/conans/test/integration/build_requires/build_requires_test.py
+++ b/conans/test/integration/build_requires/build_requires_test.py
@@ -581,3 +581,17 @@ def test_build_missing_build_requires():
c.run("install app --build=missing")
assert "- Build" not in c.out
assert re.search(r"Skipped binaries(\s*)tool/0.1, tooldep/0.1", c.out)
+
+
+def test_requirement_in_wrong_method():
+ tc = TestClient(light=True)
+ tc.save({"conanfile.py": textwrap.dedent("""
+ from conan import ConanFile
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+ def configure(self):
+ self.requires("foo/1.0")
+ """)})
+ tc.run('create . -cc="core:warnings_as_errors=[\'*\']"', assert_error=True)
+ assert "ERROR: deprecated: Requirements should only be added in the requirements()/build_requirements() methods, not configure()/config_options(), which might raise errors in the future." in tc.out
| [
{
"components": [
{
"doc": "",
"lines": [
578,
579
],
"name": "Requirements.__len__",
"signature": "def __len__(self):",
"type": "function"
}
],
"file": "conans/model/requires.py"
}
] | [
"conans/test/integration/build_requires/build_requires_test.py::test_requirement_in_wrong_method"
] | [
"conans/test/integration/build_requires/build_requires_test.py::test_conanfile_txt",
"conans/test/integration/build_requires/build_requires_test.py::test_complete",
"conans/test/integration/build_requires/build_requires_test.py::test_dependents_new_buildenv",
"conans/test/integration/build_requires/build_requ... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Warn on misplaced requirement function calls
Changelog: Feature: Warn on misplaced requirement function calls
Docs: Omit
Only open question is the warn_tag. Is deprecated appropiate?
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/model/requires.py]
(definition of Requirements.__len__:)
def __len__(self):
[end of new definitions in conans/model/requires.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
conan-io__conan-15876 | 15,876 | conan-io/conan | null | 7f0f1b2844b0892408bb5deefe04b78e60403a9f | 2024-03-14T21:44:26Z | diff --git a/conan/cli/printers/graph.py b/conan/cli/printers/graph.py
index 6513636f8d4..404ca7c97f5 100644
--- a/conan/cli/printers/graph.py
+++ b/conan/cli/printers/graph.py
@@ -80,6 +80,17 @@ def _format_resolved(title, reqs_to_print):
reason = f": {reason}" if reason else ""
output.info(" {}{}".format(d, reason), Color.BRIGHT_CYAN)
+ if graph.options_conflicts:
+ output.info("Options conflicts", Color.BRIGHT_YELLOW)
+ for ref, ref_conflicts in graph.options_conflicts.items():
+ for option, conflict_info in ref_conflicts.items():
+ prev_value = conflict_info['value']
+ output.info(f" {ref}:{option}={prev_value} (current value)", Color.BRIGHT_CYAN)
+ for src_ref, conflict_value in conflict_info["conflicts"]:
+ output.info(f" {src_ref}->{option}={conflict_value}", Color.BRIGHT_CYAN)
+ output.info(" It is recommended to define options values in profiles, not in recipes",
+ Color.BRIGHT_CYAN)
+
def print_graph_packages(graph):
# I am excluding the "download"-"cache" or remote information, that is not
diff --git a/conans/client/graph/graph.py b/conans/client/graph/graph.py
index 63f79a41a34..ce1995e02d4 100644
--- a/conans/client/graph/graph.py
+++ b/conans/client/graph/graph.py
@@ -321,6 +321,7 @@ def __init__(self):
self.aliased = {}
self.resolved_ranges = {}
self.replaced_requires = {}
+ self.options_conflicts = {}
self.error = False
def overrides(self):
diff --git a/conans/client/graph/graph_builder.py b/conans/client/graph/graph_builder.py
index e190fd1e61d..937d32f76a2 100644
--- a/conans/client/graph/graph_builder.py
+++ b/conans/client/graph/graph_builder.py
@@ -13,7 +13,7 @@
from conans.client.graph.provides import check_graph_provides
from conans.errors import ConanException
from conans.model.conan_file import ConanFile
-from conans.model.options import Options
+from conans.model.options import Options, _PackageOptions
from conans.model.pkg_type import PackageType
from conans.model.recipe_ref import RecipeReference, ref_matches
from conans.model.requires import Requirement
@@ -97,10 +97,31 @@ def _expand_require(self, require, node, graph, profile_host, profile_build, gra
# print("Closing a loop from ", node, "=>", prev_node)
# Keep previous "test" status only if current is also test
prev_node.test = prev_node.test and (node.test or require.test)
+ self._save_options_conflicts(node, require, prev_node, graph)
require.process_package_type(node, prev_node)
graph.add_edge(node, prev_node, require)
node.propagate_closing_loop(require, prev_node)
+ def _save_options_conflicts(self, node, require, prev_node, graph):
+ """ Store the discrepancies of options when closing a diamond, to later report
+ them. This list is not exhaustive, only the diamond vertix, not other transitives
+ """
+ down_options = self._compute_down_options(node, require, prev_node.ref)
+ down_options = down_options._deps_package_options # noqa
+ if not down_options:
+ return
+ down_pkg_options = _PackageOptions()
+ for pattern, options in down_options.items():
+ if ref_matches(prev_node.ref, pattern, is_consumer=False):
+ down_pkg_options.update_options(options)
+ prev_options = {k: v for k, v in prev_node.conanfile.options.items()}
+ for k, v in down_pkg_options.items():
+ prev_value = prev_options.get(k)
+ if prev_value is not None and prev_value != v:
+ d = graph.options_conflicts.setdefault(str(prev_node.ref), {})
+ conflicts = d.setdefault(k, {"value": prev_value}).setdefault("conflicts", [])
+ conflicts.append((node.ref, v))
+
@staticmethod
def _conflicting_version(require, node,
prev_require, prev_node, prev_ref, base_previous, resolve_prereleases):
@@ -334,6 +355,29 @@ def _create_new_node(self, node, require, graph, profile_host, profile_build, gr
new_node.recipe = recipe_status
new_node.remote = remote
+ down_options = self._compute_down_options(node, require, new_ref)
+
+ if recipe_status != RECIPE_PLATFORM:
+ self._prepare_node(new_node, profile_host, profile_build, down_options)
+ if dep_conanfile.package_type is PackageType.CONF and node.recipe != RECIPE_VIRTUAL:
+ raise ConanException(f"Configuration package {dep_conanfile} cannot be used as "
+ f"requirement, but {node.ref} is requiring it")
+
+ require.process_package_type(node, new_node)
+ graph.add_node(new_node)
+ graph.add_edge(node, new_node, require)
+ if node.propagate_downstream(require, new_node):
+ raise GraphRuntimeError(node, new_node)
+
+ # This is necessary to prevent infinite loops even when visibility is False
+ ancestor = node.check_loops(new_node)
+ if ancestor is not None:
+ raise GraphLoopError(new_node, require, ancestor)
+
+ return new_node
+
+ @staticmethod
+ def _compute_down_options(node, require, new_ref):
# The consumer "up_options" are the options that come from downstream to this node
if require.options is not None:
# If the consumer has specified "requires(options=xxx)", we need to use it
@@ -354,25 +398,7 @@ def _create_new_node(self, node, require, graph, profile_host, profile_build, gr
down_options = node.conanfile.private_up_options
else:
down_options = Options(options_values=node.conanfile.default_build_options)
-
- if recipe_status != RECIPE_PLATFORM:
- self._prepare_node(new_node, profile_host, profile_build, down_options)
- if dep_conanfile.package_type is PackageType.CONF and node.recipe != RECIPE_VIRTUAL:
- raise ConanException(f"Configuration package {dep_conanfile} cannot be used as "
- f"requirement, but {node.ref} is requiring it")
-
- require.process_package_type(node, new_node)
- graph.add_node(new_node)
- graph.add_edge(node, new_node, require)
- if node.propagate_downstream(require, new_node):
- raise GraphRuntimeError(node, new_node)
-
- # This is necessary to prevent infinite loops even when visibility is False
- ancestor = node.check_loops(new_node)
- if ancestor is not None:
- raise GraphLoopError(new_node, require, ancestor)
-
- return new_node
+ return down_options
@staticmethod
def _remove_overrides(dep_graph):
| diff --git a/conans/test/integration/options/options_test.py b/conans/test/integration/options/options_test.py
index 9a7b417b085..65f4d77e794 100644
--- a/conans/test/integration/options/options_test.py
+++ b/conans/test/integration/options/options_test.py
@@ -739,3 +739,30 @@ def test_wrong_option_syntax_no_trace(self):
tc.run('create . -o="&:myoption"', assert_error=True)
assert "ValueError" not in tc.out
assert "Error while parsing option" in tc.out
+
+
+class TestConflictOptionsWarnings:
+
+ def test_options_warnings(self):
+ c = TestClient()
+ liba = GenConanfile("liba", "0.1").with_option("myoption", [1, 2, 3], default=1)
+ libb = GenConanfile("libb", "0.1").with_requires("liba/0.1")
+ libc = GenConanfile("libc", "0.1").with_requirement("liba/0.1", options={"myoption": 2})
+ app = GenConanfile().with_requires("libb/0.1", "libc/0.1")
+
+ c.save({"liba/conanfile.py": liba,
+ "libb/conanfile.py": libb,
+ "libc/conanfile.py": libc,
+ "app/conanfile.py": app})
+ c.run("export liba")
+ c.run("export libb")
+ c.run("export libc")
+
+ c.run("graph info app")
+ expected = textwrap.dedent("""\
+ Options conflicts
+ liba/0.1:myoption=1 (current value)
+ libc/0.1->myoption=2
+ It is recommended to define options values in profiles, not in recipes
+ """)
+ assert expected in c.out
| [
{
"components": [
{
"doc": "Store the discrepancies of options when closing a diamond, to later report\nthem. This list is not exhaustive, only the diamond vertix, not other transitives",
"lines": [
105,
123
],
"name": "DepsGraphBuilder._save_options_con... | [
"conans/test/integration/options/options_test.py::TestConflictOptionsWarnings::test_options_warnings"
] | [
"conans/test/integration/options/options_test.py::OptionsTest::test_any",
"conans/test/integration/options/options_test.py::OptionsTest::test_define_nested_option_not_freeze",
"conans/test/integration/options/options_test.py::OptionsTest::test_del_options_configure",
"conans/test/integration/options/options_t... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
print options conflicts
Changelog: Feature: Print options conflicts in the graph caused by different branches recipes defining options values.
Docs: https://github.com/conan-io/docs/pull/3643
Close https://github.com/conan-io/conan/issues/15865
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/client/graph/graph_builder.py]
(definition of DepsGraphBuilder._save_options_conflicts:)
def _save_options_conflicts(self, node, require, prev_node, graph):
"""Store the discrepancies of options when closing a diamond, to later report
them. This list is not exhaustive, only the diamond vertix, not other transitives"""
(definition of DepsGraphBuilder._compute_down_options:)
def _compute_down_options(node, require, new_ref):
[end of new definitions in conans/client/graph/graph_builder.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
tobymao__sqlglot-3145 | 3,145 | tobymao/sqlglot | null | d6bac3e54c6445c52daa04015b1b2e4a6933e682 | 2024-03-14T14:27:31Z | diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 45855772cb..755360cc16 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -810,6 +810,22 @@ class Generator(generator.Generator):
exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
+ def select_sql(self, expression: exp.Select) -> str:
+ if expression.args.get("offset"):
+ if not expression.args.get("order"):
+ # ORDER BY is required in order to use OFFSET in a query, so we use
+ # a noop order by, since we don't really care about the order.
+ # See: https://www.microsoftpressstore.com/articles/article.aspx?p=2314819
+ expression.order_by(exp.select(exp.null()).subquery(), copy=False)
+
+ limit = expression.args.get("limit")
+ if isinstance(limit, exp.Limit):
+ # TOP and OFFSET can't be combined, we need use FETCH instead of TOP
+ # we replace here because otherwise TOP would be generated in select_sql
+ limit.replace(exp.Fetch(direction="FIRST", count=limit.expression))
+
+ return super().select_sql(expression)
+
def convert_sql(self, expression: exp.Convert) -> str:
name = "TRY_CONVERT" if expression.args.get("safe") else "CONVERT"
return self.func(
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index ed3b2c7272..1198aaaf43 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2125,24 +2125,13 @@ def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
- limit: t.Optional[exp.Fetch | exp.Limit] = expression.args.get("limit")
-
- # If the limit is generated as TOP, we need to ensure it's not generated twice
- with_offset_limit_modifiers = not isinstance(limit, exp.Limit) or not self.LIMIT_IS_TOP
+ limit = expression.args.get("limit")
if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch):
limit = exp.Limit(expression=exp.maybe_copy(limit.args.get("count")))
elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit):
limit = exp.Fetch(direction="FIRST", count=exp.maybe_copy(limit.expression))
- fetch = isinstance(limit, exp.Fetch)
-
- offset_limit_modifiers = (
- self.offset_limit_modifiers(expression, fetch, limit)
- if with_offset_limit_modifiers
- else []
- )
-
options = self.expressions(expression, key="options")
if options:
options = f" OPTION{self.wrap(options)}"
@@ -2159,7 +2148,7 @@ def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
self.sql(expression, "having"),
*[gen(self, expression) for gen in self.AFTER_HAVING_MODIFIER_TRANSFORMS.values()],
self.sql(expression, "order"),
- *offset_limit_modifiers,
+ *self.offset_limit_modifiers(expression, isinstance(limit, exp.Fetch), limit),
*self.after_limit_modifiers(expression),
options,
sep="",
@@ -2190,12 +2179,13 @@ def select_sql(self, expression: exp.Select) -> str:
distinct = self.sql(expression, "distinct")
distinct = f" {distinct}" if distinct else ""
kind = self.sql(expression, "kind")
+
limit = expression.args.get("limit")
- top = (
- self.limit_sql(limit, top=True)
- if isinstance(limit, exp.Limit) and self.LIMIT_IS_TOP
- else ""
- )
+ if isinstance(limit, exp.Limit) and self.LIMIT_IS_TOP:
+ top = self.limit_sql(limit, top=True)
+ limit.pop()
+ else:
+ top = ""
expressions = self.expressions(expression)
| diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index ed474fd1af..1d2f03b631 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -272,6 +272,28 @@ def test_tsql(self):
"SELECT [x].[y] FROM foo",
)
+ self.validate_all(
+ "SELECT * FROM t ORDER BY (SELECT NULL) OFFSET 2 ROWS",
+ read={
+ "postgres": "SELECT * FROM t OFFSET 2",
+ },
+ write={
+ "postgres": "SELECT * FROM t ORDER BY (SELECT NULL) NULLS FIRST OFFSET 2",
+ "tsql": "SELECT * FROM t ORDER BY (SELECT NULL) OFFSET 2 ROWS",
+ },
+ )
+ self.validate_all(
+ "SELECT * FROM t ORDER BY (SELECT NULL) OFFSET 5 ROWS FETCH FIRST 10 ROWS ONLY",
+ read={
+ "duckdb": "SELECT * FROM t LIMIT 10 OFFSET 5",
+ "sqlite": "SELECT * FROM t LIMIT 5, 10",
+ "tsql": "SELECT * FROM t ORDER BY (SELECT NULL) OFFSET 5 ROWS FETCH FIRST 10 ROWS ONLY",
+ },
+ write={
+ "duckdb": "SELECT * FROM t ORDER BY (SELECT NULL) NULLS FIRST LIMIT 10 OFFSET 5",
+ "sqlite": "SELECT * FROM t ORDER BY (SELECT NULL) LIMIT 10 OFFSET 5",
+ },
+ )
self.validate_all(
"SELECT CAST([a].[b] AS SMALLINT) FROM foo",
write={
| [] | [
"tests/dialects/test_tsql.py::TestTSQL::test_tsql"
] | [
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert",
"tests/dialects/test_tsql... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat(tsql): transpile LIMIT with OFFSET properly
Fixes #3144
Reference:
- https://www.microsoftpressstore.com/articles/article.aspx?p=2314819
- https://learn.microsoft.com/en-us/sql/t-sql/queries/select-order-by-clause-transact-sql?view=sql-server-ver16
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Concerns about Translating SQLite Queries to TSQL
1. **Offset Issue:**
When attempting to convert an SQLite query to TSQL using the provided code snippet:
```python
import sqlglot
sql_a = "SELECT name, (SELECT COUNT(*) FROM orders WHERE orders.customer_id = customers.id) AS order_count FROM customers LIMIT 5, 10"
sql_b = sqlglot.transpile(sql_a, read="sqlite", write="tsql")[0]
print(sql_b)
```
The resulting TSQL output is as follows:
```sql
SELECT TOP 10 name, (SELECT COUNT(*) FROM orders WHERE orders.customer_id = customers.id) AS order_count FROM customers
```
It appears that the translated TSQL does not include information about the offset of 5. Instead, it only selects the top 10 rows, neglecting the fact that it should select 10 rows after an offset of 5. Could you please verify if this behavior is intended?
2. **Completeness of Translation:**
I had a question about the thoroughness of the translation process from SQLite to TSQL. Does the transpiler cover all possible cases of SQLite queries that are incompatible with TSQL syntax? Are there any known clauses or scenarios where the transpiler fails to convert the syntax accurately?
----------
Hey, thanks for the report. I'll take a look, this seems like a bug.
> I had a question about the thoroughness of the translation process from SQLite to TSQL. Does the transpiler cover all possible cases of SQLite queries that are incompatible with TSQL syntax? Are there any known clauses or scenarios where the transpiler fails to convert the syntax accurately?
The transpiler probably does not cover all possible cases as both dialects are huge and there may even be features that are not transpilable at all. It does cover a very large surface though, so I expect that for more "standard" queries you won't have any issues. There's no documentation about missing functionality, but feel free to file issues and / or PRs if you come across bugs in the future so we can improve.
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
google-deepmind__optax-865 | 865 | google-deepmind/optax | null | f45b2eb82ffdb8c22d57923b5039000539bed4bc | 2024-03-13T20:28:19Z | diff --git a/docs/api/utilities.rst b/docs/api/utilities.rst
index fbc1b497e..a7d423021 100644
--- a/docs/api/utilities.rst
+++ b/docs/api/utilities.rst
@@ -106,6 +106,7 @@ Tree
tree_ones_like
tree_random_like
tree_scalar_mul
+ tree_set
tree_sub
tree_sum
tree_vdot
@@ -155,6 +156,10 @@ Tree scalar multiply
~~~~~~~~~~~~~~~~~~~~
.. autofunction:: tree_scalar_mul
+Set values in a tree
+~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: tree_set
+
Tree subtract
~~~~~~~~~~~~~
.. autofunction:: tree_sub
diff --git a/optax/tree_utils/__init__.py b/optax/tree_utils/__init__.py
index 0c12e353d..14e8cf80a 100644
--- a/optax/tree_utils/__init__.py
+++ b/optax/tree_utils/__init__.py
@@ -17,6 +17,7 @@
from optax.tree_utils._state_utils import tree_get
from optax.tree_utils._state_utils import tree_get_all_with_path
from optax.tree_utils._state_utils import tree_map_params
+from optax.tree_utils._state_utils import tree_set
from optax.tree_utils._tree_math import tree_add
from optax.tree_utils._tree_math import tree_add_scalar_mul
diff --git a/optax/tree_utils/_state_utils.py b/optax/tree_utils/_state_utils.py
index dde675f50..9892cb617 100644
--- a/optax/tree_utils/_state_utils.py
+++ b/optax/tree_utils/_state_utils.py
@@ -175,14 +175,21 @@ def tree_get_all_with_path(
(``path_to_value``, ``value``). Here ``value`` is one entry of the state
that corresponds to the ``key``, and ``path_to_value`` is a path returned
by :func:`jax.tree_util.tree_flatten_with_path`.
+
+ Raises:
+ ValueError: If the input tree is flat, i.e., it is not a tuple/list/dict.
"""
- values_with_path_found = []
+ found_values_with_path = []
tree_flatten_with_path, _ = jax.tree_util.tree_flatten_with_path(tree)
+ if not tree_flatten_with_path or not tree_flatten_with_path[0][0]:
+ raise ValueError(
+ "The input tree cannot be flat, i.e., it must be a tuple/list/dict."
+ )
for path, val in tree_flatten_with_path:
key_leaf = _convert_jax_key_fn(path[-1])
if key_leaf == key:
- values_with_path_found.append((path, val))
- return values_with_path_found
+ found_values_with_path.append((path, val))
+ return found_values_with_path
def tree_get(tree: base.PyTree, key: Any, default: Optional[Any] = None) -> Any:
@@ -190,8 +197,11 @@ def tree_get(tree: base.PyTree, key: Any, default: Optional[Any] = None) -> Any:
Search in the leaves of a pytree for a specific ``key`` (which can be a key
from a dictionary or a name from a NamedTuple).
+
If no leaves in the tree have the required ``key`` returns ``default``.
+ Raises a ``KeyError`` if multiple values of ``key`` are found in ``tree``.
+
.. seealso:: :func:`optax.tree_utils.tree_get_all_with_path`
Examples:
@@ -217,14 +227,68 @@ def tree_get(tree: base.PyTree, key: Any, default: Optional[Any] = None) -> Any:
Raises:
KeyError: If multiple values of ``key`` are found in ``tree``.
+ ValueError: If the input tree is flat, i.e., it is not a tuple/list/dict.
"""
- values_with_path_found = tree_get_all_with_path(tree, key)
- if len(values_with_path_found) > 1:
+ found_values_with_path = tree_get_all_with_path(tree, key)
+ if len(found_values_with_path) > 1:
raise KeyError(f"Found multiple values for '{key}' in {tree}.")
- elif not values_with_path_found:
+ elif not found_values_with_path:
return default
else:
- return values_with_path_found[0][1]
+ return found_values_with_path[0][1]
+
+
+def tree_set(tree: base.PyTree, **kwargs: Any) -> base.PyTree:
+ """Creates a copy of tree with some leaves replaced as specified by kwargs.
+
+ Raises a ``KeyError`` if some keys in ``kwargs`` are not present in the tree.
+
+ Examples:
+ >>> import jax.numpy as jnp
+ >>> import optax
+ >>> params = jnp.array([1., 2., 3.])
+ >>> opt = optax.inject_hyperparams(optax.adam)(learning_rate=1.)
+ >>> state = opt.init(params)
+ >>> new_state = optax.tree_utils.tree_set(state, learning_rate=2.)
+ >>> lr = optax.tree_utils.tree_get(new_state, 'learning_rate')
+ >>> print(lr)
+ 2.0
+
+ Args:
+ tree: pytree whose values are to be replaced.
+ **kwargs: dictionary of keys with values to replace in the tree.
+
+ Returns:
+ new_tree
+ new pytree with the same structure as tree. For each leaf whose
+ key/name matches a key in ``**kwargs``, their values are set by the
+ corresponding value in ``**kwargs``.
+
+ Raises:
+ KeyError: If no values of some key in ``**kwargs`` are found in ``tree``.
+ ValueError: If the input tree is flat, i.e., it is not a tuple/list/dict.
+ """
+ tree_flatten_with_path, _ = jax.tree_util.tree_flatten_with_path(tree)
+ if not tree_flatten_with_path or not tree_flatten_with_path[0][0]:
+ raise ValueError(
+ "The input tree cannot be flat, i.e., it must be a tuple/list/dict."
+ )
+ key_leaves = [
+ _convert_jax_key_fn(path[-1]) for path, _ in tree_flatten_with_path
+ ]
+ if (left_keys := set(kwargs) - set(key_leaves)):
+ left_keys_str = " nor ".join({f"'{key}'" for key in left_keys})
+ raise KeyError(f"Found no value for {left_keys_str} in {tree}.")
+
+ def _replace(path, value):
+ """Replace a value in tree if key from path matches some key in kwargs."""
+ key_leaf = _convert_jax_key_fn(path[-1])
+ if key_leaf in kwargs:
+ return kwargs[key_leaf]
+ else:
+ return value
+
+ return jax.tree_util.tree_map_with_path(_replace, tree)
@jax.tree_util.register_pytree_node_class
| diff --git a/optax/tree_utils/_state_utils_test.py b/optax/tree_utils/_state_utils_test.py
index 9597f99c5..0a945760d 100644
--- a/optax/tree_utils/_state_utils_test.py
+++ b/optax/tree_utils/_state_utils_test.py
@@ -244,6 +244,12 @@ def test_map_non_params_to_none(self):
def test_tree_get_all_with_path(self):
params = jnp.array([1.0, 2.0, 3.0])
+ with self.subTest('Test with flat tree'):
+ tree = ()
+ self.assertRaises(ValueError, _state_utils.tree_get, tree, 'foo')
+ tree = jnp.array([1.0, 2.0, 3.0])
+ self.assertRaises(ValueError, _state_utils.tree_get, tree, 'foo')
+
with self.subTest('Test with single value in state'):
key = 'count'
opt = transform.scale_by_adam()
@@ -253,8 +259,8 @@ def test_tree_get_all_with_path(self):
self.assertEqual(values_found, expected_result)
with self.subTest('Test with no value in state'):
- key = 'count'
- opt = alias.sgd(learning_rate=1.0)
+ key = 'apple'
+ opt = alias.adam(learning_rate=1.0)
state = opt.init(params)
values_found = _state_utils.tree_get_all_with_path(state, key)
self.assertEmpty(values_found)
@@ -318,7 +324,7 @@ def test_tree_get(self):
with self.subTest('Test jitted tree_get'):
opt = _inject.inject_hyperparams(alias.sgd)(
- learning_rate=lambda x: 1/(x+1)
+ learning_rate=lambda x: 1 / (x + 1)
)
state = opt.init(params)
@@ -327,10 +333,64 @@ def get_learning_rate(state):
return _state_utils.tree_get(state, 'learning_rate')
for i in range(4):
- # we simply update state, we don't care about updates.
+ # we simply update state, we don't care about updates.
_, state = opt.update(params, state)
lr = get_learning_rate(state)
- self.assertEqual(lr, 1/(i+1))
+ self.assertEqual(lr, 1 / (i + 1))
+
+ def test_tree_set(self):
+ params = jnp.array([1.0, 2.0, 3.0])
+
+ with self.subTest('Test with flat tree'):
+ tree = ()
+ self.assertRaises(ValueError, _state_utils.tree_get, tree, 'foo')
+ tree = jnp.array([1.0, 2.0, 3.0])
+ self.assertRaises(ValueError, _state_utils.tree_get, tree, 'foo')
+
+ with self.subTest('Test modifying an injected hyperparam'):
+ opt = _inject.inject_hyperparams(alias.adam)(learning_rate=1.0)
+ state = opt.init(params)
+ new_state = _state_utils.tree_set(state, learning_rate=2.0, b1=3.0)
+ lr = _state_utils.tree_get(new_state, 'learning_rate')
+ self.assertEqual(lr, 2.0)
+
+ with self.subTest('Test modifying an attribute of the state'):
+ opt = _inject.inject_hyperparams(alias.adam)(learning_rate=1.0)
+ state = opt.init(params)
+ new_state = _state_utils.tree_set(state, learning_rate=2.0, b1=3.0)
+ b1 = _state_utils.tree_get(new_state, 'b1')
+ self.assertEqual(b1, 3.0)
+
+ with self.subTest('Test modifying a value not present in the state'):
+ opt = _inject.inject_hyperparams(alias.adam)(learning_rate=1.0)
+ state = opt.init(params)
+ self.assertRaises(KeyError, _state_utils.tree_set, state, ema=2.0)
+
+ with self.subTest('Test jitted tree_set'):
+
+ @jax.jit
+ def set_learning_rate(state, lr):
+ return _state_utils.tree_set(state, learning_rate=lr)
+
+ modified_state = state
+ lr = 1.0
+ for i in range(4):
+ modified_state = set_learning_rate(modified_state, lr / (i + 1))
+ # we simply update state, we don't care about updates.
+ _, modified_state = opt.update(params, modified_state)
+ modified_lr = _state_utils.tree_get(modified_state, 'learning_rate')
+ self.assertEqual(modified_lr, lr / (i + 1))
+
+ with self.subTest('Test modifying several values at once'):
+ opt = combine.chain(
+ alias.adam(learning_rate=1.0), alias.adam(learning_rate=1.0)
+ )
+ state = opt.init(params)
+ new_state = _state_utils.tree_set(state, count=2.0)
+ values_found = _state_utils.tree_get_all_with_path(new_state, 'count')
+ self.assertLen(values_found, 2)
+ for _, value in values_found:
+ self.assertEqual(value, 2.0)
def _fake_params():
| diff --git a/docs/api/utilities.rst b/docs/api/utilities.rst
index fbc1b497e..a7d423021 100644
--- a/docs/api/utilities.rst
+++ b/docs/api/utilities.rst
@@ -106,6 +106,7 @@ Tree
tree_ones_like
tree_random_like
tree_scalar_mul
+ tree_set
tree_sub
tree_sum
tree_vdot
@@ -155,6 +156,10 @@ Tree scalar multiply
~~~~~~~~~~~~~~~~~~~~
.. autofunction:: tree_scalar_mul
+Set values in a tree
+~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: tree_set
+
Tree subtract
~~~~~~~~~~~~~
.. autofunction:: tree_sub
| [
{
"components": [
{
"doc": "Creates a copy of tree with some leaves replaced as specified by kwargs.\n\nRaises a ``KeyError`` if some keys in ``kwargs`` are not present in the tree.\n\nExamples:\n >>> import jax.numpy as jnp\n >>> import optax\n >>> params = jnp.array([1., 2., 3.])\n >>> opt =... | [
"optax/tree_utils/_state_utils_test.py::StateUtilsTest::test_tree_get_all_with_path",
"optax/tree_utils/_state_utils_test.py::StateUtilsTest::test_tree_set"
] | [
"optax/tree_utils/_state_utils_test.py::StateUtilsTest::test_adam",
"optax/tree_utils/_state_utils_test.py::StateUtilsTest::test_dict_based_optimizers",
"optax/tree_utils/_state_utils_test.py::StateUtilsTest::test_inject_hparams",
"optax/tree_utils/_state_utils_test.py::StateUtilsTest::test_map_non_params_to_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Utility to set value in a pytree (and so in state)
Utility to set value in a pytree (and so in state)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in optax/tree_utils/_state_utils.py]
(definition of tree_set:)
def tree_set(tree: base.PyTree, **kwargs: Any) -> base.PyTree:
"""Creates a copy of tree with some leaves replaced as specified by kwargs.
Raises a ``KeyError`` if some keys in ``kwargs`` are not present in the tree.
Examples:
>>> import jax.numpy as jnp
>>> import optax
>>> params = jnp.array([1., 2., 3.])
>>> opt = optax.inject_hyperparams(optax.adam)(learning_rate=1.)
>>> state = opt.init(params)
>>> new_state = optax.tree_utils.tree_set(state, learning_rate=2.)
>>> lr = optax.tree_utils.tree_get(new_state, 'learning_rate')
>>> print(lr)
2.0
Args:
tree: pytree whose values are to be replaced.
**kwargs: dictionary of keys with values to replace in the tree.
Returns:
new_tree
new pytree with the same structure as tree. For each leaf whose
key/name matches a key in ``**kwargs``, their values are set by the
corresponding value in ``**kwargs``.
Raises:
KeyError: If no values of some key in ``**kwargs`` are found in ``tree``.
ValueError: If the input tree is flat, i.e., it is not a tuple/list/dict."""
(definition of tree_set._replace:)
def _replace(path, value):
"""Replace a value in tree if key from path matches some key in kwargs."""
[end of new definitions in optax/tree_utils/_state_utils.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 1e08bccf195ac54e7d9d766eb5e69345bf0e3230 | |
tobymao__sqlglot-3133 | 3,133 | tobymao/sqlglot | null | 4bf862ba5294ac0f319487b33b7f90ff718509f4 | 2024-03-13T12:16:10Z | diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 1815477928..a18cfbed48 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -91,14 +91,6 @@ def _build_if_from_nullifzero(args: t.List) -> exp.If:
return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
-def _datatype_sql(self: Snowflake.Generator, expression: exp.DataType) -> str:
- if expression.is_type("array"):
- return "ARRAY"
- elif expression.is_type("map"):
- return "OBJECT"
- return self.datatype_sql(expression)
-
-
def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
flag = expression.text("flag")
@@ -252,6 +244,25 @@ def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
return expression
+def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
+ assert isinstance(expression, exp.Create)
+
+ def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
+ if expression.this in exp.DataType.NESTED_TYPES:
+ expression.set("expressions", None)
+ return expression
+
+ props = expression.args.get("properties")
+ if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
+ for schema_expression in expression.this.expressions:
+ if isinstance(schema_expression, exp.ColumnDef):
+ column_type = schema_expression.kind
+ if isinstance(column_type, exp.DataType):
+ column_type.transform(_flatten_structured_type, copy=False)
+
+ return expression
+
+
class Snowflake(Dialect):
# https://docs.snowflake.com/en/sql-reference/identifiers-syntax
NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
@@ -707,6 +718,7 @@ class Generator(generator.Generator):
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ","
INSERT_OVERWRITE = " OVERWRITE INTO"
+ STRUCT_DELIMITER = ("(", ")")
TRANSFORMS = {
**generator.Generator.TRANSFORMS,
@@ -719,10 +731,10 @@ class Generator(generator.Generator):
"CONVERT_TIMEZONE", e.args.get("zone"), e.this
),
exp.BitwiseXor: rename_func("BITXOR"),
+ exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
exp.DateAdd: date_delta_sql("DATEADD"),
exp.DateDiff: date_delta_sql("DATEDIFF"),
exp.DateStrToDate: datestrtodate_sql,
- exp.DataType: _datatype_sql,
exp.DayOfMonth: rename_func("DAYOFMONTH"),
exp.DayOfWeek: rename_func("DAYOFWEEK"),
exp.DayOfYear: rename_func("DAYOFYEAR"),
@@ -800,6 +812,8 @@ class Generator(generator.Generator):
TYPE_MAPPING = {
**generator.Generator.TYPE_MAPPING,
+ exp.DataType.Type.NESTED: "OBJECT",
+ exp.DataType.Type.STRUCT: "OBJECT",
exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
}
@@ -814,6 +828,18 @@ class Generator(generator.Generator):
exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
+ def datatype_sql(self, expression: exp.DataType) -> str:
+ expressions = expression.expressions
+ if (
+ expressions
+ and expression.is_type(*exp.DataType.STRUCT_TYPES)
+ and any(isinstance(field_type, exp.DataType) for field_type in expressions)
+ ):
+ # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
+ return "OBJECT"
+
+ return super().datatype_sql(expression)
+
def tonumber_sql(self, expression: exp.ToNumber) -> str:
return self.func(
"TO_NUMBER",
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index ea5c72444b..299f7c0e30 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1762,6 +1762,7 @@ class Drop(Expression):
arg_types = {
"this": False,
"kind": False,
+ "expressions": False,
"exists": False,
"temporary": False,
"materialized": False,
@@ -2318,6 +2319,10 @@ class GlobalProperty(Property):
arg_types = {}
+class IcebergProperty(Property):
+ arg_types = {}
+
+
class InheritsProperty(Property):
arg_types = {"expressions": True}
@@ -3838,6 +3843,18 @@ class Type(AutoName):
XML = auto()
YEAR = auto()
+ STRUCT_TYPES = {
+ Type.NESTED,
+ Type.OBJECT,
+ Type.STRUCT,
+ }
+
+ NESTED_TYPES = {
+ *STRUCT_TYPES,
+ Type.ARRAY,
+ Type.MAP,
+ }
+
TEXT_TYPES = {
Type.CHAR,
Type.NCHAR,
@@ -4685,7 +4702,13 @@ def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case:
class Cast(Func):
- arg_types = {"this": True, "to": True, "format": False, "safe": False}
+ arg_types = {
+ "this": True,
+ "to": True,
+ "format": False,
+ "safe": False,
+ "action": False,
+ }
@property
def name(self) -> str:
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 3f9aec1098..ed3b2c7272 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -95,6 +95,7 @@ class Generator(metaclass=_Generator):
exp.ExternalProperty: lambda *_: "EXTERNAL",
exp.GlobalProperty: lambda *_: "GLOBAL",
exp.HeapProperty: lambda *_: "HEAP",
+ exp.IcebergProperty: lambda *_: "ICEBERG",
exp.InheritsProperty: lambda self, e: f"INHERITS ({self.expressions(e, flat=True)})",
exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}",
exp.InputModelProperty: lambda self, e: f"INPUT{self.sql(e, 'this')}",
@@ -405,6 +406,7 @@ class Generator(metaclass=_Generator):
exp.GlobalProperty: exp.Properties.Location.POST_CREATE,
exp.HeapProperty: exp.Properties.Location.POST_WITH,
exp.InheritsProperty: exp.Properties.Location.POST_SCHEMA,
+ exp.IcebergProperty: exp.Properties.Location.POST_CREATE,
exp.InputModelProperty: exp.Properties.Location.POST_SCHEMA,
exp.IsolatedLoadingProperty: exp.Properties.Location.POST_NAME,
exp.JournalProperty: exp.Properties.Location.POST_NAME,
@@ -1168,6 +1170,8 @@ def delete_sql(self, expression: exp.Delete) -> str:
def drop_sql(self, expression: exp.Drop) -> str:
this = self.sql(expression, "this")
+ expressions = self.expressions(expression, flat=True)
+ expressions = f" ({expressions})" if expressions else ""
kind = expression.args["kind"]
exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
temporary = " TEMPORARY" if expression.args.get("temporary") else ""
@@ -1175,9 +1179,7 @@ def drop_sql(self, expression: exp.Drop) -> str:
cascade = " CASCADE" if expression.args.get("cascade") else ""
constraints = " CONSTRAINTS" if expression.args.get("constraints") else ""
purge = " PURGE" if expression.args.get("purge") else ""
- return (
- f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}{constraints}{purge}"
- )
+ return f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{expressions}{cascade}{constraints}{purge}"
def except_sql(self, expression: exp.Except) -> str:
return self.set_operations(expression)
@@ -2799,7 +2801,9 @@ def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) ->
format_sql = f" FORMAT {format_sql}" if format_sql else ""
to_sql = self.sql(expression, "to")
to_sql = f" {to_sql}" if to_sql else ""
- return f"{safe_prefix or ''}CAST({self.sql(expression, 'this')} AS{to_sql}{format_sql})"
+ action = self.sql(expression, "action")
+ action = f" {action}" if action else ""
+ return f"{safe_prefix or ''}CAST({self.sql(expression, 'this')} AS{to_sql}{format_sql}{action})"
def currentdate_sql(self, expression: exp.CurrentDate) -> str:
zone = self.sql(expression, "this")
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 31dcca8c5e..85422b3c7d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -144,6 +144,7 @@ class Parser(metaclass=_Parser):
STRUCT_TYPE_TOKENS = {
TokenType.NESTED,
+ TokenType.OBJECT,
TokenType.STRUCT,
}
@@ -748,6 +749,7 @@ class Parser(metaclass=_Parser):
"FREESPACE": lambda self: self._parse_freespace(),
"GLOBAL": lambda self: self.expression(exp.GlobalProperty),
"HEAP": lambda self: self.expression(exp.HeapProperty),
+ "ICEBERG": lambda self: self.expression(exp.IcebergProperty),
"IMMUTABLE": lambda self: self.expression(
exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE")
),
@@ -1018,6 +1020,8 @@ class Parser(metaclass=_Parser):
USABLES: OPTIONS_TYPE = dict.fromkeys(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"), tuple())
+ CAST_ACTIONS: OPTIONS_TYPE = dict.fromkeys(("RENAME", "ADD"), ("FIELDS",))
+
INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
CLONE_KEYWORDS = {"CLONE", "COPY"}
@@ -1428,13 +1432,22 @@ def _parse_drop(self, exists: bool = False) -> exp.Drop | exp.Command:
if not kind:
return self._parse_as_command(start)
+ if_exists = exists or self._parse_exists()
+ table = self._parse_table_parts(
+ schema=True, is_db_reference=self._prev.token_type == TokenType.SCHEMA
+ )
+
+ if self._match(TokenType.L_PAREN, advance=False):
+ expressions = self._parse_wrapped_csv(self._parse_types)
+ else:
+ expressions = None
+
return self.expression(
exp.Drop,
comments=start.comments,
- exists=exists or self._parse_exists(),
- this=self._parse_table(
- schema=True, is_db_reference=self._prev.token_type == TokenType.SCHEMA
- ),
+ exists=if_exists,
+ this=table,
+ expressions=expressions,
kind=kind,
temporary=temporary,
materialized=materialized,
@@ -4860,7 +4873,12 @@ def _parse_cast(self, strict: bool, safe: t.Optional[bool] = None) -> exp.Expres
to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
return self.expression(
- exp.Cast if strict else exp.TryCast, this=this, to=to, format=fmt, safe=safe
+ exp.Cast if strict else exp.TryCast,
+ this=this,
+ to=to,
+ format=fmt,
+ safe=safe,
+ action=self._parse_var_from_options(self.CAST_ACTIONS, raise_unmatched=False),
)
def _parse_string_agg(self) -> exp.Expression:
| diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index eb2ef3a582..a9ef96ee0f 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -662,6 +662,13 @@ def test_bigquery(self):
"duckdb": "SELECT {'y': ARRAY(SELECT {'b': 1} FROM x)} FROM z",
},
)
+ self.validate_all(
+ "SELECT CAST(STRUCT(1) AS STRUCT<INT64>)",
+ write={
+ "bigquery": "SELECT CAST(STRUCT(1) AS STRUCT<INT64>)",
+ "snowflake": "SELECT CAST(OBJECT_CONSTRUCT('_0', 1) AS OBJECT)",
+ },
+ )
self.validate_all(
"cast(x as date format 'MM/DD/YYYY')",
write={
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 306496b7b6..8362ed0902 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -972,7 +972,7 @@ def test_cast(self):
"hive": "CAST(COL AS ARRAY<BIGINT>)",
"spark": "CAST(COL AS ARRAY<BIGINT>)",
"postgres": "CAST(COL AS BIGINT[])",
- "snowflake": "CAST(COL AS ARRAY)",
+ "snowflake": "CAST(COL AS ARRAY(BIGINT))",
},
)
self.validate_all(
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index 2ea595e6d3..99b1fc0829 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -63,7 +63,7 @@ def test_cast(self):
"duckdb": "CAST(a AS INT[])",
"presto": "CAST(a AS ARRAY(INTEGER))",
"spark": "CAST(a AS ARRAY<INT>)",
- "snowflake": "CAST(a AS ARRAY)",
+ "snowflake": "CAST(a AS ARRAY(INT))",
},
)
self.validate_all(
@@ -82,18 +82,17 @@ def test_cast(self):
"duckdb": "CAST([1, 2] AS BIGINT[])",
"presto": "CAST(ARRAY[1, 2] AS ARRAY(BIGINT))",
"spark": "CAST(ARRAY(1, 2) AS ARRAY<BIGINT>)",
- "snowflake": "CAST([1, 2] AS ARRAY)",
+ "snowflake": "CAST([1, 2] AS ARRAY(BIGINT))",
},
)
self.validate_all(
- "CAST(MAP(ARRAY[1], ARRAY[1]) AS MAP(INT,INT))",
+ "CAST(MAP(ARRAY['key'], ARRAY[1]) AS MAP(VARCHAR, INT))",
write={
- "bigquery": "CAST(MAP([1], [1]) AS MAP<INT64, INT64>)",
- "duckdb": "CAST(MAP([1], [1]) AS MAP(INT, INT))",
- "presto": "CAST(MAP(ARRAY[1], ARRAY[1]) AS MAP(INTEGER, INTEGER))",
- "hive": "CAST(MAP(1, 1) AS MAP<INT, INT>)",
- "spark": "CAST(MAP_FROM_ARRAYS(ARRAY(1), ARRAY(1)) AS MAP<INT, INT>)",
- "snowflake": "CAST(OBJECT_CONSTRUCT(1, 1) AS OBJECT)",
+ "duckdb": "CAST(MAP(['key'], [1]) AS MAP(TEXT, INT))",
+ "presto": "CAST(MAP(ARRAY['key'], ARRAY[1]) AS MAP(VARCHAR, INTEGER))",
+ "hive": "CAST(MAP('key', 1) AS MAP<STRING, INT>)",
+ "snowflake": "CAST(OBJECT_CONSTRUCT('key', 1) AS MAP(VARCHAR, INT))",
+ "spark": "CAST(MAP_FROM_ARRAYS(ARRAY('key'), ARRAY(1)) AS MAP<STRING, INT>)",
},
)
self.validate_all(
@@ -104,7 +103,7 @@ def test_cast(self):
"presto": "CAST(MAP(ARRAY['a', 'b', 'c'], ARRAY[ARRAY[1], ARRAY[2], ARRAY[3]]) AS MAP(VARCHAR, ARRAY(INTEGER)))",
"hive": "CAST(MAP('a', ARRAY(1), 'b', ARRAY(2), 'c', ARRAY(3)) AS MAP<STRING, ARRAY<INT>>)",
"spark": "CAST(MAP_FROM_ARRAYS(ARRAY('a', 'b', 'c'), ARRAY(ARRAY(1), ARRAY(2), ARRAY(3))) AS MAP<STRING, ARRAY<INT>>)",
- "snowflake": "CAST(OBJECT_CONSTRUCT('a', [1], 'b', [2], 'c', [3]) AS OBJECT)",
+ "snowflake": "CAST(OBJECT_CONSTRUCT('a', [1], 'b', [2], 'c', [3]) AS MAP(VARCHAR, ARRAY(INT)))",
},
)
self.validate_all(
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 9d5a93be11..22c3fd53d5 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -40,6 +40,11 @@ def test_snowflake(self):
)""",
)
+ self.validate_identity("SELECT CAST(OBJECT_CONSTRUCT('a', 1) AS MAP(VARCHAR, INT))")
+ self.validate_identity("SELECT CAST(OBJECT_CONSTRUCT('a', 1) AS OBJECT(a CHAR NOT NULL))")
+ self.validate_identity("SELECT CAST([1, 2, 3] AS ARRAY(INT))")
+ self.validate_identity("SELECT CAST(obj AS OBJECT(x CHAR) RENAME FIELDS)")
+ self.validate_identity("SELECT CAST(obj AS OBJECT(x CHAR, y VARCHAR) ADD FIELDS)")
self.validate_identity("SELECT TO_TIMESTAMP(123.4)").selects[0].assert_is(exp.Anonymous)
self.validate_identity("SELECT TO_TIME(x) FROM t")
self.validate_identity("SELECT TO_TIMESTAMP(x) FROM t")
@@ -1068,6 +1073,9 @@ def test_ddl(self):
self.validate_identity("CREATE SCHEMA mytestschema_clone CLONE testschema")
self.validate_identity("CREATE TABLE IDENTIFIER('foo') (COLUMN1 VARCHAR, COLUMN2 VARCHAR)")
self.validate_identity("CREATE TABLE IDENTIFIER($foo) (col1 VARCHAR, col2 VARCHAR)")
+ self.validate_identity(
+ "DROP function my_udf (OBJECT(city VARCHAR, zipcode DECIMAL, val ARRAY(BOOLEAN)))"
+ )
self.validate_identity(
"CREATE TABLE orders_clone_restore CLONE orders AT (TIMESTAMP => TO_TIMESTAMP_TZ('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss'))"
)
@@ -1083,6 +1091,17 @@ def test_ddl(self):
self.validate_identity(
"CREATE OR REPLACE TABLE EXAMPLE_DB.DEMO.USERS (ID DECIMAL(38, 0) NOT NULL, PRIMARY KEY (ID), FOREIGN KEY (CITY_CODE) REFERENCES EXAMPLE_DB.DEMO.CITIES (CITY_CODE))"
)
+ self.validate_identity(
+ "CREATE ICEBERG TABLE my_iceberg_table (amount ARRAY(INT)) CATALOG='SNOWFLAKE' EXTERNAL_VOLUME='my_external_volume' BASE_LOCATION='my/relative/path/from/extvol'"
+ )
+ self.validate_identity(
+ "CREATE OR REPLACE FUNCTION my_udf(location OBJECT(city VARCHAR, zipcode DECIMAL, val ARRAY(BOOLEAN))) RETURNS VARCHAR AS $$ SELECT 'foo' $$",
+ "CREATE OR REPLACE FUNCTION my_udf(location OBJECT(city VARCHAR, zipcode DECIMAL, val ARRAY(BOOLEAN))) RETURNS VARCHAR AS ' SELECT \\'foo\\' '",
+ )
+ self.validate_identity(
+ "CREATE OR REPLACE FUNCTION my_udtf(foo BOOLEAN) RETURNS TABLE(col1 ARRAY(INT)) AS $$ WITH t AS (SELECT CAST([1, 2, 3] AS ARRAY(INT)) AS c) SELECT c FROM t $$",
+ "CREATE OR REPLACE FUNCTION my_udtf(foo BOOLEAN) RETURNS TABLE (col1 ARRAY(INT)) AS ' WITH t AS (SELECT CAST([1, 2, 3] AS ARRAY(INT)) AS c) SELECT c FROM t '",
+ )
self.validate_all(
"CREATE TABLE orders_clone CLONE orders",
| [] | [
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] | [
"tests/dialects/test_bigquery.py::TestBigQuery::test_errors",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::te... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat!(snowflake): iceberg ddl, structured types, cast rename/keep fields, drop fix
Snowflake added support Iceberg tables and structured types, so now `ARRAY(INT)` is valid. This PR:
- Adds support for `CREATE ICEBERG TABLE ...`
- Fixes the generation of structured types
- Adds support for `CAST( <source_expr> AS <target_data_type> [ RENAME FIELDS | ADD FIELDS ] )`
- Improves transpilation of `STRUCT`-like types _to_ Snowflake
- Fixes an issue with `DROP` - we weren't parsing the data type list properly
References:
- https://docs.snowflake.com/en/sql-reference/functions/cast
- https://docs.snowflake.com/en/user-guide/tables-iceberg-data-types
- https://docs.snowflake.com/en/sql-reference/data-types-structured#constructing-structured-arrays-structured-objects-and-maps
- https://docs.snowflake.com/en/sql-reference/sql/create-iceberg-table
- https://docs.snowflake.com/en/sql-reference/sql/drop-function
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
EleutherAI__lm-evaluation-harness-1566 | 1,566 | EleutherAI/lm-evaluation-harness | null | 49695e8d94c3ab011b7ae8814d809de30b1b1182 | 2024-03-12T17:35:39Z | diff --git a/lm_eval/__main__.py b/lm_eval/__main__.py
index 489c1662d41..18c243d431d 100644
--- a/lm_eval/__main__.py
+++ b/lm_eval/__main__.py
@@ -53,13 +53,30 @@ def parse_value(item):
return items
-def parse_eval_args() -> argparse.Namespace:
+def check_argument_types(parser: argparse.ArgumentParser):
+ """
+ Check to make sure all CLI args are typed, raises error if not
+ """
+ for action in parser._actions:
+ if action.dest != "help" and not action.const:
+ if action.type is None:
+ raise ValueError(
+ f"Argument '{action.dest}' doesn't have a type specified."
+ )
+ else:
+ continue
+
+
+def setup_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
- parser.add_argument("--model", "-m", default="hf", help="Name of model e.g. `hf`")
+ parser.add_argument(
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+ )
parser.add_argument(
"--tasks",
"-t",
default=None,
+ type=str,
metavar="task1,task2",
help="To get full list of tasks, use the command lm-eval --tasks list",
)
@@ -67,6 +84,7 @@ def parse_eval_args() -> argparse.Namespace:
"--model_args",
"-a",
default="",
+ type=str,
help="Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
)
parser.add_argument(
@@ -164,6 +182,7 @@ def parse_eval_args() -> argparse.Namespace:
)
parser.add_argument(
"--gen_kwargs",
+ type=dict,
default=None,
help=(
"String arguments for model generation on greedy_until tasks,"
@@ -180,6 +199,7 @@ def parse_eval_args() -> argparse.Namespace:
)
parser.add_argument(
"--wandb_args",
+ type=str,
default="",
help="Comma separated string arguments passed to wandb.init, e.g. `project=lm-eval,job_type=eval",
)
@@ -209,13 +229,19 @@ def parse_eval_args() -> argparse.Namespace:
help="Sets trust_remote_code to True to execute code to create HF Datasets from the Hub",
)
+ return parser
+
+
+def parse_eval_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
+ check_argument_types(parser)
return parser.parse_args()
def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
if not args:
# we allow for args to be passed externally, else we parse them ourselves
- args = parse_eval_args()
+ parser = setup_parser()
+ args = parse_eval_args(parser)
if args.wandb_args:
wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))
| diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 00000000000..feaa7340d6a
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,43 @@
+import argparse
+
+import pytest
+
+import lm_eval.__main__
+
+
+def test_cli_parse_error():
+ """
+ Assert error raised if cli args argument doesn't have type
+ """
+ with pytest.raises(ValueError):
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument(
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+ )
+ parser.add_argument(
+ "--tasks",
+ "-t",
+ default=None,
+ metavar="task1,task2",
+ help="To get full list of tasks, use the command lm-eval --tasks list",
+ )
+ lm_eval.__main__.check_argument_types(parser)
+
+
+def test_cli_parse_no_error():
+ """
+ Assert typed arguments are parsed correctly
+ """
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument(
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+ )
+ parser.add_argument(
+ "--tasks",
+ "-t",
+ type=str,
+ default=None,
+ metavar="task1,task2",
+ help="To get full list of tasks, use the command lm-eval --tasks list",
+ )
+ lm_eval.__main__.check_argument_types(parser)
| [
{
"components": [
{
"doc": "Check to make sure all CLI args are typed, raises error if not",
"lines": [
56,
67
],
"name": "check_argument_types",
"signature": "def check_argument_types(parser: argparse.ArgumentParser):",
"type": "function... | [
"tests/test_cli.py::test_cli_parse_error",
"tests/test_cli.py::test_cli_parse_no_error"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Proposed approach for testing CLI arg parsing
See discussion here: https://github.com/EleutherAI/lm-evaluation-harness/issues/1518
Here's an approach to start testing CLI argument parsing:
1. Separate out setting up the argument parser in `parse_eval_args` into a separate method, `setup_parser` that gets called in `parse_eval_args`
2. Create unit tests that call the parser for each of the command line arguments
3. Adding specific TypeError exceptions at each argument entrypoint in the `cli_evaluate` method
Let me know what you think about this approach. If it seems reasonable, I'll add the tests for the rest of the methods and exceptions where it's reasonable.
@LSinev @haileyschoelkopf
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in lm_eval/__main__.py]
(definition of check_argument_types:)
def check_argument_types(parser: argparse.ArgumentParser):
"""Check to make sure all CLI args are typed, raises error if not"""
(definition of setup_parser:)
def setup_parser() -> argparse.ArgumentParser:
[end of new definitions in lm_eval/__main__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | decc533d02222f3b866d9a89263277fe0cc2fcb2 | ||
googleapis__python-aiplatform-3422 | 3,422 | googleapis/python-aiplatform | null | 47d435bce4b5206910e77df03a99730d62484e5b | 2024-03-12T17:22:31Z | diff --git a/samples/model-builder/vector_search/vector_search_create_index_endpoint_sample.py b/samples/model-builder/vector_search/vector_search_create_index_endpoint_sample.py
new file mode 100644
index 0000000000..43d809ee8f
--- /dev/null
+++ b/samples/model-builder/vector_search/vector_search_create_index_endpoint_sample.py
@@ -0,0 +1,42 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import aiplatform
+
+
+# [START aiplatform_sdk_vector_search_create_index_endpoint_sample]
+def vector_search_create_index_endpoint(
+ project: str, location: str, display_name: str
+) -> None:
+ """Create a vector search index endpoint.
+
+ Args:
+ project (str): Required. Project ID
+ location (str): Required. The region name
+ display_name (str): Required. The index endpoint display name
+ """
+ # Initialize the Vertex AI client
+ aiplatform.init(project=project, location=location)
+
+ # Create Index Endpoint
+ index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
+ display_name=display_name,
+ public_endpoint_enabled=True,
+ description="Matching Engine Index Endpoint",
+ )
+
+ print(index_endpoint.name)
+
+
+# [END aiplatform_sdk_vector_search_create_index_endpoint_sample]
diff --git a/samples/model-builder/vector_search/vector_search_create_index_sample.py b/samples/model-builder/vector_search/vector_search_create_index_sample.py
new file mode 100644
index 0000000000..6a2313cd2a
--- /dev/null
+++ b/samples/model-builder/vector_search/vector_search_create_index_sample.py
@@ -0,0 +1,50 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+from google.cloud import aiplatform
+
+
+# [START aiplatform_sdk_vector_search_create_index_sample]
+def vector_search_create_index(
+ project: str, location: str, display_name: str, gcs_uri: Optional[str] = None
+) -> None:
+ """Create a vector search index.
+
+ Args:
+ project (str): Required. Project ID
+ location (str): Required. The region name
+ display_name (str): Required. The index display name
+ gcs_uri (str): Optional. The Google Cloud Storage uri for index content
+ """
+ # Initialize the Vertex AI client
+ aiplatform.init(project=project, location=location, staging_bucket=gcs_uri)
+
+ # Create Index
+ index = aiplatform.MatchingEngineIndex.create_tree_ah_index(
+ display_name=display_name,
+ description="Matching Engine Index",
+ dimensions=100,
+ approximate_neighbors_count=150,
+ leaf_node_embedding_count=500,
+ leaf_nodes_to_search_percent=7,
+ index_update_method="batch_update", # Options: stream_update, batch_update
+ distance_measure_type=aiplatform.matching_engine.matching_engine_index_config.DistanceMeasureType.DOT_PRODUCT_DISTANCE,
+ )
+
+ print(index.name)
+
+
+# [END aiplatform_sdk_vector_search_create_index_sample]
diff --git a/samples/model-builder/vector_search/vector_search_deploy_index_sample.py b/samples/model-builder/vector_search/vector_search_deploy_index_sample.py
new file mode 100644
index 0000000000..adafef39d4
--- /dev/null
+++ b/samples/model-builder/vector_search/vector_search_deploy_index_sample.py
@@ -0,0 +1,57 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud import aiplatform
+
+
+# [START aiplatform_sdk_vector_search_deploy_index_sample]
+def vector_search_deploy_index(
+ project: str,
+ location: str,
+ index_name: str,
+ index_endpoint_name: str,
+ deployed_index_id: str,
+) -> None:
+ """Deploy a vector search index to a vector search index endpoint.
+
+ Args:
+ project (str): Required. Project ID
+ location (str): Required. The region name
+ index_name (str): Required. The index to update. A fully-qualified index
+ resource name or a index ID. Example:
+ "projects/123/locations/us-central1/indexes/my_index_id" or
+ "my_index_id".
+ index_endpoint_name (str): Required. Index endpoint to deploy the index to.
+ deployed_index_id (str): Required. The user specified ID of the DeployedIndex.
+ """
+ # Initialize the Vertex AI client
+ aiplatform.init(project=project, location=location)
+
+ # Create the index instance from an existing index
+ index = aiplatform.MatchingEngineIndex(index_name=index_name)
+
+ # Create the index endpoint instance from an existing endpoint.
+ index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
+ index_endpoint_name=index_endpoint_name
+ )
+
+ # Deploy Index to Endpoint
+ index_endpoint = index_endpoint.deploy_index(
+ index=index, deployed_index_id=deployed_index_id
+ )
+
+ print(index_endpoint.deployed_indexes)
+
+
+# [END aiplatform_sdk_vector_search_deploy_index_sample]
diff --git a/samples/model-builder/vector_search/vector_search_stream_update_sample.py b/samples/model-builder/vector_search/vector_search_stream_update_sample.py
index 03ab377878..fce88ab3fc 100644
--- a/samples/model-builder/vector_search/vector_search_stream_update_sample.py
+++ b/samples/model-builder/vector_search/vector_search_stream_update_sample.py
@@ -28,7 +28,7 @@ def stream_update_vector_search_index(
location (str): Required. The region name, e.g. "us-central1"
index_name (str): Required. The index to update. A fully-qualified index
resource name or a index ID. Example:
- "projects/123/locations/us-central1/ indexes/my_index_id" or
+ "projects/123/locations/us-central1/indexes/my_index_id" or
"my_index_id".
datapoints: Sequence[dict]: Required. The datapoints to be updated. The dict
element should be of the IndexDatapoint type.
| diff --git a/samples/model-builder/conftest.py b/samples/model-builder/conftest.py
index 750e682e20..398f3ac7bc 100644
--- a/samples/model-builder/conftest.py
+++ b/samples/model-builder/conftest.py
@@ -1248,8 +1248,31 @@ def mock_index_endpoint_init(mock_index_endpoint):
@pytest.fixture
def mock_index_endpoint_find_neighbors(mock_index_endpoint):
- with patch.object(
- mock_index_endpoint, "find_neighbors"
- ) as mock_find_neighbors:
+ with patch.object(mock_index_endpoint, "find_neighbors") as mock_find_neighbors:
mock_find_neighbors.return_value = None
yield mock_find_neighbors
+
+
+@pytest.fixture
+def mock_index_create_tree_ah_index(mock_index):
+ with patch.object(
+ aiplatform.MatchingEngineIndex, "create_tree_ah_index"
+ ) as mock_create_tree_ah_index:
+ mock_create_tree_ah_index.return_value = mock_index
+ yield mock_create_tree_ah_index
+
+
+@pytest.fixture
+def mock_index_endpoint_create(mock_index_endpoint):
+ with patch.object(
+ aiplatform.MatchingEngineIndexEndpoint, "create"
+ ) as mock_index_endpoint_create:
+ mock_index_endpoint_create.return_value = mock_index_endpoint
+ yield mock_index_endpoint_create
+
+
+@pytest.fixture
+def mock_index_endpoint_deploy_index(mock_index_endpoint):
+ with patch.object(mock_index_endpoint, "deploy_index") as mock_deploy_index:
+ mock_deploy_index.return_value = mock_index_endpoint
+ yield mock_deploy_index
diff --git a/samples/model-builder/test_constants.py b/samples/model-builder/test_constants.py
index 924d888746..210256913d 100644
--- a/samples/model-builder/test_constants.py
+++ b/samples/model-builder/test_constants.py
@@ -347,3 +347,6 @@
VECTOR_SEARCH_INDEX_ENDPOINT = "456"
VECTOR_SEARCH_DEPLOYED_INDEX_ID = "789"
VECTOR_SERACH_INDEX_QUERIES = [[0.1]]
+VECTOR_SEARCH_INDEX_DISPLAY_NAME = "my-vector-search-index"
+VECTOR_SEARCH_GCS_URI = "gs://fake-dir"
+VECTOR_SEARCH_INDEX_ENDPOINT_DISPLAY_NAME = "my-vector-search-index-endpoint"
diff --git a/samples/model-builder/vector_search/vector_search_create_index_endpoint_sample_test.py b/samples/model-builder/vector_search/vector_search_create_index_endpoint_sample_test.py
new file mode 100644
index 0000000000..d7b576dc75
--- /dev/null
+++ b/samples/model-builder/vector_search/vector_search_create_index_endpoint_sample_test.py
@@ -0,0 +1,42 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest.mock import ANY
+
+import test_constants as constants
+from vector_search import vector_search_create_index_endpoint_sample
+
+
+def test_vector_search_create_index_endpoint_sample(
+ mock_sdk_init,
+ mock_index_endpoint_create,
+):
+ vector_search_create_index_endpoint_sample.vector_search_create_index_endpoint(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ display_name=constants.VECTOR_SEARCH_INDEX_ENDPOINT_DISPLAY_NAME,
+ )
+
+ # Check client initialization
+ mock_sdk_init.assert_called_with(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ )
+
+ # Check index creation
+ mock_index_endpoint_create.assert_called_with(
+ display_name=constants.VECTOR_SEARCH_INDEX_ENDPOINT_DISPLAY_NAME,
+ public_endpoint_enabled=True,
+ description=ANY,
+ )
diff --git a/samples/model-builder/vector_search/vector_search_create_index_sample_test.py b/samples/model-builder/vector_search/vector_search_create_index_sample_test.py
new file mode 100644
index 0000000000..32cef3ced9
--- /dev/null
+++ b/samples/model-builder/vector_search/vector_search_create_index_sample_test.py
@@ -0,0 +1,49 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest.mock import ANY
+
+import test_constants as constants
+from vector_search import vector_search_create_index_sample
+
+
+def test_vector_search_create_index_sample(
+ mock_sdk_init,
+ mock_index_create_tree_ah_index,
+):
+ vector_search_create_index_sample.vector_search_create_index(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ display_name=constants.VECTOR_SEARCH_INDEX_DISPLAY_NAME,
+ gcs_uri=constants.VECTOR_SEARCH_GCS_URI,
+ )
+
+ # Check client initialization
+ mock_sdk_init.assert_called_with(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ staging_bucket=constants.VECTOR_SEARCH_GCS_URI,
+ )
+
+ # Check index creation
+ mock_index_create_tree_ah_index.assert_called_with(
+ display_name=constants.VECTOR_SEARCH_INDEX_DISPLAY_NAME,
+ description=ANY,
+ dimensions=ANY,
+ approximate_neighbors_count=ANY,
+ leaf_node_embedding_count=ANY,
+ leaf_nodes_to_search_percent=ANY,
+ index_update_method=ANY,
+ distance_measure_type=ANY,
+ )
diff --git a/samples/model-builder/vector_search/vector_search_deploy_index_sample_test.py b/samples/model-builder/vector_search/vector_search_deploy_index_sample_test.py
new file mode 100644
index 0000000000..47ee21d759
--- /dev/null
+++ b/samples/model-builder/vector_search/vector_search_deploy_index_sample_test.py
@@ -0,0 +1,50 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import test_constants as constants
+from vector_search import vector_search_deploy_index_sample
+
+
+def test_vector_search_deploy_index_sample(
+ mock_sdk_init,
+ mock_index_init,
+ mock_index_endpoint_init,
+ mock_index_endpoint_deploy_index,
+ mock_index,
+):
+ vector_search_deploy_index_sample.vector_search_deploy_index(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ index_name=constants.VECTOR_SEARCH_INDEX,
+ index_endpoint_name=constants.VECTOR_SEARCH_INDEX_ENDPOINT,
+ deployed_index_id=constants.VECTOR_SEARCH_DEPLOYED_INDEX_ID,
+ )
+
+ # Check client initialization
+ mock_sdk_init.assert_called_with(
+ project=constants.PROJECT, location=constants.LOCATION
+ )
+
+ # Check index initialization with right index name
+ mock_index_init.assert_called_with(index_name=constants.VECTOR_SEARCH_INDEX)
+
+ # Check index endpoint initialization with right index endpoint name
+ mock_index_endpoint_init.assert_called_with(
+ index_endpoint_name=constants.VECTOR_SEARCH_INDEX_ENDPOINT
+ )
+ # Check index deployment
+ mock_index_endpoint_deploy_index.assert_called_with(
+ index=mock_index,
+ deployed_index_id=constants.VECTOR_SEARCH_DEPLOYED_INDEX_ID,
+ )
| [
{
"components": [
{
"doc": "Create a vector search index endpoint.\n\nArgs:\n project (str): Required. Project ID\n location (str): Required. The region name\n display_name (str): Required. The index endpoint display name",
"lines": [
19,
39
],
... | [
"samples/model-builder/vector_search/vector_search_create_index_endpoint_sample_test.py::test_vector_search_create_index_endpoint_sample",
"samples/model-builder/vector_search/vector_search_create_index_sample_test.py::test_vector_search_create_index_sample",
"samples/model-builder/vector_search/vector_search_d... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
chore: adding code samples for vector search create index, create index endpoint, deploy index
chore: adding code samples for vector search create index, create index endpoint, deploy index
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in samples/model-builder/vector_search/vector_search_create_index_endpoint_sample.py]
(definition of vector_search_create_index_endpoint:)
def vector_search_create_index_endpoint( project: str, location: str, display_name: str ) -> None:
"""Create a vector search index endpoint.
Args:
project (str): Required. Project ID
location (str): Required. The region name
display_name (str): Required. The index endpoint display name"""
[end of new definitions in samples/model-builder/vector_search/vector_search_create_index_endpoint_sample.py]
[start of new definitions in samples/model-builder/vector_search/vector_search_create_index_sample.py]
(definition of vector_search_create_index:)
def vector_search_create_index( project: str, location: str, display_name: str, gcs_uri: Optional[str] = None ) -> None:
"""Create a vector search index.
Args:
project (str): Required. Project ID
location (str): Required. The region name
display_name (str): Required. The index display name
gcs_uri (str): Optional. The Google Cloud Storage uri for index content"""
[end of new definitions in samples/model-builder/vector_search/vector_search_create_index_sample.py]
[start of new definitions in samples/model-builder/vector_search/vector_search_deploy_index_sample.py]
(definition of vector_search_deploy_index:)
def vector_search_deploy_index( project: str, location: str, index_name: str, index_endpoint_name: str, deployed_index_id: str, ) -> None:
"""Deploy a vector search index to a vector search index endpoint.
Args:
project (str): Required. Project ID
location (str): Required. The region name
index_name (str): Required. The index to update. A fully-qualified index
resource name or a index ID. Example:
"projects/123/locations/us-central1/indexes/my_index_id" or
"my_index_id".
index_endpoint_name (str): Required. Index endpoint to deploy the index to.
deployed_index_id (str): Required. The user specified ID of the DeployedIndex."""
[end of new definitions in samples/model-builder/vector_search/vector_search_deploy_index_sample.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 67358fa6a830eb842f6b52d09061af4a41b54af6 | ||
tobymao__sqlglot-3116 | 3,116 | tobymao/sqlglot | null | c4e7bbfd3d88f3efb1fea806f85091dbe32379cf | 2024-03-11T09:24:51Z | diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 92a9e7a4d0..665f524434 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1580,6 +1580,15 @@ class EncodeColumnConstraint(ColumnConstraintKind):
pass
+# https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
+class ExcludeColumnConstraint(ColumnConstraintKind):
+ pass
+
+
+class WithOperator(Expression):
+ arg_types = {"this": True, "op": True}
+
+
class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind):
# this: True -> ALWAYS, this: False -> BY DEFAULT
arg_types = {
@@ -1854,14 +1863,22 @@ class Index(Expression):
arg_types = {
"this": False,
"table": False,
- "using": False,
- "where": False,
- "columns": False,
"unique": False,
"primary": False,
"amp": False, # teradata
+ "params": False,
+ }
+
+
+class IndexParameters(Expression):
+ arg_types = {
+ "using": False,
"include": False,
- "partition_by": False, # teradata
+ "columns": False,
+ "with_storage": False,
+ "partition_by": False,
+ "tablespace": False,
+ "where": False,
}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 9b0623792b..bd96e14909 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -89,6 +89,7 @@ class Generator(metaclass=_Generator):
exp.DateFormatColumnConstraint: lambda self, e: f"FORMAT {self.sql(e, 'this')}",
exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}",
exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}",
+ exp.ExcludeColumnConstraint: lambda self, e: f"EXCLUDE {self.sql(e, 'this').lstrip()}",
exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
exp.ExternalProperty: lambda *_: "EXTERNAL",
exp.GlobalProperty: lambda *_: "GLOBAL",
@@ -140,6 +141,7 @@ class Generator(metaclass=_Generator):
exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]),
exp.VolatileProperty: lambda *_: "VOLATILE",
exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
+ exp.WithOperator: lambda self, e: f"{self.sql(e, 'this')} WITH {self.sql(e, 'op')}",
}
# Whether null ordering is supported in order by
@@ -1207,17 +1209,9 @@ def hint_sql(self, expression: exp.Hint) -> str:
return f" /*+ {self.expressions(expression, sep=self.QUERY_HINT_SEP).strip()} */"
- def index_sql(self, expression: exp.Index) -> str:
- unique = "UNIQUE " if expression.args.get("unique") else ""
- primary = "PRIMARY " if expression.args.get("primary") else ""
- amp = "AMP " if expression.args.get("amp") else ""
- name = self.sql(expression, "this")
- name = f"{name} " if name else ""
- table = self.sql(expression, "table")
- table = f"{self.INDEX_ON} {table}" if table else ""
+ def indexparameters_sql(self, expression: exp.IndexParameters) -> str:
using = self.sql(expression, "using")
using = f" USING {using}" if using else ""
- index = "INDEX " if not table else ""
columns = self.expressions(expression, key="columns", flat=True)
columns = f"({columns})" if columns else ""
partition_by = self.expressions(expression, key="partition_by", flat=True)
@@ -1226,7 +1220,26 @@ def index_sql(self, expression: exp.Index) -> str:
include = self.expressions(expression, key="include", flat=True)
if include:
include = f" INCLUDE ({include})"
- return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{include}{partition_by}{where}"
+ with_storage = self.expressions(expression, key="with_storage", flat=True)
+ with_storage = f" WITH {with_storage}" if with_storage else ""
+ tablespace = self.sql(expression, "tablespace")
+ tablespace = f" USING INDEX TABLESPACE {tablespace}" if tablespace else ""
+
+ return f"{using}{columns}{include}{with_storage}{tablespace}{partition_by}{where}"
+
+ def index_sql(self, expression: exp.Index) -> str:
+ unique = "UNIQUE " if expression.args.get("unique") else ""
+ primary = "PRIMARY " if expression.args.get("primary") else ""
+ amp = "AMP " if expression.args.get("amp") else ""
+ name = self.sql(expression, "this")
+ name = f"{name} " if name else ""
+ table = self.sql(expression, "table")
+ table = f"{self.INDEX_ON} {table}" if table else ""
+
+ index = "INDEX " if not table else ""
+
+ params = self.sql(expression, "params")
+ return f"{unique}{primary}{amp}{index}{name}{table}{params}"
def identifier_sql(self, expression: exp.Identifier) -> str:
text = expression.name
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 5f00054024..24c289500e 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -838,6 +838,9 @@ class Parser(metaclass=_Parser):
exp.DefaultColumnConstraint, this=self._parse_bitwise()
),
"ENCODE": lambda self: self.expression(exp.EncodeColumnConstraint, this=self._parse_var()),
+ "EXCLUDE": lambda self: self.expression(
+ exp.ExcludeColumnConstraint, this=self._parse_index_params()
+ ),
"FOREIGN KEY": lambda self: self._parse_foreign_key(),
"FORMAT": lambda self: self.expression(
exp.DateFormatColumnConstraint, this=self._parse_var_or_string()
@@ -877,7 +880,15 @@ class Parser(metaclass=_Parser):
"RENAME": lambda self: self._parse_alter_table_rename(),
}
- SCHEMA_UNNAMED_CONSTRAINTS = {"CHECK", "FOREIGN KEY", "LIKE", "PRIMARY KEY", "UNIQUE", "PERIOD"}
+ SCHEMA_UNNAMED_CONSTRAINTS = {
+ "CHECK",
+ "EXCLUDE",
+ "FOREIGN KEY",
+ "LIKE",
+ "PERIOD",
+ "PRIMARY KEY",
+ "UNIQUE",
+ }
NO_PAREN_FUNCTION_PARSERS = {
"ANY": lambda self: self.expression(exp.Any, this=self._parse_bitwise()),
@@ -1008,7 +1019,8 @@ class Parser(metaclass=_Parser):
CLONE_KEYWORDS = {"CLONE", "COPY"}
HISTORICAL_DATA_KIND = {"TIMESTAMP", "OFFSET", "STATEMENT", "STREAM"}
- OPCLASS_FOLLOW_KEYWORDS = {"ASC", "DESC", "NULLS"}
+ OPCLASS_FOLLOW_KEYWORDS = {"ASC", "DESC", "NULLS", "WITH"}
+
OPTYPE_FOLLOW_TOKENS = {TokenType.COMMA, TokenType.R_PAREN}
TABLE_INDEX_HINT_TOKENS = {TokenType.FORCE, TokenType.IGNORE, TokenType.USE}
@@ -2841,6 +2853,7 @@ def _parse_join(
def _parse_opclass(self) -> t.Optional[exp.Expression]:
this = self._parse_conjunction()
+
if self._match_texts(self.OPCLASS_FOLLOW_KEYWORDS, advance=False):
return this
@@ -2849,6 +2862,37 @@ def _parse_opclass(self) -> t.Optional[exp.Expression]:
return this
+ def _parse_index_params(self) -> exp.IndexParameters:
+ using = self._parse_var(any_token=True) if self._match(TokenType.USING) else None
+
+ if self._match(TokenType.L_PAREN, advance=False):
+ columns = self._parse_wrapped_csv(self._parse_with_operator)
+ else:
+ columns = None
+
+ include = self._parse_wrapped_id_vars() if self._match_text_seq("INCLUDE") else None
+ partition_by = self._parse_partition_by()
+ with_storage = (
+ self._parse_csv(self._parse_conjunction) if self._match(TokenType.WITH) else None
+ )
+ tablespace = (
+ self._parse_var(any_token=True)
+ if self._match_text_seq("USING", "INDEX", "TABLESPACE")
+ else None
+ )
+ where = self._parse_where()
+
+ return self.expression(
+ exp.IndexParameters,
+ using=using,
+ columns=columns,
+ include=include,
+ partition_by=partition_by,
+ where=where,
+ with_storage=with_storage,
+ tablespace=tablespace,
+ )
+
def _parse_index(
self,
index: t.Optional[exp.Expression] = None,
@@ -2872,27 +2916,16 @@ def _parse_index(
index = self._parse_id_var()
table = None
- using = self._parse_var(any_token=True) if self._match(TokenType.USING) else None
-
- if self._match(TokenType.L_PAREN, advance=False):
- columns = self._parse_wrapped_csv(lambda: self._parse_ordered(self._parse_opclass))
- else:
- columns = None
-
- include = self._parse_wrapped_id_vars() if self._match_text_seq("INCLUDE") else None
+ params = self._parse_index_params()
return self.expression(
exp.Index,
this=index,
table=table,
- using=using,
- columns=columns,
unique=unique,
primary=primary,
amp=amp,
- include=include,
- partition_by=self._parse_partition_by(),
- where=self._parse_where(),
+ params=params,
)
def _parse_table_hints(self) -> t.Optional[t.List[exp.Expression]]:
@@ -6094,3 +6127,13 @@ def _parse_truncate_table(self) -> t.Optional[exp.TruncateTable] | exp.Expressio
option=option,
partition=partition,
)
+
+ def _parse_with_operator(self) -> t.Optional[exp.Expression]:
+ this = self._parse_ordered(self._parse_opclass)
+
+ if not self._match(TokenType.WITH):
+ return this
+
+ op = self._parse_var(any_token=True)
+
+ return self.expression(exp.WithOperator, this=this, op=op)
| diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 0b87693ea4..45264f44d1 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -130,6 +130,18 @@ def test_postgres(self):
self.validate_identity(
"SELECT * FROM foo, LATERAL (SELECT * FROM bar WHERE bar.id = foo.bar_id) AS ss"
)
+ self.validate_identity(
+ "CREATE TABLE t (vid INT NOT NULL, CONSTRAINT ht_vid_nid_fid_idx EXCLUDE (INT4RANGE(vid, nid) WITH &&, INT4RANGE(fid, fid, '[]') WITH &&))"
+ )
+ self.validate_identity(
+ "CREATE TABLE t (i INT, PRIMARY KEY (i), EXCLUDE USING gist(col varchar_pattern_ops DESC NULLS LAST WITH &&) WITH (sp1 = 1, sp2 = 2))"
+ )
+ self.validate_identity(
+ "CREATE TABLE t (i INT, EXCLUDE USING btree(INT4RANGE(vid, nid, '[]') ASC NULLS FIRST WITH &&) INCLUDE (col1, col2))"
+ )
+ self.validate_identity(
+ "CREATE TABLE t (i INT, EXCLUDE USING gin(col1 WITH &&, col2 WITH ||) USING INDEX TABLESPACE tablespace WHERE (id > 5))"
+ )
self.validate_identity(
"SELECT c.oid, n.nspname, c.relname "
"FROM pg_catalog.pg_class AS c "
| [] | [
"tests/dialects/test_postgres.py::TestPostgres::test_postgres"
] | [
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_r... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Adding EXCLUDE constraint support
Fixes #3097
The syntax for the `EXCLUDE` table constraint is the following:
```
[CONSTRAINT constraint_name] EXCLUDE [ USING index_method ] ( exclude_element WITH operator [, ... ] ) index_parameters [ WHERE ( predicate ) ]
```
where:
```
<index_method> ::= identifier (see [1])
<exclude_element> ::= { column_name | ( expression ) } [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ]
<opclass> ::= identifier (see [2])
<index_parameters> ::= [ INCLUDE ( column_name [, ... ] ) ]
[ WITH ( storage_parameter [= value] [, ... ] ) ]
[ USING INDEX TABLESPACE tablespace_name ]
```
Design Notes
-----------------
- Added `exp.ExcludeElement` that captures all information from `exclude_element WITH operator [, ... ] `
- This is an exclusively Postgres feature afaik, but the changes are added on the base Dialect.
Docs
----------
1. [Index methods](https://www.postgresql.org/docs/current/indexes-types.html)
2. [Operator class](https://www.postgresql.org/docs/current/indexes-opclass.html)
3. [EXCLUDE constraint](https://www.postgresql.org/docs/current/sql-createtable.html)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
EXCLUDE indexes support for PostgreSQL dialect
Thanks for adding INCLUDE indexes in #2855, we've updated and now it works. There are [EXCLUDE indexes in PostgreSQL](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-EXCLUDE), which would be great to support. Here is the example:
```
Python 3.8.10 (default, Nov 22 2023, 10:22:35)
[GCC 9.4.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlglot
>>> sqlglot.parse_one("""
... CREATE TABLE ht (
... vid INTEGER NOT NULL,
... fid INTEGER NOT NULL,
... nid INTEGER NOT NULL CHECK (nid > vid),
... vop CHAR(1) NOT NULL,
... nop CHAR(1) NOT NULL,
... geom GEOMETRY(POINTZ, 3857),
... fld_i INTEGER,
... fld_t TEXT,
... fld_d DATE,
... PRIMARY KEY (vid, fid),
... CONSTRAINT ht_vid_nid_fid_idx EXCLUDE USING gist (int4range(vid, nid) WITH &&, int4range(fid, fid, '[]') WITH &&)
... )
... """, read="postgres")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/__init__.py", line 124, in parse_one
result = dialect.parse(sql, **opts)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/dialects/dialect.py", line 477, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1099, in parse
return self._parse(
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1165, in _parse
expressions.append(parse_method(self))
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1367, in _parse_statement
return self.STATEMENT_PARSERS[self._prev.token_type](self)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 616, in <lambda>
TokenType.CREATE: lambda self: self._parse_create(),
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1487, in _parse_create
this = self._parse_schema(this=table_parts)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 4288, in _parse_schema
args = self._parse_csv(lambda: self._parse_constraint() or self._parse_field_def())
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 5367, in _parse_csv
parse_result = parse_method()
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 4288, in <lambda>
args = self._parse_csv(lambda: self._parse_constraint() or self._parse_field_def())
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 4453, in _parse_constraint
return self.expression(
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1229, in expression
return self.validate_expression(instance)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1249, in validate_expression
self.raise_error(error_message)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1209, in raise_error
raise error
sqlglot.errors.ParseError: Required keyword: 'expressions' missing for <class 'sqlglot.expressions.Constraint'>. Line 13, Col: 38.
TEXT,
fld_d DATE,
PRIMARY KEY (vid, fid),
CONSTRAINT ht_vid_nid_fid_idx EXCLUDE USING gist (int4range(vid, nid) WITH &&, int4range(fid, fid, '[]') WITH &&)
)
>>> sqlglot.__version__
'22.2.1'
```
----------
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
tobymao__sqlglot-3101 | 3,101 | tobymao/sqlglot | null | 8a34fb433bc33551febe96665e16668de73e5bd6 | 2024-03-08T03:15:05Z | diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index b6f491f900..20f27740e6 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -862,12 +862,12 @@ def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
return rename_func("DATETIMEFROMPARTS")(self, expression)
- def set_operation(self, expression: exp.Union, op: str) -> str:
+ def set_operations(self, expression: exp.Union) -> str:
limit = expression.args.get("limit")
if limit:
return self.sql(expression.limit(limit.pop(), copy=False))
- return super().set_operation(expression, op)
+ return super().set_operations(expression)
def setitem_sql(self, expression: exp.SetItem) -> str:
this = expression.this
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 89597d2389..c95b3b94e4 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -453,6 +453,7 @@ class Generator(metaclass=_Generator):
exp.Insert,
exp.Join,
exp.Select,
+ exp.Union,
exp.Update,
exp.Where,
exp.With,
@@ -1146,10 +1147,7 @@ def drop_sql(self, expression: exp.Drop) -> str:
)
def except_sql(self, expression: exp.Except) -> str:
- return self.prepend_ctes(
- expression,
- self.set_operation(expression, self.except_op(expression)),
- )
+ return self.set_operations(expression)
def except_op(self, expression: exp.Except) -> str:
return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
@@ -1492,10 +1490,7 @@ def insert_sql(self, expression: exp.Insert) -> str:
return self.prepend_ctes(expression, sql)
def intersect_sql(self, expression: exp.Intersect) -> str:
- return self.prepend_ctes(
- expression,
- self.set_operation(expression, self.intersect_op(expression)),
- )
+ return self.set_operations(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
@@ -2256,11 +2251,32 @@ def qualify_sql(self, expression: exp.Qualify) -> str:
this = self.indent(self.sql(expression, "this"))
return f"{self.seg('QUALIFY')}{self.sep()}{this}"
+ def set_operations(self, expression: exp.Union) -> str:
+ sqls: t.List[str] = []
+ stack: t.List[t.Union[str, exp.Expression]] = [expression]
+
+ while stack:
+ node = stack.pop()
+
+ if isinstance(node, exp.Union):
+ stack.append(node.expression)
+ stack.append(
+ self.maybe_comment(
+ getattr(self, f"{node.key}_op")(node),
+ expression=node.this,
+ comments=node.comments,
+ )
+ )
+ stack.append(node.this)
+ else:
+ sqls.append(self.sql(node))
+
+ this = self.sep().join(sqls)
+ this = self.query_modifiers(expression, this)
+ return self.prepend_ctes(expression, this)
+
def union_sql(self, expression: exp.Union) -> str:
- return self.prepend_ctes(
- expression,
- self.set_operation(expression, self.union_op(expression)),
- )
+ return self.set_operations(expression)
def union_op(self, expression: exp.Union) -> str:
kind = " DISTINCT" if self.EXPLICIT_UNION else ""
@@ -3172,13 +3188,6 @@ def naked_property(self, expression: exp.Property) -> str:
self.unsupported(f"Unsupported property {expression.__class__.__name__}")
return f"{property_name} {self.sql(expression, 'this')}"
- def set_operation(self, expression: exp.Union, op: str) -> str:
- this = self.maybe_comment(self.sql(expression, "this"), comments=expression.comments)
- op = self.seg(op)
- return self.query_modifiers(
- expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
- )
-
def tag_sql(self, expression: exp.Tag) -> str:
return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
| diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index 49deda9e8d..2354a80c82 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -428,7 +428,8 @@ def test_comments(self):
"""SELECT
'hotel1' AS hotel,
*
-FROM dw_1_dw_1_1.exactonline_1.transactionlines /*
+FROM dw_1_dw_1_1.exactonline_1.transactionlines
+/*
UNION ALL
SELECT
'Thon Partner Hotel Jølster' AS hotel,
| [] | [
"tests/test_transpile.py::TestTranspile::test_comments"
] | [
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_command_identity",
"tests/test_transpile.py::TestTranspile::test_error_level",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_trans... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: no more recursion for union generation
{}
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
tobymao__sqlglot-3089 | 3,089 | tobymao/sqlglot | null | d898f559fac44789da08689e835619f978c05a3e | 2024-03-06T16:35:36Z | diff --git a/sqlglot/dialects/__init__.py b/sqlglot/dialects/__init__.py
index 276ad59cb3..29c6580012 100644
--- a/sqlglot/dialects/__init__.py
+++ b/sqlglot/dialects/__init__.py
@@ -61,6 +61,7 @@ class Generator(Generator):
----
"""
+from sqlglot.dialects.athena import Athena
from sqlglot.dialects.bigquery import BigQuery
from sqlglot.dialects.clickhouse import ClickHouse
from sqlglot.dialects.databricks import Databricks
diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py
new file mode 100644
index 0000000000..dc87d8dcf3
--- /dev/null
+++ b/sqlglot/dialects/athena.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+from sqlglot.dialects.trino import Trino
+from sqlglot.tokens import TokenType
+
+
+class Athena(Trino):
+ class Parser(Trino.Parser):
+ STATEMENT_PARSERS = {
+ **Trino.Parser.STATEMENT_PARSERS,
+ TokenType.USING: lambda self: self._parse_as_command(self._prev),
+ }
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index f11c0da20b..d2533ebcf5 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -31,6 +31,7 @@ class Dialects(str, Enum):
DIALECT = ""
+ ATHENA = "athena"
BIGQUERY = "bigquery"
CLICKHOUSE = "clickhouse"
DATABRICKS = "databricks"
| diff --git a/tests/dialects/test_athena.py b/tests/dialects/test_athena.py
new file mode 100644
index 0000000000..99e36f2155
--- /dev/null
+++ b/tests/dialects/test_athena.py
@@ -0,0 +1,16 @@
+from tests.dialects.test_dialect import Validator
+
+
+class TestAthena(Validator):
+ dialect = "athena"
+ maxDiff = None
+
+ def test_athena(self):
+ self.validate_identity(
+ """USING EXTERNAL FUNCTION some_function(input VARBINARY)
+ RETURNS VARCHAR
+ LAMBDA 'some-name'
+ SELECT
+ some_function(1)""",
+ check_command_warning=True,
+ )
| [] | [
"tests/dialects/test_athena.py::TestAthena::test_athena"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat: add Athena dialect
Fixes #3087
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Support User Defined Functions on Athena Dialect
It looks like sqlglot is not able to parse [AWS Athena's user defined functions syntax](https://docs.aws.amazon.com/athena/latest/ug/querying-udf.html):
```py
from sqlglot import parse
from sqlglot.dialects import Trino
parse("""
USING EXTERNAL FUNCTION some_function(input VARBINARY)
RETURNS VARCHAR
LAMBDA 'some-name'
SELECT
some_function(1)
""", dialect=Trino)
```
Exception:
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 2, Col: 9.
USING EXTERNAL FUNCTION some_function(input VARBINARY)
RETURNS VARCHAR
LAMBDA 'some-name'
```
We are using `Trino` dialect since sqlglot does not have a dedicated one for Athena, as far as I understand, but Athena is based off Trino, so this dialect works otherwise perfectly for our codebase :slightly_smiling_face:
Am I missing something? Does it need a dedicated dialect for Athena?
----------
Hey @sbrandtb, thanks for the report.
AWS Athena is not supported right now, so this behavior is expected as it's not valid _Trino_ syntax, IIUC. Closing this as not planned for now since we don't plan to add Athena in the near future.
@georgesittas Given that Athena is basically Trino, what speaks against deriving a new Athena dialect, starting with UDF syntax? I am willing to help.
We actually just discussed this internally and decided to implement the `Athena` dialect and have this be parsed into a `Command` for now. Should be a good enough compromise to start with. Appreciate your willingness to help here.
So should have a fix for this particular issue soon. Feel free to contribute improvements to Athena if you find more discrepancies in the future.
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
pgmpy__pgmpy-1742 | 1,742 | pgmpy/pgmpy | null | 687e13a812eae747d6fa321ec021ec44021e0e67 | 2024-03-05T10:37:00Z | diff --git a/pgmpy/inference/ExactInference.py b/pgmpy/inference/ExactInference.py
index 7224bac99..0680f7f95 100644
--- a/pgmpy/inference/ExactInference.py
+++ b/pgmpy/inference/ExactInference.py
@@ -1249,7 +1249,7 @@ class BeliefPropagationWithMessageParsing(Inference):
Parameters
----------
model: FactorGraph
- model for which inference is to performed
+ Model on which to run the inference.
References
----------
@@ -1265,17 +1265,23 @@ def __init__(self, model: FactorGraph, check_model=True):
model.check_model()
self.model = model
- def query(self, variables, evidence):
+ def query(self, variables, evidence=None, virtual_evidence=None):
"""
Returns the a dict of posterior distributions for each of the queried `variables`,
- given the `evidence`.
+ given the `evidence` and the `virtual_evidence`.
Parameters
----------
variables: list
- list of variables for which you want to compute the posterior
- evidence: dict
- a dict key, value pair as {var: state_of_var_observed}
+ List of variables for which you want to compute the posterior.
+ evidence: dict or None (default: None)
+ A dict key, value pair as {var: state_of_var_observed}.
+ None if no evidence.
+ virtual_evidence: list or None (default: None)
+ A list of pgmpy.factors.discrete.TabularCPD representing the virtual
+ evidences. Each virtual evidence becomes a virtual message that gets added to
+ the list of computed messages incoming to the variable node.
+ None if no virtual evidence.
Examples
--------
@@ -1306,7 +1312,8 @@ def query(self, variables, evidence):
... )
>>> belief_propagation = BeliefPropagation(factor_graph)
>>> belief_propagation.query(variables=['B', 'C'],
- ... evidence={'A': 1, 'D': 0})
+ ... evidence={'D': 0},
+ ... virtual_evidence=[TabularCPD(['A'], 2, [[0.3], [0.7]])])
"""
common_vars = set(evidence if evidence is not None else []).intersection(
set(variables)
@@ -1316,13 +1323,32 @@ def query(self, variables, evidence):
f"Can't have the same variables in both `variables` and `evidence`. Found in both: {common_vars}"
)
+ # Can't have the same variables in both `evidence` and `virtual_evidence`
+ if evidence is not None and virtual_evidence is not None:
+ self._check_virtual_evidence(virtual_evidence)
+
+ ve_names = self._get_virtual_evidence_var_list(virtual_evidence)
+ common_vars = set(evidence).intersection(set(ve_names))
+ if common_vars:
+ raise ValueError(
+ f"Can't have the same variables in both `evidence` and `virtual_evidence`. Found in both: {common_vars}"
+ )
+
agg_res = {}
for var in variables:
- res = self.schedule_variable_node_messages(var, evidence, None)
+ res = self.schedule_variable_node_messages(
+ var, None, evidence, virtual_evidence
+ )
agg_res[var] = DiscreteFactor([var], [len(res)], res)
return agg_res
- def schedule_variable_node_messages(self, variable, evidence, from_factor):
+ def schedule_variable_node_messages(
+ self,
+ variable,
+ from_factor,
+ evidence,
+ virtual_evidence,
+ ):
"""
Returns the message sent by the variable to the factor requesting it.
For that, the variable requests the messages coming from its neighbouring
@@ -1331,17 +1357,32 @@ def schedule_variable_node_messages(self, variable, evidence, from_factor):
Parameters
----------
variable: str
- the variable node from which to compute the outgoing message
- evidence: dict
- a dict key, value pair as {var: state_of_var_observed}
- from_factor: str
- the factor requesting the message, as part of the recursion.
+ The variable node from which to compute the outgoing message
+ from_factor: pgmpy.factors.discrete.DiscreteFactor or None.
+ The factor requesting the message, as part of the recursion.
None for the first time this function is called.
+ evidence: dict or None
+ A dict key, value pair as {var: state_of_var_observed}.
+ None if no evidence.
+ virtual_evidence: list or None
+ A list of pgmpy.factors.discrete.TabularCPD representing the virtual
+ evidences. Each virtual evidence becomes a virtual message that gets added to
+ the list of computed messages incoming to the variable node.
+ None if no virtual evidence.
"""
- if variable in evidence.keys():
+ if evidence is not None and variable in evidence.keys():
# Is an observed variable
return self.model.get_point_mass_message(variable, evidence[variable])
+ virtual_messages = []
+ if (
+ virtual_evidence is not None
+ and variable in self._get_virtual_evidence_var_list(virtual_evidence)
+ ):
+ virtual_messages = [
+ cpd.values for cpd in virtual_evidence if cpd.variables[0] == variable
+ ]
+
incoming_factors = [
factor
for factor in list(self.model.neighbors(variable))
@@ -1350,19 +1391,23 @@ def schedule_variable_node_messages(self, variable, evidence, from_factor):
if len(incoming_factors) == 0:
# Is an unobserved leaf variable
- return self.calc_variable_node_message(variable, [])
+ return self.calc_variable_node_message(variable, [] + virtual_messages)
else:
# Else, get the incoming messages from all incoming factors
incoming_messages = []
for factor in incoming_factors:
incoming_messages.append(
self.schedule_factor_node_messages(
- factor, evidence, from_variable=variable
+ factor, variable, evidence, virtual_evidence
)
)
- return self.calc_variable_node_message(variable, incoming_messages)
+ return self.calc_variable_node_message(
+ variable, incoming_messages + virtual_messages
+ )
- def schedule_factor_node_messages(self, factor, evidence, from_variable):
+ def schedule_factor_node_messages(
+ self, factor, from_variable, evidence, virtual_evidence
+ ):
"""
Returns the message sent from the factor to the variable requesting it.
For that, the factor requests the messages coming from its neighbouring
@@ -1370,12 +1415,18 @@ def schedule_factor_node_messages(self, factor, evidence, from_variable):
Parameters
----------
- factor: str
- the factor from which we want to compute the outgoing message
- evidence: dict
- a dict key, value pair as {var: state_of_var_observed}
+ factor: pgmpy.factors.discrete.DiscreteFactor
+ The factor from which we want to compute the outgoing message.
from_variable: str
- the variable requesting the message, as part of the recursion.
+ The variable requesting the message, as part of the recursion.
+ evidence: dict
+ A dict key, value pair as {var: state_of_var_observed}.
+ None if no evidence.
+ virtual_evidence: list
+ A list of pgmpy.factors.discrete.TabularCPD representing the virtual
+ evidences. Each virtual evidence becomes a virtual message that gets added to
+ the list of computed messages incoming to the variable node.
+ None if no virtual evidence.
"""
assert from_variable is not None, "from_var must be specified"
@@ -1389,7 +1440,7 @@ def schedule_factor_node_messages(self, factor, evidence, from_variable):
for var in incoming_vars:
incoming_messages.append(
self.schedule_variable_node_messages(
- var, evidence, from_factor=factor
+ var, factor, evidence, virtual_evidence
)
)
return self.calc_factor_node_message(
diff --git a/pgmpy/inference/base.py b/pgmpy/inference/base.py
index 4b952233a..7a6acf483 100644
--- a/pgmpy/inference/base.py
+++ b/pgmpy/inference/base.py
@@ -175,27 +175,23 @@ def _prune_bayesian_model(self, variables, evidence):
return bn, evidence
- def _virtual_evidence(self, virtual_evidence):
+ def _check_virtual_evidence(self, virtual_evidence):
"""
- Modifies the model to incorporate virtual evidence. For each virtual evidence
- variable a binary variable is added as the child of the evidence variable to
- the model. The state 0 probabilities of the child is the evidence.
+ Checks the virtual evidence's format is correct. Each evidence must:
+ - Be a TabularCPD instance
+ - Be targeted to a single variable
+ - Be defined on a variable which is in the model
+ - Have the same cardinality as its corresponding variable in the model
Parameters
----------
- virtual_evidence: dict
- A dict of TabularCPD instances specifying the virtual evidence for each
+ virtual_evidence: list
+ A list of TabularCPD instances specifying the virtual evidence for each
of the evidence variables.
-
- Returns
- -------
- None
-
- References
- ----------
- [1] Mrad, Ali Ben, et al. "Uncertain evidence in Bayesian networks: Presentation and comparison on a simple example." International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems. Springer, Berlin, Heidelberg, 2012.
"""
for cpd in virtual_evidence:
+ if not isinstance(cpd, TabularCPD):
+ raise ValueError("Virtual evidence should be a TabularCPD")
var = cpd.variables[0]
if var not in self.model.nodes():
raise ValueError(
@@ -211,6 +207,28 @@ def _virtual_evidence(self, virtual_evidence):
"The number of states/cardinality for the evidence should be same as the number of states/cardinality of the variable in the model"
)
+ def _virtual_evidence(self, virtual_evidence):
+ """
+ Modifies the model to incorporate virtual evidence. For each virtual evidence
+ variable a binary variable is added as the child of the evidence variable to
+ the model. The state 0 probabilities of the child is the evidence.
+
+ Parameters
+ ----------
+ virtual_evidence: list
+ A list of TabularCPD instances specifying the virtual evidence for each
+ of the evidence variables.
+
+ Returns
+ -------
+ None
+
+ References
+ ----------
+ [1] Mrad, Ali Ben, et al. "Uncertain evidence in Bayesian networks: Presentation and comparison on a simple example." International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems. Springer, Berlin, Heidelberg, 2012.
+ """
+ self._check_virtual_evidence(virtual_evidence)
+
bn = self.model.copy()
for cpd in virtual_evidence:
var = cpd.variables[0]
@@ -230,3 +248,16 @@ def _virtual_evidence(self, virtual_evidence):
bn.add_cpds(new_cpd)
self.__init__(bn)
+
+ @staticmethod
+ def _get_virtual_evidence_var_list(virtual_evidence):
+ """
+ Returns the list of variables that have a virtual evidence.
+
+ Parameters
+ ----------
+ virtual_evidence: list
+ A list of TabularCPD instances specifying the virtual evidence for each
+ of the evidence variables.
+ """
+ return [cpd.variables[0] for cpd in virtual_evidence]
| diff --git a/pgmpy/tests/test_inference/test_ExactInference.py b/pgmpy/tests/test_inference/test_ExactInference.py
index 52e1d5f35..ee372a108 100644
--- a/pgmpy/tests/test_inference/test_ExactInference.py
+++ b/pgmpy/tests/test_inference/test_ExactInference.py
@@ -1144,11 +1144,11 @@ def setUp(self):
self.belief_propagation = BeliefPropagationWithMessageParsing(self.factor_graph)
def test_query_single_variable(self):
- res = self.belief_propagation.query(["C"], {})
+ res = self.belief_propagation.query(["C"])
assert np.allclose(res["C"].values, np.array([0.217, 0.783]), atol=1e-20)
def test_query_multiple_variable(self):
- res = self.belief_propagation.query(["A", "B", "C", "D"], {})
+ res = self.belief_propagation.query(["A", "B", "C", "D"])
assert np.allclose(res["A"].values, np.array([0.4, 0.6]), atol=1e-20)
assert np.allclose(res["B"].values, np.array([0.11, 0.21, 0.68]), atol=1e-20)
assert np.allclose(res["C"].values, np.array([0.217, 0.783]), atol=1e-20)
@@ -1171,3 +1171,48 @@ def test_query_multiple_variable_with_evidence(self):
assert np.allclose(
res["C"].values, np.array([0.14166667, 0.85833333]), atol=1e-20
)
+
+ def test_query_single_variable_with_virtual_evidence(self):
+ ve = [TabularCPD("A", 2, [[0.1], [0.9]])]
+ res = self.belief_propagation.query(["B"], virtual_evidence=ve)
+ assert np.allclose(
+ res["B"].values, np.array([0.06034483, 0.16034483, 0.77931034]), atol=1e-20
+ )
+
+ def test_query_multiple_variable_with_multiple_evidence_and_virtual_evidence(self):
+ ve = [
+ TabularCPD("A", 2, [[0.027], [0.972]]),
+ TabularCPD("B", 3, [[0.3], [0.6], [0.1]]),
+ ]
+ res = self.belief_propagation.query(
+ ["B", "C"], evidence={"D": 0}, virtual_evidence=ve
+ )
+ assert np.allclose(
+ res["B"].values, np.array([0.05938567, 0.3440273, 0.59658703]), atol=1e-20
+ )
+ assert np.allclose(
+ res["C"].values, np.array([0.25542662, 0.74457338]), atol=1e-20
+ )
+
+ def test_query_allows_multiple_virtual_evidence_per_variable(self):
+ ve1 = [
+ TabularCPD("A", 2, [[0.1], [0.9]]),
+ TabularCPD("A", 2, [[0.3], [0.7]]),
+ ]
+ res1 = self.belief_propagation.query(["B"], virtual_evidence=ve1)
+ cpd = TabularCPD("A", 2, [[0.1 * 0.3], [0.9 * 0.7]])
+ cpd.normalize()
+ res2 = self.belief_propagation.query(["B"], virtual_evidence=[cpd])
+ assert np.allclose(res1["B"].values, res2["B"].values, atol=1e-20)
+ assert np.allclose(
+ res2["B"].values, np.array([0.05461538, 0.15461538, 0.79076923]), atol=1e-20
+ )
+
+ def test_query_error_obs_var_has_evidence(self):
+ with self.assertRaises(
+ ValueError,
+ msg="Can't have the same variables in both `evidence` and `virtual_evidence`. Found in both: {'A'}",
+ ):
+ self.belief_propagation.query(
+ ["B"], evidence={"A": 1}, virtual_evidence={"A": [np.array([0.1, 0.9])]}
+ )
| [
{
"components": [
{
"doc": "Checks the virtual evidence's format is correct. Each evidence must:\n- Be a TabularCPD instance\n- Be targeted to a single variable\n- Be defined on a variable which is in the model\n- Have the same cardinality as its corresponding variable in the model\n\nParameters\n... | [
"pgmpy/tests/test_inference/test_ExactInference.py::TestBeliefPropagationWithMessageParsing::test_query_allows_multiple_virtual_evidence_per_variable",
"pgmpy/tests/test_inference/test_ExactInference.py::TestBeliefPropagationWithMessageParsing::test_query_error_obs_var_has_evidence",
"pgmpy/tests/test_inference... | [
"pgmpy/tests/test_inference/test_ExactInference.py::TestVariableElimination::test_elimination_order",
"pgmpy/tests/test_inference/test_ExactInference.py::TestVariableElimination::test_induced_graph",
"pgmpy/tests/test_inference/test_ExactInference.py::TestVariableElimination::test_induced_width",
"pgmpy/tests... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feature/Factor Graph BP: Add virtual evidence
This PR adds capacity to handle virtual evidence on the factor graph BP.
I defined virtual evidence as a list of tabular CPD to be consistent with the current implementation
### Your checklist for this pull request
- [x] Make sure you are requesting to **pull a topic/feature/bugfix branch** (right side). Don't request your master!
- [x] Make sure you are making a pull request against the **dev branch** (left side). Also you should start *your branch* off *our dev*.
- [x] Check the commit's or even all commits' message styles matches our requested structure.
### List of changes to the codebase in this pull request
- Add ability to support virtual evidence Factor Graph BP.
- In the base Inference class, exported the virtual evidence checks from `_virtual_evidence()` into a new function `_check_virtual_evidence()`. Indeed in the Factor Graph BP, there's no need to add a new variable to the graph, instead each evidence becomes a virtual message that gets added to the variable's incoming messages list
- Exhaustive testing
- Improved documentation
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pgmpy/inference/base.py]
(definition of Inference._check_virtual_evidence:)
def _check_virtual_evidence(self, virtual_evidence):
"""Checks the virtual evidence's format is correct. Each evidence must:
- Be a TabularCPD instance
- Be targeted to a single variable
- Be defined on a variable which is in the model
- Have the same cardinality as its corresponding variable in the model
Parameters
----------
virtual_evidence: list
A list of TabularCPD instances specifying the virtual evidence for each
of the evidence variables."""
(definition of Inference._get_virtual_evidence_var_list:)
def _get_virtual_evidence_var_list(virtual_evidence):
"""Returns the list of variables that have a virtual evidence.
Parameters
----------
virtual_evidence: list
A list of TabularCPD instances specifying the virtual evidence for each
of the evidence variables."""
[end of new definitions in pgmpy/inference/base.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | cf8d0f12e2e5be62b01ff8fded85f3f64eab1e84 | ||
tobymao__sqlglot-3072 | 3,072 | tobymao/sqlglot | null | 223a4751f88809710872fa7d757d22d9eeeb4f40 | 2024-03-03T11:04:04Z | diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 85ed6705e6..1cf94a4854 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -670,6 +670,7 @@ class Tokenizer(tokens.Tokenizer):
"RENAME": TokenType.REPLACE,
"RM": TokenType.COMMAND,
"SAMPLE": TokenType.TABLE_SAMPLE,
+ "SEQUENCE": TokenType.SEQUENCE,
"SQL_DOUBLE": TokenType.DOUBLE,
"SQL_VARCHAR": TokenType.VARCHAR,
"STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 1a24875043..47bb8122b6 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1214,6 +1214,8 @@ class Create(DDL):
"begin": False,
"end": False,
"clone": False,
+ "start": False,
+ "increment": False,
}
@property
@@ -3619,6 +3621,14 @@ def output_name(self) -> str:
return self.name
+class Increment(Expression):
+ arg_types = {"this": True}
+
+
+class Start(Expression):
+ arg_types = {"this": True}
+
+
class Parameter(Condition):
arg_types = {"this": True, "expression": False}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index e6f5c4b085..c87b552eb2 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -958,7 +958,13 @@ def create_sql(self, expression: exp.Create) -> str:
clone = self.sql(expression, "clone")
clone = f" {clone}" if clone else ""
- expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}"
+ start = self.sql(expression, "start")
+ start = f" {start}" if start else ""
+
+ increment = self.sql(expression, "increment")
+ increment = f" {increment}" if increment else ""
+
+ expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}{start}{increment}"
return self.prepend_ctes(expression, expression_sql)
def clone_sql(self, expression: exp.Clone) -> str:
@@ -1541,6 +1547,14 @@ def historicaldata_sql(self, expression: exp.HistoricalData) -> str:
expr = self.sql(expression, "expression")
return f"{this} ({kind} => {expr})"
+ def start_sql(self, expression: exp.Start) -> str:
+ this = self.sql(expression, "this")
+ return f"START WITH {this}"
+
+ def increment_sql(self, expression: exp.Increment) -> str:
+ this = self.sql(expression, "this")
+ return f"INCREMENT BY {this}"
+
def table_parts(self, expression: exp.Table) -> str:
return ".".join(
self.sql(part)
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 49dac2ea62..8f96def172 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -291,6 +291,7 @@ class Parser(metaclass=_Parser):
TokenType.VIEW,
TokenType.MODEL,
TokenType.DICTIONARY,
+ TokenType.SEQUENCE,
TokenType.STORAGE_INTEGRATION,
}
@@ -1433,6 +1434,8 @@ def _parse_create(self) -> exp.Create | exp.Command:
begin = None
end = None
clone = None
+ seq_start = None
+ seq_increment = None
def extend_props(temp_props: t.Optional[exp.Properties]) -> None:
nonlocal properties
@@ -1510,6 +1513,12 @@ def extend_props(temp_props: t.Optional[exp.Properties]) -> None:
elif create_token.token_type == TokenType.VIEW:
if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"):
no_schema_binding = True
+ elif create_token.token_type == TokenType.SEQUENCE:
+ if self._match_texts("START") or self._match(TokenType.START_WITH):
+ seq_start = self.expression(exp.Start, this=self._parse_number())
+ if self._match_texts("INCREMENT"):
+ self._match_texts("BY")
+ seq_increment = self.expression(exp.Increment, this=self._parse_number())
shallow = self._match_text_seq("SHALLOW")
@@ -1537,6 +1546,8 @@ def extend_props(temp_props: t.Optional[exp.Properties]) -> None:
begin=begin,
end=end,
clone=clone,
+ start=seq_start,
+ increment=seq_increment,
)
def _parse_property_before(self) -> t.Optional[exp.Expression]:
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index da9df7d26d..6d9e021d63 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -340,6 +340,7 @@ class TokenType(AutoName):
SELECT = auto()
SEMI = auto()
SEPARATOR = auto()
+ SEQUENCE = auto()
SERDE_PROPERTIES = auto()
SET = auto()
SETTINGS = auto()
| diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 4cb0159e9f..b14eb38c61 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -1099,6 +1099,15 @@ def test_ddl(self):
write={"snowflake": "CREATE TABLE a (b INT)"},
)
+ self.validate_identity("CREATE SEQUENCE seq1 START WITH 1 INCREMENT BY 2")
+ self.assertIsInstance(
+ parse_one("CREATE SEQUENCE seq1 START 1 INCREMENT 2", read="snowflake"), exp.Create
+ )
+ self.assertIsInstance(
+ parse_one("CREATE SEQUENCE seq1 WITH START WITH 1 INCREMENT BY 2", read="snowflake"),
+ exp.Create,
+ )
+
def test_user_defined_functions(self):
self.validate_all(
"CREATE FUNCTION a(x DATE, y BIGINT) RETURNS ARRAY LANGUAGE JAVASCRIPT AS $$ SELECT 1 $$",
| [] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl"
] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snow... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(snowflake): parse CREATE SEQUENCE
see https://docs.snowflake.com/en/sql-reference/sql/create-sequence
partially resolves #2954
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
support parsing sequence related statements
**Is your feature request related to a problem? Please describe.**
When using fakesnow for unit testing we discovered that `CREATE SEQUENCE seq1` is currently not supported by sqlglot. As is accessing sequences.
**Describe the solution you'd like**
It would be terrific it sqlglot could also transpile sequence creation, access and deletion.
**Describe alternatives you've considered**
We have currently mocked the parts of our code that relied on the sequences being present. Since the sequence is only created implicitly via SQLalchemy, we also considered changing the table definitions during testing.
**Additional context**
at least between duckdb and snowflake there are bigger differences in how to access a sequence vs creating one:
duckdb: `select nextval(seq1)`
snowflake: `select seq1.nextval`
----------
Hey, can you provide some more information on this? What's exactly the scope of this request? Docs would also be helpful.
I guess the motivation behind this was https://github.com/tekumara/fakesnow/issues/44, i.e. transpiling Snowflake to DuckDB.
The thing is that Snowflake's syntax can't be mapped 1-1 to DuckDB without additional context, because e.g. it's perfectly valid to have a table `t` with a column `nextval`, so converting `t.nextval` into `nextval('t')` would be incorrect in this case. The additional context needed is whether `t` is a sequence object or not.
Another way to make this work could be through having metadata in comments:
```sql
SELECT t.nextval /* sequence */ FROM t
```
Not sure this is the correct way to address this, though. Due to the above I'll go ahead and close this as not planned for the time being. Happy to continue the discussion though, either here or in Slack.
Will take a look at the DDL syntax and see what it'd entail to fix the transpilation there, at least.
Thanks for looking into this @georgesittas! I think it would be great if sqlglot would have have some docs about what things are not within the scope.
Sure thing! May update the FAQ section later about this, but what's within scope is kinda fluid, e.g. it changes depending on our priorities. The rule of thumb is that if something's too complicated and relatively niche, it's unlikely that we'll prioritize it.
--------------------
</issues> | ceb42fabad60312699e4b15936aeebac00e22e4d | |
pgmpy__pgmpy-1740 | 1,740 | pgmpy/pgmpy | null | e11c33d7a2288c8b94b502485e338833646e8214 | 2024-03-01T19:07:23Z | diff --git a/.gitignore b/.gitignore
index 540035c64..1b133ceaa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,3 +39,6 @@ nosetests.xml
# Auto examples generated
docs/auto_examples/*
docs/examples/*
+
+# Macos files
+.DS_Store
diff --git a/pgmpy/inference/ExactInference.py b/pgmpy/inference/ExactInference.py
index 86baab5b3..7224bac99 100644
--- a/pgmpy/inference/ExactInference.py
+++ b/pgmpy/inference/ExactInference.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import copy
import itertools
+from functools import reduce
import networkx as nx
import numpy as np
@@ -20,8 +21,8 @@
from pgmpy.models import (
BayesianNetwork,
DynamicBayesianNetwork,
+ FactorGraph,
JunctionTree,
- MarkovNetwork,
)
from pgmpy.utils import compat_fns
@@ -1235,3 +1236,229 @@ def map_query(
map_query_results[var] = value
return map_query_results
+
+
+class BeliefPropagationWithMessageParsing(Inference):
+ """
+ Class for performing efficient inference using Belief Propagation method on factor graphs.
+
+ The message-parsing algorithm recursively parses the factor graph to propagate the
+ model's beliefs to infer the posterior distribution of the queried variable. The recursion
+ stops when reaching an observed variable or a unobserved root/leaf variable.
+
+ Parameters
+ ----------
+ model: FactorGraph
+ model for which inference is to performed
+
+ References
+ ----------
+ Algorithm 2.1 in https://www.mbmlbook.com/LearningSkills_Testing_out_the_model.html
+ by J Winn (Microsoft Research).
+ """
+
+ def __init__(self, model: FactorGraph, check_model=True):
+ assert isinstance(
+ model, FactorGraph
+ ), "Model must be an instance of FactorGraph"
+ if check_model:
+ model.check_model()
+ self.model = model
+
+ def query(self, variables, evidence):
+ """
+ Returns the a dict of posterior distributions for each of the queried `variables`,
+ given the `evidence`.
+
+ Parameters
+ ----------
+ variables: list
+ list of variables for which you want to compute the posterior
+ evidence: dict
+ a dict key, value pair as {var: state_of_var_observed}
+
+ Examples
+ --------
+ >>> from pgmpy.factors.discrete import DiscreteFactor
+ >>> from pgmpy.models import FactorGraph
+ >>> from pgmpy.inference import BeliefPropagation
+ >>> factor_graph = FactorGraph()
+ >>> factor_graph.add_nodes_from(["A", "B", "C", "D"])
+ >>> phi1 = DiscreteFactor(["A"], [2], [0.4, 0.6])
+ >>> phi2 = DiscreteFactor(
+ ... ["B", "A"], [3, 2], [[0.2, 0.05], [0.3, 0.15], [0.5, 0.8]]
+ ... )
+ >>> phi3 = DiscreteFactor(["C", "B"], [2, 3], [[0.4, 0.5, 0.1], [0.6, 0.5, 0.9]])
+ >>> phi4 = DiscreteFactor(
+ ... ["D", "B"], [3, 3], [[0.1, 0.1, 0.2], [0.3, 0.2, 0.1], [0.6, 0.7, 0.7]]
+ ... )
+ >>> factor_graph.add_factors(phi1, phi2, phi3, phi4)
+ >>> factor_graph.add_edges_from(
+ ... [
+ ... (phi1, "A"),
+ ... ("A", phi2),
+ ... (phi2, "B"),
+ ... ("B", phi3),
+ ... (phi3, "C"),
+ ... ("B", phi4),
+ ... (phi4, "D"),
+ ... ]
+ ... )
+ >>> belief_propagation = BeliefPropagation(factor_graph)
+ >>> belief_propagation.query(variables=['B', 'C'],
+ ... evidence={'A': 1, 'D': 0})
+ """
+ common_vars = set(evidence if evidence is not None else []).intersection(
+ set(variables)
+ )
+ if common_vars:
+ raise ValueError(
+ f"Can't have the same variables in both `variables` and `evidence`. Found in both: {common_vars}"
+ )
+
+ agg_res = {}
+ for var in variables:
+ res = self.schedule_variable_node_messages(var, evidence, None)
+ agg_res[var] = DiscreteFactor([var], [len(res)], res)
+ return agg_res
+
+ def schedule_variable_node_messages(self, variable, evidence, from_factor):
+ """
+ Returns the message sent by the variable to the factor requesting it.
+ For that, the variable requests the messages coming from its neighbouring
+ factors, except the one making the request.
+
+ Parameters
+ ----------
+ variable: str
+ the variable node from which to compute the outgoing message
+ evidence: dict
+ a dict key, value pair as {var: state_of_var_observed}
+ from_factor: str
+ the factor requesting the message, as part of the recursion.
+ None for the first time this function is called.
+ """
+ if variable in evidence.keys():
+ # Is an observed variable
+ return self.model.get_point_mass_message(variable, evidence[variable])
+
+ incoming_factors = [
+ factor
+ for factor in list(self.model.neighbors(variable))
+ if factor != from_factor
+ ]
+
+ if len(incoming_factors) == 0:
+ # Is an unobserved leaf variable
+ return self.calc_variable_node_message(variable, [])
+ else:
+ # Else, get the incoming messages from all incoming factors
+ incoming_messages = []
+ for factor in incoming_factors:
+ incoming_messages.append(
+ self.schedule_factor_node_messages(
+ factor, evidence, from_variable=variable
+ )
+ )
+ return self.calc_variable_node_message(variable, incoming_messages)
+
+ def schedule_factor_node_messages(self, factor, evidence, from_variable):
+ """
+ Returns the message sent from the factor to the variable requesting it.
+ For that, the factor requests the messages coming from its neighbouring
+ variables, except the one making the request.
+
+ Parameters
+ ----------
+ factor: str
+ the factor from which we want to compute the outgoing message
+ evidence: dict
+ a dict key, value pair as {var: state_of_var_observed}
+ from_variable: str
+ the variable requesting the message, as part of the recursion.
+ """
+ assert from_variable is not None, "from_var must be specified"
+
+ incoming_vars = [var for var in factor.variables if var != from_variable]
+ if len(incoming_vars) == 0:
+ # from_var is a root variable. The factor is its prior
+ return self.calc_factor_node_message(factor, [], from_variable)
+ else:
+ # Else, get the incoming messages from all incoming variables
+ incoming_messages = []
+ for var in incoming_vars:
+ incoming_messages.append(
+ self.schedule_variable_node_messages(
+ var, evidence, from_factor=factor
+ )
+ )
+ return self.calc_factor_node_message(
+ factor, incoming_messages, from_variable
+ )
+
+ def calc_variable_node_message(self, variable, incoming_messages):
+ """
+ The outgoing message is the element wise product of all incoming messages
+
+ If there are no incoming messages, returns a uniform message
+ If there is only one incoming message, returns that message
+ Otherwise, returns the product of all incoming messages
+
+ Parameters
+ ----------
+ variable: str
+ the variable node from which to compute the outgoing message
+ incoming_messages: list
+ list of messages coming to this variable node
+ """
+ if len(incoming_messages) == 0:
+ return self.model.get_uniform_message(variable)
+ elif len(incoming_messages) == 1:
+ return incoming_messages[0]
+ else:
+ outgoing_message = reduce(np.multiply, incoming_messages)
+ return outgoing_message / np.sum(outgoing_message)
+
+ @staticmethod
+ def calc_factor_node_message(factor, incoming_messages, target_var):
+ """
+ Returns the outgoing message for a factor node, which is the
+ multiplication of the incoming messages with the factor function (CPT).
+
+ The variables' order in the incoming messages list must match the
+ variable's order in the CPT's dimensions
+
+ Parameters
+ ----------
+ factor: str
+ the factor node from which to compute the outgoing message
+ incoming_messages: list
+ list of messages coming to this factor node
+ target_var: str
+ the variable node to which the outgoing message is being sent to
+ """
+ cpt = factor.values
+
+ assert (
+ len(incoming_messages) == cpt.ndim - 1
+ ), f"Error computing factor node message for {target_var}. The number of incoming messages must equal the card(CPT) - 1"
+
+ if len(incoming_messages) == 0:
+ return cpt
+
+ # Ensure that the target var is on the CPT's 0th axis
+ target_var_idx = factor.variables.index(target_var)
+ if target_var_idx != 0:
+ # Move target var to the 0th axis to allow the reduction
+ cpt = np.moveaxis(cpt, target_var_idx, 0)
+
+ # Invert incoming_messages, so that the first message corresponds to the last
+ # dimension of the CPT
+ incoming_messages = list(reversed(incoming_messages))
+
+ # Reduce the CPT with the inverted list of incoming messages
+ outgoing_message = reduce(
+ lambda cpt_reduced, m: np.matmul(cpt_reduced, m), incoming_messages, cpt
+ )
+ # Normalise
+ return outgoing_message / sum(outgoing_message)
diff --git a/pgmpy/inference/__init__.py b/pgmpy/inference/__init__.py
index e1bdf9b35..25b40aa2a 100644
--- a/pgmpy/inference/__init__.py
+++ b/pgmpy/inference/__init__.py
@@ -2,6 +2,7 @@
from .CausalInference import CausalInference
from .ExactInference import BeliefPropagation
from .ExactInference import VariableElimination
+from .ExactInference import BeliefPropagationWithMessageParsing
from .ApproxInference import ApproxInference
from .dbn_inference import DBNInference
from .mplp import Mplp
@@ -11,6 +12,7 @@
"VariableElimination",
"DBNInference",
"BeliefPropagation",
+ "BeliefPropagationWithMessageParsing",
"BayesianModelSampling",
"CausalInference",
"ApproxInference",
diff --git a/pgmpy/models/FactorGraph.py b/pgmpy/models/FactorGraph.py
index 92ae94bf3..d6f63a9cf 100644
--- a/pgmpy/models/FactorGraph.py
+++ b/pgmpy/models/FactorGraph.py
@@ -6,10 +6,10 @@
import numpy as np
from networkx.algorithms import bipartite
-from pgmpy.models.MarkovNetwork import MarkovNetwork
from pgmpy.base import UndirectedGraph
-from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors import factor_product
+from pgmpy.factors.discrete import DiscreteFactor
+from pgmpy.models.MarkovNetwork import MarkovNetwork
class FactorGraph(UndirectedGraph):
@@ -470,3 +470,55 @@ def copy(self):
copy.add_factors(*factors_copy)
return copy
+
+ def get_point_mass_message(self, variable, observation):
+ """
+ Returns a point mass message for the variable given the observed state.
+
+ Parameters
+ ----------
+ variable: str
+ The variable for which the message needs to be computed.
+ observation: int
+ The observed state of the variable.
+
+ Examples
+ --------
+ >>> from pgmpy.models import FactorGraph
+ >>> from pgmpy.factors.discrete import DiscreteFactor
+ >>> G = FactorGraph()
+ >>> G.add_node("a")
+ >>> phi = DiscreteFactor(["a"], [4], np.random.rand(4))
+ >>> G.add_factors(phi)
+ >>> G.add_edges_from([("a", phi)])
+ >>> G.get_point_mass_message("a", 1)
+ array([0, 1, 0, 0])
+ """
+ card = self.get_cardinality(variable)
+ # Create an array with 1 at the index of the evidence and 0 elsewhere
+ message = np.zeros(card)
+ message[observation] = 1
+ return message
+
+ def get_uniform_message(self, variable):
+ """
+ Returns a uniform message for the given variable
+
+ Parameters
+ ----------
+ variable: str
+ The variable for which the message needs to be computed.
+
+ Examples
+ --------
+ >>> from pgmpy.models import FactorGraph
+ >>> G = FactorGraph()
+ >>> G.add_node("a")
+ >>> phi = DiscreteFactor(["a"], [4], np.random.rand(4))
+ >>> G.add_factors(phi)
+ >>> G.add_edges_from([("a", phi)])
+ >>> G.get_get_uniform_message("a")
+ array([0.25, 0.25, 0.25, 0.25])
+ """
+ card = self.get_cardinality(variable)
+ return np.ones(card) / card
| diff --git a/pgmpy/tests/test_inference/test_ExactInference.py b/pgmpy/tests/test_inference/test_ExactInference.py
index 2be65a318..52e1d5f35 100644
--- a/pgmpy/tests/test_inference/test_ExactInference.py
+++ b/pgmpy/tests/test_inference/test_ExactInference.py
@@ -6,7 +6,8 @@
from pgmpy.factors.discrete import DiscreteFactor, TabularCPD
from pgmpy.inference import BeliefPropagation, VariableElimination
-from pgmpy.models import BayesianNetwork, JunctionTree, MarkovNetwork
+from pgmpy.inference.ExactInference import BeliefPropagationWithMessageParsing
+from pgmpy.models import BayesianNetwork, FactorGraph, JunctionTree, MarkovNetwork
class TestVariableElimination(unittest.TestCase):
@@ -1110,3 +1111,63 @@ def test_issue_1048(self):
def tearDown(self):
del self.junction_tree
del self.bayesian_model
+
+
+class TestBeliefPropagationWithMessageParsing(unittest.TestCase):
+ def setUp(self):
+ self.factor_graph = FactorGraph()
+ self.factor_graph.add_nodes_from(["A", "B", "C", "D"])
+
+ phi1 = DiscreteFactor(["A"], [2], [0.4, 0.6])
+ phi2 = DiscreteFactor(
+ ["B", "A"], [3, 2], [[0.2, 0.05], [0.3, 0.15], [0.5, 0.8]]
+ )
+ phi3 = DiscreteFactor(["C", "B"], [2, 3], [[0.4, 0.5, 0.1], [0.6, 0.5, 0.9]])
+ phi4 = DiscreteFactor(
+ ["D", "B"], [3, 3], [[0.1, 0.1, 0.2], [0.3, 0.2, 0.1], [0.6, 0.7, 0.7]]
+ )
+
+ self.factor_graph.add_factors(phi1, phi2, phi3, phi4)
+
+ self.factor_graph.add_edges_from(
+ [
+ (phi1, "A"),
+ ("A", phi2),
+ (phi2, "B"),
+ ("B", phi3),
+ (phi3, "C"),
+ ("B", phi4),
+ (phi4, "D"),
+ ]
+ )
+
+ self.belief_propagation = BeliefPropagationWithMessageParsing(self.factor_graph)
+
+ def test_query_single_variable(self):
+ res = self.belief_propagation.query(["C"], {})
+ assert np.allclose(res["C"].values, np.array([0.217, 0.783]), atol=1e-20)
+
+ def test_query_multiple_variable(self):
+ res = self.belief_propagation.query(["A", "B", "C", "D"], {})
+ assert np.allclose(res["A"].values, np.array([0.4, 0.6]), atol=1e-20)
+ assert np.allclose(res["B"].values, np.array([0.11, 0.21, 0.68]), atol=1e-20)
+ assert np.allclose(res["C"].values, np.array([0.217, 0.783]), atol=1e-20)
+ assert np.allclose(res["D"].values, np.array([0.168, 0.143, 0.689]), atol=1e-20)
+
+ def test_query_single_variable_with_evidence(self):
+ res = self.belief_propagation.query(["B", "C"], {"A": 1, "D": 0})
+ assert np.allclose(
+ res["B"].values, np.array([0.02777778, 0.08333333, 0.88888889]), atol=1e-20
+ )
+ assert np.allclose(
+ res["C"].values, np.array([0.14166667, 0.85833333]), atol=1e-20
+ )
+
+ def test_query_multiple_variable_with_evidence(self):
+ res = self.belief_propagation.query(["B", "C"], {"A": 1, "D": 0})
+ assert np.allclose(
+ res["B"].values, np.array([0.02777778, 0.08333333, 0.88888889]), atol=1e-20
+ )
+ assert np.allclose(
+ res["C"].values, np.array([0.14166667, 0.85833333]), atol=1e-20
+ )
diff --git a/pgmpy/tests/test_models/test_FactorGraph.py b/pgmpy/tests/test_models/test_FactorGraph.py
index 9e945f064..e2f491850 100644
--- a/pgmpy/tests/test_models/test_FactorGraph.py
+++ b/pgmpy/tests/test_models/test_FactorGraph.py
@@ -1,10 +1,9 @@
-import numpy as np
import unittest
+import numpy as np
+
from pgmpy.factors.discrete import DiscreteFactor
-from pgmpy.models import FactorGraph
-from pgmpy.models import MarkovNetwork
-from pgmpy.models import JunctionTree
+from pgmpy.models import FactorGraph, JunctionTree, MarkovNetwork
from pgmpy.tests import help_functions as hf
@@ -106,6 +105,22 @@ def test_get_partition_function(self):
def tearDown(self):
del self.graph
+ def test_get_point_mass_message(self):
+ self.graph.add_node("a")
+ phi = DiscreteFactor(["a"], [3], np.random.rand(3))
+ self.graph.add_factors(phi)
+ self.graph.add_edge("a", phi)
+ message = self.graph.get_point_mass_message("a", 0)
+ assert (message == np.array([1, 0, 0])).all()
+
+ def test_get_uniform_message(self):
+ self.graph.add_node("a")
+ phi = DiscreteFactor(["a"], [4], np.random.rand(4))
+ self.graph.add_factors(phi)
+ self.graph.add_edge("a", phi)
+ message = self.graph.get_uniform_message("a")
+ assert (message == np.array([0.25, 0.25, 0.25, 0.25])).all()
+
class TestFactorGraphMethods(unittest.TestCase):
def setUp(self):
| diff --git a/.gitignore b/.gitignore
index 540035c64..1b133ceaa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,3 +39,6 @@ nosetests.xml
# Auto examples generated
docs/auto_examples/*
docs/examples/*
+
+# Macos files
+.DS_Store
| [
{
"components": [
{
"doc": "Class for performing efficient inference using Belief Propagation method on factor graphs.\n\nThe message-parsing algorithm recursively parses the factor graph to propagate the\nmodel's beliefs to infer the posterior distribution of the queried variable. The recursion\n... | [
"pgmpy/tests/test_inference/test_ExactInference.py::TestVariableElimination::test_elimination_order",
"pgmpy/tests/test_inference/test_ExactInference.py::TestVariableElimination::test_induced_graph",
"pgmpy/tests/test_inference/test_ExactInference.py::TestVariableElimination::test_induced_width",
"pgmpy/tests... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feature/Faster Belief Propagation with message parsing & recursion
### Summary
The currently implemented BP is great but I faced limitations when increasing either the number of nodes (>18) or when increasing the number of variables being queried at once (up to 6). My model's variables have cardinalities ranging in 40-120. Under some settings, the inference was either crashing or taking >20s using a Bayes net model, and even longer when using a factor graph.
I suggest this BP algorithm which uses message-parsing and recursion. The algorithm recursively parses the graph to compute and propagate the messages towards the queried variable. Currently, it doesn't allow returning the joint density.
On my model, this BP is more than 10x faster. Where using the existing BP on a Bayes net took >20s, my implementation took <1s.
Let me know what you think
### Your checklist for this pull request
- [x] Make sure you are requesting to **pull a topic/feature/bugfix branch** (right side). Don't request your master!
- [x] Make sure you are making a pull request against the **dev branch** (left side). Also you should start *your branch* off *our dev*.
- [x] Check the commit's or even all commits' message styles matches our requested structure.
### Issue number(s) that this pull request fixes
- Fixes #1739
### List of changes to the codebase in this pull request
- Adds a BeliefPropagationWithMessageParsing class
- Extends the FactorGraph class with functions used for the message parsing algorithm
- Add exhaustive testing
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pgmpy/inference/ExactInference.py]
(definition of BeliefPropagationWithMessageParsing:)
class BeliefPropagationWithMessageParsing(Inference):
"""Class for performing efficient inference using Belief Propagation method on factor graphs.
The message-parsing algorithm recursively parses the factor graph to propagate the
model's beliefs to infer the posterior distribution of the queried variable. The recursion
stops when reaching an observed variable or an unobserved root/leaf variable.
Parameters
----------
model: FactorGraph
model for which inference is to performed
References
----------
Algorithm 2.1 in https://www.mbmlbook.com/LearningSkills_Testing_out_the_model.html
by J Winn (Microsoft Research)."""
(definition of BeliefPropagationWithMessageParsing.__init__:)
def __init__(self, model: FactorGraph, check_model=True):
(definition of BeliefPropagationWithMessageParsing.query:)
def query(self, variables, evidence):
    """Returns a dict of posterior distributions for each of the queried `variables`,
given the `evidence`.
Parameters
----------
variables: list
list of variables for which you want to compute the posterior
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.models import FactorGraph
>>> from pgmpy.inference import BeliefPropagation
>>> factor_graph = FactorGraph()
>>> factor_graph.add_nodes_from(["A", "B", "C", "D"])
>>> phi1 = DiscreteFactor(["A"], [2], [0.4, 0.6])
>>> phi2 = DiscreteFactor(
... ["B", "A"], [3, 2], [[0.2, 0.05], [0.3, 0.15], [0.5, 0.8]]
... )
>>> phi3 = DiscreteFactor(["C", "B"], [2, 3], [[0.4, 0.5, 0.1], [0.6, 0.5, 0.9]])
>>> phi4 = DiscreteFactor(
... ["D", "B"], [3, 3], [[0.1, 0.1, 0.2], [0.3, 0.2, 0.1], [0.6, 0.7, 0.7]]
... )
>>> factor_graph.add_factors(phi1, phi2, phi3, phi4)
>>> factor_graph.add_edges_from(
... [
... (phi1, "A"),
... ("A", phi2),
... (phi2, "B"),
... ("B", phi3),
... (phi3, "C"),
... ("B", phi4),
... (phi4, "D"),
... ]
... )
>>> belief_propagation = BeliefPropagation(factor_graph)
>>> belief_propagation.query(variables=['B', 'C'],
... evidence={'A': 1, 'D': 0})"""
(definition of BeliefPropagationWithMessageParsing.schedule_variable_node_messages:)
def schedule_variable_node_messages(self, variable, evidence, from_factor):
"""Returns the message sent by the variable to the factor requesting it.
For that, the variable requests the messages coming from its neighbouring
factors, except the one making the request.
Parameters
----------
variable: str
the variable node from which to compute the outgoing message
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
from_factor: str
the factor requesting the message, as part of the recursion.
None for the first time this function is called."""
(definition of BeliefPropagationWithMessageParsing.schedule_factor_node_messages:)
def schedule_factor_node_messages(self, factor, evidence, from_variable):
"""Returns the message sent from the factor to the variable requesting it.
For that, the factor requests the messages coming from its neighbouring
variables, except the one making the request.
Parameters
----------
factor: str
the factor from which we want to compute the outgoing message
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
from_variable: str
the variable requesting the message, as part of the recursion."""
(definition of BeliefPropagationWithMessageParsing.calc_variable_node_message:)
def calc_variable_node_message(self, variable, incoming_messages):
"""The outgoing message is the element wise product of all incoming messages
If there are no incoming messages, returns a uniform message
If there is only one incoming message, returns that message
Otherwise, returns the product of all incoming messages
Parameters
----------
variable: str
the variable node from which to compute the outgoing message
incoming_messages: list
list of messages coming to this variable node"""
(definition of BeliefPropagationWithMessageParsing.calc_factor_node_message:)
def calc_factor_node_message(factor, incoming_messages, target_var):
"""Returns the outgoing message for a factor node, which is the
multiplication of the incoming messages with the factor function (CPT).
The variables' order in the incoming messages list must match the
variable's order in the CPT's dimensions
Parameters
----------
factor: str
the factor node from which to compute the outgoing message
incoming_messages: list
list of messages coming to this factor node
target_var: str
the variable node to which the outgoing message is being sent to"""
[end of new definitions in pgmpy/inference/ExactInference.py]
[start of new definitions in pgmpy/models/FactorGraph.py]
(definition of FactorGraph.get_point_mass_message:)
def get_point_mass_message(self, variable, observation):
"""Returns a point mass message for the variable given the observed state.
Parameters
----------
variable: str
The variable for which the message needs to be computed.
observation: int
The observed state of the variable.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = FactorGraph()
>>> G.add_node("a")
>>> phi = DiscreteFactor(["a"], [4], np.random.rand(4))
>>> G.add_factors(phi)
>>> G.add_edges_from([("a", phi)])
>>> G.get_point_mass_message("a", 1)
array([0, 1, 0, 0])"""
(definition of FactorGraph.get_uniform_message:)
def get_uniform_message(self, variable):
"""Returns a uniform message for the given variable
Parameters
----------
variable: str
The variable for which the message needs to be computed.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> G = FactorGraph()
>>> G.add_node("a")
>>> phi = DiscreteFactor(["a"], [4], np.random.rand(4))
>>> G.add_factors(phi)
>>> G.add_edges_from([("a", phi)])
>>> G.get_uniform_message("a")
array([0.25, 0.25, 0.25, 0.25])"""
[end of new definitions in pgmpy/models/FactorGraph.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Feature: belief propagation with message parsing on factor graphs
Hi, I'm currently extending this library with belief propagation on factor graphs, using message parsing. This complements the currently implemented belief propagation which uses variable elimination.
So far it's been 10x faster for my factor graph with a dozen nodes.
I'll push a PR when done. It might help with issues: #383, #1571 , #1020
----------
--------------------
</issues> | cf8d0f12e2e5be62b01ff8fded85f3f64eab1e84 |
huggingface__huggingface_hub-2079 | 2,079 | huggingface/huggingface_hub | null | e699a4e1a1e1fff71afb9b92097067f04ba283c3 | 2024-03-01T17:19:09Z | diff --git a/docs/source/en/guides/integrations.md b/docs/source/en/guides/integrations.md
index 045b8c4327..e64bd0b8a9 100644
--- a/docs/source/en/guides/integrations.md
+++ b/docs/source/en/guides/integrations.md
@@ -148,7 +148,7 @@ are ready to go. You don't need to worry about stuff like repo creation, commits
of this is handled by the mixin and is available to your users. The Mixin also ensures that public methods are well
documented and type annotated.
-As a bonus, [`ModelHubMixin`] handles the model configuration for you. In some cases, you have a `config` input parameter when initializing your class (dictionary or dataclass containing high-level settings). In such cases, the `config` value is automatically serialized into a `config.json` dictionary for you. When re-loading the model from the Hub, the configuration is correctly deserialized. Make sure to use type annotation if you want to deserialize it as a dataclass. The big advantage of having a `config.json` file in your model repository is that it automatically enables the analytics on the Hub (e.g. the "downloads" count).
+As a bonus, [`ModelHubMixin`] handles the model configuration for you. If your `__init__` method expects a `config` input, it will be automatically saved in the repo when calling `save_pretrained` and reloaded correctly by `load_pretrained`. Moreover, if the `config` input parameter is annotated with dataclass type (e.g. `config: Optional[MyConfigClass] = None`), then the `config` value will be correctly deserialized for you. Finally, all jsonable values passed at initialization will be also stored in the config file. This means you don't necessarily have to expect a `config` input to benefit from it. The big advantage of having a `config.json` file in your model repository is that it automatically enables the analytics on the Hub (e.g. the "downloads" count).
### A concrete example: PyTorch
@@ -159,29 +159,27 @@ A good example of what we saw above is [`PyTorchModelHubMixin`], our integration
Here is how any user can load/save a PyTorch model from/to the Hub:
```python
->>> from dataclasses import dataclass
>>> import torch
>>> import torch.nn as nn
>>> from huggingface_hub import PyTorchModelHubMixin
-# 0. (optional) define a config class
->>> @dataclass
-... class Config:
-... hidden_size: int = 512
-... vocab_size: int = 30000
-... output_size: int = 4
-# 1. Define your Pytorch model exactly the same way you are used to
+# Define your Pytorch model exactly the same way you are used to
>>> class MyModel(nn.Module, PyTorchModelHubMixin): # multiple inheritance
-... def __init__(self, config: Config):
+... def __init__(self, hidden_size: int = 512, vocab_size: int = 30000, output_size: int = 4):
... super().__init__()
-... self.param = nn.Parameter(torch.rand(config.hidden_size, config.vocab_size))
-... self.linear = nn.Linear(config.output_size, config.vocab_size)
+... self.param = nn.Parameter(torch.rand(hidden_size, vocab_size))
+... self.linear = nn.Linear(output_size, vocab_size)
... def forward(self, x):
... return self.linear(x + self.param)
->>> model = MyModel(Config(hidden_size=128))
+# 1. Create model
+>>> model = MyModel(hidden_size=128)
+
+# Config is automatically created based on input + default values
+>>> model.config
+{"hidden_size": 128, "vocab_size": 30000, "output_size": 4}
# 2. (optional) Save model to local directory
>>> model.save_pretrained("path/to/my-awesome-model")
@@ -189,10 +187,10 @@ Here is how any user can load/save a PyTorch model from/to the Hub:
# 3. Push model weights to the Hub
>>> model.push_to_hub("my-awesome-model")
-# 4. Initialize model from the Hub
+# 4. Initialize model from the Hub => config has been preserved
>>> model = MyModel.from_pretrained("username/my-awesome-model")
>>> model.config
-Config(hidden_size=128, vocab_size=30000, output_size=4)
+{"hidden_size": 128, "vocab_size": 30000, "output_size": 4}
```
#### Implementation
@@ -211,25 +209,15 @@ class PyTorchModelHubMixin(ModelHubMixin):
2. Implement the `_save_pretrained` method:
```py
-from huggingface_hub import ModelCard, ModelCardData
+from huggingface_hub import ModelHubMixin
class PyTorchModelHubMixin(ModelHubMixin):
(...)
- def _save_pretrained(self, save_directory: Path):
- """Generate Model Card and save weights from a Pytorch model to a local directory."""
- model_card = ModelCard.from_template(
- card_data=ModelCardData(
- license='mit',
- library_name="pytorch",
- ...
- ),
- model_summary=...,
- model_type=...,
- ...
- )
- (save_directory / "README.md").write_text(str(model))
- torch.save(obj=self.module.state_dict(), f=save_directory / "pytorch_model.bin")
+ def _save_pretrained(self, save_directory: Path) -> None:
+ """Save weights from a Pytorch model to a local directory."""
+ save_model_as_safetensor(self.module, str(save_directory / SAFETENSORS_SINGLE_FILE))
+
```
3. Implement the `_from_pretrained` method:
@@ -255,13 +243,15 @@ class PyTorchModelHubMixin(ModelHubMixin):
**model_kwargs,
):
"""Load Pytorch pretrained weights and return the loaded model."""
- if os.path.isdir(model_id): # Can either be a local directory
- print("Loading weights from local directory")
- model_file = os.path.join(model_id, "pytorch_model.bin")
- else: # Or a model on the Hub
- model_file = hf_hub_download( # Download from the hub, passing same input args
+ model = cls(**model_kwargs)
+ if os.path.isdir(model_id):
+ print("Loading weights from local directory")
+ model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE)
+ return cls._load_as_safetensor(model, model_file, map_location, strict)
+
+ model_file = hf_hub_download(
repo_id=model_id,
- filename="pytorch_model.bin",
+ filename=SAFETENSORS_SINGLE_FILE,
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
@@ -269,14 +259,8 @@ class PyTorchModelHubMixin(ModelHubMixin):
resume_download=resume_download,
token=token,
local_files_only=local_files_only,
- )
-
- # Load model and return - custom logic depending on your framework
- model = cls(**model_kwargs)
- state_dict = torch.load(model_file, map_location=torch.device(map_location))
- model.load_state_dict(state_dict, strict=strict)
- model.eval()
- return model
+ )
+ return cls._load_as_safetensor(model, model_file, map_location, strict)
```
And that's it! Your library now enables users to upload and download files to and from the Hub.
diff --git a/src/huggingface_hub/hub_mixin.py b/src/huggingface_hub/hub_mixin.py
index 124c689a44..4ce164c8c8 100644
--- a/src/huggingface_hub/hub_mixin.py
+++ b/src/huggingface_hub/hub_mixin.py
@@ -3,7 +3,7 @@
import os
from dataclasses import asdict, is_dataclass
from pathlib import Path
-from typing import TYPE_CHECKING, Dict, List, Optional, Type, TypeVar, Union, get_args
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, Union, get_args
from .constants import CONFIG_NAME, PYTORCH_WEIGHTS_NAME, SAFETENSORS_SINGLE_FILE
from .file_download import hf_hub_download
@@ -12,6 +12,7 @@
EntryNotFoundError,
HfHubHTTPError,
SoftTemporaryDirectory,
+ is_jsonable,
is_safetensors_available,
is_torch_available,
logging,
@@ -48,18 +49,11 @@ class ModelHubMixin:
Example:
```python
- >>> from dataclasses import dataclass
>>> from huggingface_hub import ModelHubMixin
- # Define your model configuration (optional)
- >>> @dataclass
- ... class Config:
- ... foo: int = 512
- ... bar: str = "cpu"
-
# Inherit from ModelHubMixin (and optionally from your framework's model class)
>>> class MyCustomModel(ModelHubMixin):
- ... def __init__(self, config: Config):
+ ... def __init__(self, size: int = 512, device: str = "cpu"):
... # define how to initialize your model
... super().__init__()
... ...
@@ -85,7 +79,7 @@ class ModelHubMixin:
... # define how to deserialize your model
... ...
- >>> model = MyCustomModel(config=Config(foo=256, bar="gpu"))
+ >>> model = MyCustomModel(size=256, device="gpu")
# Save model weights to local directory
>>> model.save_pretrained("my-awesome-model")
@@ -96,27 +90,75 @@ class ModelHubMixin:
# Download and initialize weights from the Hub
>>> reloaded_model = MyCustomModel.from_pretrained("username/my-awesome-model")
>>> reloaded_model.config
- Config(foo=256, bar="gpu")
+ {"size": 256, "device": "gpu"}
```
"""
config: Optional[Union[dict, "DataclassInstance"]] = None
# ^ optional config attribute automatically set in `from_pretrained` (if not already set by the subclass)
+ _init_parameters: Dict[str, inspect.Parameter]
+ _jsonable_default_values: Dict[str, Any]
+
+ def __init_subclass__(cls) -> None:
+ """Inspect __init__ signature only once when subclassing."""
+ super().__init_subclass__()
+ cls._init_parameters = dict(inspect.signature(cls.__init__).parameters)
+ cls._jsonable_default_values = {
+ param.name: param.default
+ for param in cls._init_parameters.values()
+ if param.default is not inspect.Parameter.empty and is_jsonable(param.default)
+ }
+
def __new__(cls, *args, **kwargs) -> "ModelHubMixin":
+ """Create a new instance of the class and handle config.
+
+ 3 cases:
+ - If `self.config` is already set, do nothing.
+ - If `config` is passed as a dataclass, set it as `self.config`.
+ - Otherwise, build `self.config` from default values and passed values.
+ """
instance = super().__new__(cls)
- # Set `config` attribute if not already set by the subclass
- if instance.config is None:
- if "config" in kwargs:
- instance.config = kwargs["config"]
- elif len(args) > 0:
- sig = inspect.signature(cls.__init__)
- parameters = list(sig.parameters)[1:] # remove `self`
- for key, value in zip(parameters, args):
- if key == "config":
- instance.config = value
- break
+ # If `config` is already set, return early
+ if instance.config is not None:
+ return instance
+
+ # Infer passed values
+ passed_values = {
+ **{
+ key: value
+ for key, value in zip(
+ # Skip `self` and `config` parameters
+ list(cls._init_parameters)[1:],
+ args,
+ )
+ },
+ **kwargs,
+ }
+
+ # If config passed as dataclass => set it and return early
+ if is_dataclass(passed_values.get("config")):
+ instance.config = passed_values["config"]
+ return instance
+
+ # Otherwise, build config from default + passed values
+ init_config = {
+ # default values
+ **cls._jsonable_default_values,
+ # passed values
+ **{key: value for key, value in passed_values.items() if is_jsonable(value)},
+ }
+ init_config.pop("config", {})
+
+ # Populate `init_config` with provided config
+ provided_config = passed_values.get("config")
+ if isinstance(provided_config, dict):
+ init_config.update(provided_config)
+
+ # Set `config` attribute and return
+ if init_config != {}:
+ instance.config = init_config
return instance
def save_pretrained(
@@ -246,32 +288,37 @@ def from_pretrained(
except HfHubHTTPError as e:
logger.info(f"{CONFIG_NAME} not found on the HuggingFace Hub: {str(e)}")
+ # Read config
config = None
if config_file is not None:
- # Read config
with open(config_file, "r", encoding="utf-8") as f:
config = json.load(f)
- # Check if class expect a `config` argument
- init_parameters = inspect.signature(cls.__init__).parameters
- if "config" in init_parameters:
+ # Populate model_kwargs from config
+ for param in cls._init_parameters.values():
+ if param.name not in model_kwargs and param.name in config:
+ model_kwargs[param.name] = config[param.name]
+
+ # Check if `config` argument was passed at init
+ if "config" in cls._init_parameters:
# Check if `config` argument is a dataclass
- config_annotation = init_parameters["config"].annotation
+ config_annotation = cls._init_parameters["config"].annotation
if config_annotation is inspect.Parameter.empty:
pass # no annotation
elif is_dataclass(config_annotation):
- config = config_annotation(**config) # expect a dataclass
+ config = _load_dataclass(config_annotation, config)
else:
# if Optional/Union annotation => check if a dataclass is in the Union
for _sub_annotation in get_args(config_annotation):
if is_dataclass(_sub_annotation):
- config = _sub_annotation(**config)
+ config = _load_dataclass(_sub_annotation, config)
break
# Forward config to model initialization
model_kwargs["config"] = config
- elif any(param.kind == inspect.Parameter.VAR_KEYWORD for param in init_parameters.values()):
- # If __init__ accepts **kwargs, let's forward the config as well (as a dict)
+
+ elif any(param.kind == inspect.Parameter.VAR_KEYWORD for param in cls._init_parameters.values()):
+ # 2. If __init__ accepts **kwargs, let's forward the config as well (as a dict)
model_kwargs["config"] = config
instance = cls._from_pretrained(
@@ -288,7 +335,7 @@ def from_pretrained(
# Implicitly set the config as instance attribute if not already set by the class
# This way `config` will be available when calling `save_pretrained` or `push_to_hub`.
- if config is not None and instance.config is None:
+ if config is not None and (instance.config is None or instance.config == {}):
instance.config = config
return instance
@@ -428,26 +475,19 @@ class PyTorchModelHubMixin(ModelHubMixin):
Example:
```python
- >>> from dataclasses import dataclass
>>> import torch
>>> import torch.nn as nn
>>> from huggingface_hub import PyTorchModelHubMixin
- >>> @dataclass
- ... class Config:
- ... hidden_size: int = 512
- ... vocab_size: int = 30000
- ... output_size: int = 4
-
>>> class MyModel(nn.Module, PyTorchModelHubMixin):
- ... def __init__(self, config: Config):
+ ... def __init__(self, hidden_size: int = 512, vocab_size: int = 30000, output_size: int = 4):
... super().__init__()
- ... self.param = nn.Parameter(torch.rand(config.hidden_size, config.vocab_size))
- ... self.linear = nn.Linear(config.output_size, config.vocab_size)
+ ... self.param = nn.Parameter(torch.rand(hidden_size, vocab_size))
+ ... self.linear = nn.Linear(output_size, vocab_size)
... def forward(self, x):
... return self.linear(x + self.param)
- >>> model = MyModel()
+ >>> model = MyModel(hidden_size=256)
# Save model weights to local directory
>>> model.save_pretrained("my-awesome-model")
@@ -457,6 +497,8 @@ class PyTorchModelHubMixin(ModelHubMixin):
# Download and initialize weights from the Hub
>>> model = MyModel.from_pretrained("username/my-awesome-model")
+ >>> model.hidden_size
+ 256
```
"""
@@ -536,3 +578,11 @@ def _load_as_safetensor(cls, model: T, model_file: str, map_location: str, stric
)
model.to(map_location) # type: ignore [attr-defined]
return model
+
+
+def _load_dataclass(datacls: Type["DataclassInstance"], data: dict) -> "DataclassInstance":
+ """Load a dataclass instance from a dictionary.
+
+ Fields not expected by the dataclass are ignored.
+ """
+ return datacls(**{k: v for k, v in data.items() if k in datacls.__dataclass_fields__})
diff --git a/src/huggingface_hub/utils/__init__.py b/src/huggingface_hub/utils/__init__.py
index f56cdfd4ae..14473e22a0 100644
--- a/src/huggingface_hub/utils/__init__.py
+++ b/src/huggingface_hub/utils/__init__.py
@@ -97,6 +97,7 @@
from ._subprocess import capture_output, run_interactive_subprocess, run_subprocess
from ._telemetry import send_telemetry
from ._token import get_token
+from ._typing import is_jsonable
from ._validators import (
HFValidationError,
smoothly_deprecate_use_auth_token,
diff --git a/src/huggingface_hub/utils/_typing.py b/src/huggingface_hub/utils/_typing.py
index b45d88dd42..ae502b825b 100644
--- a/src/huggingface_hub/utils/_typing.py
+++ b/src/huggingface_hub/utils/_typing.py
@@ -14,10 +14,37 @@
# limitations under the License.
"""Handle typing imports based on system compatibility."""
-from typing import Callable, Literal, TypeVar
+from typing import Any, Callable, Literal, TypeVar
HTTP_METHOD_T = Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"]
# type hint meaning "function signature not changed by decorator"
CallableT = TypeVar("CallableT", bound=Callable)
+
+_JSON_SERIALIZABLE_TYPES = (int, float, str, bool, type(None))
+
+
+def is_jsonable(obj: Any) -> bool:
+ """Check if an object is JSON serializable.
+
+ This is a weak check, as it does not check for the actual JSON serialization, but only for the types of the object.
+ It works correctly for basic use cases but do not guarantee an exhaustive check.
+
+ Object is considered to be recursively json serializable if:
+ - it is an instance of int, float, str, bool, or NoneType
+ - it is a list or tuple and all its items are json serializable
+ - it is a dict and all its keys are strings and all its values are json serializable
+ """
+ try:
+ if isinstance(obj, _JSON_SERIALIZABLE_TYPES):
+ return True
+ if isinstance(obj, (list, tuple)):
+ return all(is_jsonable(item) for item in obj)
+ if isinstance(obj, dict):
+ return all(isinstance(key, str) and is_jsonable(value) for key, value in obj.items())
+ if hasattr(obj, "__json__"):
+ return True
+ return False
+ except RecursionError:
+ return False
| diff --git a/tests/test_hub_mixin_pytorch.py b/tests/test_hub_mixin_pytorch.py
index bff22bae36..9fe623bc30 100644
--- a/tests/test_hub_mixin_pytorch.py
+++ b/tests/test_hub_mixin_pytorch.py
@@ -3,7 +3,7 @@
import struct
import unittest
from pathlib import Path
-from typing import TypeVar
+from typing import Any, TypeVar
from unittest.mock import Mock, patch
import pytest
@@ -17,6 +17,8 @@
from .testing_utils import repo_name, requires
+DUMMY_OBJECT = object()
+
if is_torch_available():
import torch
import torch.nn as nn
@@ -32,8 +34,21 @@ def __init__(self, **kwargs):
def forward(self, x):
return self.l1(x)
+ class DummyModelNoConfig(nn.Module, PyTorchModelHubMixin):
+ def __init__(
+ self,
+ num_classes: int = 42,
+ state: str = "layernorm",
+ not_jsonable: Any = DUMMY_OBJECT,
+ ):
+ super().__init__()
+ self.num_classes = num_classes
+ self.state = state
+ self.not_jsonable = not_jsonable
+
else:
DummyModel = None
+ DummyModelNoConfig = None
@requires("torch")
@@ -235,6 +250,55 @@ def test_push_to_hub(self):
# Delete repo
self._api.delete_repo(repo_id=repo_id)
+ def test_load_no_config(self):
+ config_file = self.cache_dir / "config.json"
+
+ # Test creating model => auto-generated config
+ model = DummyModelNoConfig(num_classes=50)
+ assert model.config == {"num_classes": 50, "state": "layernorm"}
+
+ # Test saving model => auto-generated config is saved
+ model.save_pretrained(self.cache_dir)
+ assert config_file.exists()
+ assert json.loads(config_file.read_text()) == {"num_classes": 50, "state": "layernorm"}
+
+ # Reload model => config is reloaded
+ reloaded = DummyModelNoConfig.from_pretrained(self.cache_dir)
+ assert reloaded.num_classes == 50
+ assert reloaded.state == "layernorm"
+ assert reloaded.config == {"num_classes": 50, "state": "layernorm"}
+
+ # Reload model with custom config => custom config is used
+ reloaded_with_default = DummyModelNoConfig.from_pretrained(self.cache_dir, state="other")
+ assert reloaded_with_default.num_classes == 50
+ assert reloaded_with_default.state == "other"
+ assert reloaded_with_default.config == {"num_classes": 50, "state": "other"}
+
+ reloaded_with_default.save_pretrained(self.cache_dir)
+ assert json.loads(config_file.read_text()) == {"num_classes": 50, "state": "other"}
+
+ def test_save_with_non_jsonable_config(self):
+ # Save with a non-jsonable value
+ my_object = object()
+ model = DummyModelNoConfig(not_jsonable=my_object)
+ assert model.not_jsonable is my_object
+ assert "not_jsonable" not in model.config
+
+ # Reload with default value
+ model.save_pretrained(self.cache_dir)
+ reloaded_model = DummyModelNoConfig.from_pretrained(self.cache_dir)
+ assert reloaded_model.not_jsonable is DUMMY_OBJECT
+ assert "not_jsonable" not in model.config
+
+ # If jsonable value passed by user, it's saved in the config
+ new_model = DummyModelNoConfig(not_jsonable=123)
+ new_model.save_pretrained(self.cache_dir)
+ assert new_model.config["not_jsonable"] == 123
+
+ reloaded_new_model = DummyModelNoConfig.from_pretrained(self.cache_dir)
+ assert reloaded_new_model.not_jsonable == 123
+ assert reloaded_new_model.config["not_jsonable"] == 123
+
def test_save_model_with_shared_tensors(self):
"""
Regression test for #2086. Shared tensors should be saved correctly.
diff --git a/tests/test_utils_typing.py b/tests/test_utils_typing.py
new file mode 100644
index 0000000000..afc148848c
--- /dev/null
+++ b/tests/test_utils_typing.py
@@ -0,0 +1,49 @@
+import json
+
+import pytest
+
+from huggingface_hub.utils._typing import is_jsonable
+
+
+class NotSerializableClass:
+ pass
+
+
+OBJ_WITH_CIRCULAR_REF = {"hello": "world"}
+OBJ_WITH_CIRCULAR_REF["recursive"] = OBJ_WITH_CIRCULAR_REF
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ 123, #
+ 3.14,
+ "Hello, world!",
+ True,
+ None,
+ [],
+ [1, 2, 3],
+ [(1, 2.0, "string"), True],
+ {},
+ {"name": "Alice", "age": 30},
+ ],
+)
+def test_is_jsonable_success(data):
+ assert is_jsonable(data)
+ json.dumps(data)
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ set([1, 2, 3]),
+ lambda x: x + 1,
+ NotSerializableClass(),
+ {"obj": NotSerializableClass()},
+ OBJ_WITH_CIRCULAR_REF,
+ ],
+)
+def test_is_jsonable_failure(data):
+ assert not is_jsonable(data)
+ with pytest.raises((TypeError, ValueError)):
+ json.dumps(data)
| diff --git a/docs/source/en/guides/integrations.md b/docs/source/en/guides/integrations.md
index 045b8c4327..e64bd0b8a9 100644
--- a/docs/source/en/guides/integrations.md
+++ b/docs/source/en/guides/integrations.md
@@ -148,7 +148,7 @@ are ready to go. You don't need to worry about stuff like repo creation, commits
of this is handled by the mixin and is available to your users. The Mixin also ensures that public methods are well
documented and type annotated.
-As a bonus, [`ModelHubMixin`] handles the model configuration for you. In some cases, you have a `config` input parameter when initializing your class (dictionary or dataclass containing high-level settings). In such cases, the `config` value is automatically serialized into a `config.json` dictionary for you. When re-loading the model from the Hub, the configuration is correctly deserialized. Make sure to use type annotation if you want to deserialize it as a dataclass. The big advantage of having a `config.json` file in your model repository is that it automatically enables the analytics on the Hub (e.g. the "downloads" count).
+As a bonus, [`ModelHubMixin`] handles the model configuration for you. If your `__init__` method expects a `config` input, it will be automatically saved in the repo when calling `save_pretrained` and reloaded correctly by `load_pretrained`. Moreover, if the `config` input parameter is annotated with dataclass type (e.g. `config: Optional[MyConfigClass] = None`), then the `config` value will be correctly deserialized for you. Finally, all jsonable values passed at initialization will be also stored in the config file. This means you don't necessarily have to expect a `config` input to benefit from it. The big advantage of having a `config.json` file in your model repository is that it automatically enables the analytics on the Hub (e.g. the "downloads" count).
### A concrete example: PyTorch
@@ -159,29 +159,27 @@ A good example of what we saw above is [`PyTorchModelHubMixin`], our integration
Here is how any user can load/save a PyTorch model from/to the Hub:
```python
->>> from dataclasses import dataclass
>>> import torch
>>> import torch.nn as nn
>>> from huggingface_hub import PyTorchModelHubMixin
-# 0. (optional) define a config class
->>> @dataclass
-... class Config:
-... hidden_size: int = 512
-... vocab_size: int = 30000
-... output_size: int = 4
-# 1. Define your Pytorch model exactly the same way you are used to
+# Define your Pytorch model exactly the same way you are used to
>>> class MyModel(nn.Module, PyTorchModelHubMixin): # multiple inheritance
-... def __init__(self, config: Config):
+... def __init__(self, hidden_size: int = 512, vocab_size: int = 30000, output_size: int = 4):
... super().__init__()
-... self.param = nn.Parameter(torch.rand(config.hidden_size, config.vocab_size))
-... self.linear = nn.Linear(config.output_size, config.vocab_size)
+... self.param = nn.Parameter(torch.rand(hidden_size, vocab_size))
+... self.linear = nn.Linear(output_size, vocab_size)
... def forward(self, x):
... return self.linear(x + self.param)
->>> model = MyModel(Config(hidden_size=128))
+# 1. Create model
+>>> model = MyModel(hidden_size=128)
+
+# Config is automatically created based on input + default values
+>>> model.config
+{"hidden_size": 128, "vocab_size": 30000, "output_size": 4}
# 2. (optional) Save model to local directory
>>> model.save_pretrained("path/to/my-awesome-model")
@@ -189,10 +187,10 @@ Here is how any user can load/save a PyTorch model from/to the Hub:
# 3. Push model weights to the Hub
>>> model.push_to_hub("my-awesome-model")
-# 4. Initialize model from the Hub
+# 4. Initialize model from the Hub => config has been preserved
>>> model = MyModel.from_pretrained("username/my-awesome-model")
>>> model.config
-Config(hidden_size=128, vocab_size=30000, output_size=4)
+{"hidden_size": 128, "vocab_size": 30000, "output_size": 4}
```
#### Implementation
@@ -211,25 +209,15 @@ class PyTorchModelHubMixin(ModelHubMixin):
2. Implement the `_save_pretrained` method:
```py
-from huggingface_hub import ModelCard, ModelCardData
+from huggingface_hub import ModelHubMixin
class PyTorchModelHubMixin(ModelHubMixin):
(...)
- def _save_pretrained(self, save_directory: Path):
- """Generate Model Card and save weights from a Pytorch model to a local directory."""
- model_card = ModelCard.from_template(
- card_data=ModelCardData(
- license='mit',
- library_name="pytorch",
- ...
- ),
- model_summary=...,
- model_type=...,
- ...
- )
- (save_directory / "README.md").write_text(str(model))
- torch.save(obj=self.module.state_dict(), f=save_directory / "pytorch_model.bin")
+ def _save_pretrained(self, save_directory: Path) -> None:
+ """Save weights from a Pytorch model to a local directory."""
+ save_model_as_safetensor(self.module, str(save_directory / SAFETENSORS_SINGLE_FILE))
+
```
3. Implement the `_from_pretrained` method:
@@ -255,13 +243,15 @@ class PyTorchModelHubMixin(ModelHubMixin):
**model_kwargs,
):
"""Load Pytorch pretrained weights and return the loaded model."""
- if os.path.isdir(model_id): # Can either be a local directory
- print("Loading weights from local directory")
- model_file = os.path.join(model_id, "pytorch_model.bin")
- else: # Or a model on the Hub
- model_file = hf_hub_download( # Download from the hub, passing same input args
+ model = cls(**model_kwargs)
+ if os.path.isdir(model_id):
+ print("Loading weights from local directory")
+ model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE)
+ return cls._load_as_safetensor(model, model_file, map_location, strict)
+
+ model_file = hf_hub_download(
repo_id=model_id,
- filename="pytorch_model.bin",
+ filename=SAFETENSORS_SINGLE_FILE,
revision=revision,
cache_dir=cache_dir,
force_download=force_download,
@@ -269,14 +259,8 @@ class PyTorchModelHubMixin(ModelHubMixin):
resume_download=resume_download,
token=token,
local_files_only=local_files_only,
- )
-
- # Load model and return - custom logic depending on your framework
- model = cls(**model_kwargs)
- state_dict = torch.load(model_file, map_location=torch.device(map_location))
- model.load_state_dict(state_dict, strict=strict)
- model.eval()
- return model
+ )
+ return cls._load_as_safetensor(model, model_file, map_location, strict)
```
And that's it! Your library now enables users to upload and download files to and from the Hub.
| [
{
"components": [
{
"doc": "Inspect __init__ signature only once when subclassing.",
"lines": [
103,
110
],
"name": "ModelHubMixin.__init_subclass__",
"signature": "def __init_subclass__(cls) -> None:",
"type": "function"
},
{... | [
"tests/test_utils_typing.py::test_is_jsonable_success[123]",
"tests/test_utils_typing.py::test_is_jsonable_success[3.14]",
"tests/test_utils_typing.py::test_is_jsonable_success[Hello,",
"tests/test_utils_typing.py::test_is_jsonable_success[True]",
"tests/test_utils_typing.py::test_is_jsonable_success[None]"... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Correctly inject config in `PytorchModelHubMixin`
Solves https://github.com/huggingface/huggingface_hub/pull/2079.
With this PR, users don't have to rely on a `config` parameter to configure their model. We read default+passed values automatically to build the config file ourselves. This way, we are able to save a correct `config.json` file so that reloaded model have exact same config. Only "jsonable" fields are saved in config.json.
And ofc it's backward compatible so users that prefer to pass `config.json` directly are still supported.
PR is inspired by Inspired by https://github.com/facebookresearch/hiera/pull/26. cc @dbolya with this change, you won't need the `@has_config` decorator anymore :hugs:
Here is an example to showcase it:
```python
>>> import torch
>>> import torch.nn as nn
>>> from huggingface_hub import PyTorchModelHubMixin
>>> class MyModel(nn.Module, PyTorchModelHubMixin):
... def __init__(self, hidden_size: int = 512, vocab_size: int = 30000, output_size: int = 4):
... super().__init__()
... self.param = nn.Parameter(torch.rand(hidden_size, vocab_size))
... self.linear = nn.Linear(output_size, vocab_size)
... def forward(self, x):
... return self.linear(x + self.param)
>>> model = MyModel(hidden_size=256)
# Save model weights to local directory
>>> model.save_pretrained("my-awesome-model")
# Push model weights to the Hub
>>> model.push_to_hub("my-awesome-model")
# Download and initialize weights from the Hub
>>> model = MyModel.from_pretrained("username/my-awesome-model")
>>> model.hidden_size
256
```
cc @mfuntowicz as well. This shouldn't impact you except that the generated config.json files should be more complete.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/huggingface_hub/hub_mixin.py]
(definition of ModelHubMixin.__init_subclass__:)
def __init_subclass__(cls) -> None:
"""Inspect __init__ signature only once when subclassing."""
(definition of _load_dataclass:)
def _load_dataclass(datacls: Type["DataclassInstance"], data: dict) -> "DataclassInstance":
"""Load a dataclass instance from a dictionary.
Fields not expected by the dataclass are ignored."""
[end of new definitions in src/huggingface_hub/hub_mixin.py]
[start of new definitions in src/huggingface_hub/utils/_typing.py]
(definition of is_jsonable:)
def is_jsonable(obj: Any) -> bool:
"""Check if an object is JSON serializable.
This is a weak check, as it does not check for the actual JSON serialization, but only for the types of the object.
It works correctly for basic use cases but do not guarantee an exhaustive check.
Object is considered to be recursively json serializable if:
- it is an instance of int, float, str, bool, or NoneType
- it is a list or tuple and all its items are json serializable
- it is a dict and all its keys are strings and all its values are json serializable"""
[end of new definitions in src/huggingface_hub/utils/_typing.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4058e1f97ebe256b2f3006d4bc31be275c66df6b | |
astropy__astropy-16135 | 16,135 | astropy/astropy | v5.3 | ea875472867f296eee3ed75989ed402d55587940 | 2024-02-29T23:40:43Z | diff --git a/astropy/coordinates/representation/cylindrical.py b/astropy/coordinates/representation/cylindrical.py
index 9127fb2dcb08..acd9ab936953 100644
--- a/astropy/coordinates/representation/cylindrical.py
+++ b/astropy/coordinates/representation/cylindrical.py
@@ -11,7 +11,7 @@
from .base import BaseDifferential, BaseRepresentation
from .cartesian import CartesianRepresentation
-from .spherical import _spherical_op_funcs
+from .spherical import PhysicsSphericalRepresentation, _spherical_op_funcs
class CylindricalRepresentation(BaseRepresentation):
@@ -135,6 +135,22 @@ def _scale_operation(self, op, *args):
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
+ def represent_as(self, other_class, differential_class=None):
+ if isinstance(other_class, type):
+ if issubclass(other_class, PhysicsSphericalRepresentation):
+ diffs = self._re_represent_differentials(
+ other_class, differential_class
+ )
+ r = np.hypot(self.rho, self.z)
+ return other_class(
+ r=r,
+ theta=np.arccos(self.z / r),
+ phi=self.phi,
+ differentials=diffs,
+ )
+
+ return super().represent_as(other_class, differential_class)
+
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
diff --git a/astropy/coordinates/representation/spherical.py b/astropy/coordinates/representation/spherical.py
index 5ef93c8f4a00..dba9c7e1f9bc 100644
--- a/astropy/coordinates/representation/spherical.py
+++ b/astropy/coordinates/representation/spherical.py
@@ -750,6 +750,19 @@ def represent_as(self, other_class, differential_class=None):
differentials=diffs,
copy=False,
)
+ from .cylindrical import CylindricalRepresentation
+
+ if issubclass(other_class, CylindricalRepresentation):
+ diffs = self._re_represent_differentials(
+ other_class, differential_class
+ )
+ return other_class(
+ rho=self.r * np.sin(self.theta),
+ phi=self.phi,
+ z=self.r * np.cos(self.theta),
+ differentials=diffs,
+ copy=False,
+ )
return super().represent_as(other_class, differential_class)
| diff --git a/astropy/coordinates/tests/test_representation.py b/astropy/coordinates/tests/test_representation.py
index d2d257e30a6f..2f84236bdd5e 100644
--- a/astropy/coordinates/tests/test_representation.py
+++ b/astropy/coordinates/tests/test_representation.py
@@ -842,6 +842,25 @@ def test_representation_shortcuts(self):
)
assert representation_equal_up_to_angular_type(got, expected)
+ got = sph.represent_as(CylindricalRepresentation, CylindricalDifferential)
+ assert np.may_share_memory(sph.phi, got.phi)
+ expected = BaseRepresentation.represent_as(
+ sph, CylindricalRepresentation, CylindricalDifferential
+ )
+ assert_allclose_quantity(got.rho, expected.rho, atol=5e-17 * u.kpc)
+ assert_allclose_quantity(got.phi, expected.phi, atol=3e-16 * u.deg)
+ assert_array_equal(got.z, expected.z)
+
+ def test_to_cylindrical_at_the_origin(self):
+ """Test that the transformation to cylindrical at the origin preserves phi."""
+ sph = PhysicsSphericalRepresentation(
+ phi=270 * u.deg, theta=45 * u.deg, r=0 * u.kpc
+ )
+ cyl = sph.represent_as(CylindricalRepresentation)
+ assert cyl.rho == 0.0 * u.kpc
+ assert cyl.z == 0.0 * u.kpc
+ assert cyl.phi == 270 * u.deg # phi is preserved exactly
+
def test_initialize_with_nan(self):
# Regression test for gh-11558: initialization used to fail.
psr = PhysicsSphericalRepresentation(
@@ -1380,6 +1399,39 @@ def test_transform(self):
assert_allclose_quantity(s3.z, expected.z)
assert_allclose_quantity(s3.rho, expected.rho)
+ def test_representation_shortcuts(self):
+ """Test that shortcuts in ``represent_as`` don't fail."""
+ difs = CylindricalDifferential(
+ d_rho=4 * u.km / u.s, d_phi=5 * u.mas / u.yr, d_z=6 * u.km / u.s
+ )
+ cyl = CylindricalRepresentation(
+ rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc, differentials={"s": difs}
+ )
+
+ # PhysicsSpherical Representation
+ got = cyl.represent_as(
+ PhysicsSphericalRepresentation, PhysicsSphericalDifferential
+ )
+ expected = BaseRepresentation.represent_as(
+ cyl, PhysicsSphericalRepresentation, PhysicsSphericalDifferential
+ )
+ assert_allclose_quantity(got.r, expected.r)
+ assert_allclose_quantity(got.phi, expected.phi)
+ assert_allclose_quantity(got.theta, expected.theta)
+ assert representation_equal_up_to_angular_type(got, expected)
+
+ def test_to_physicsspherical_at_the_origin(self):
+ """Test that the transformation to physicsspherical at the origin preserves phi."""
+ cyl = CylindricalRepresentation(
+ rho=0 * u.kpc,
+ phi=23.5 * u.deg,
+ z=3 * u.kpc,
+ )
+ sph = cyl.represent_as(PhysicsSphericalRepresentation)
+ assert_allclose(sph.r, 3 * u.kpc)
+ assert_allclose(sph.theta, 0 * u.deg)
+ assert cyl.phi == 23.5 * u.deg # phi is preserved exactly
+
class TestUnitSphericalCosLatDifferential:
@pytest.mark.parametrize("matrix", list(matrices.values()))
| [
{
"components": [
{
"doc": "",
"lines": [
138,
152
],
"name": "CylindricalRepresentation.represent_as",
"signature": "def represent_as(self, other_class, differential_class=None):",
"type": "function"
}
],
"file": "astropy/c... | [
"astropy/coordinates/tests/test_representation.py::TestPhysicsSphericalRepresentation::test_representation_shortcuts",
"astropy/coordinates/tests/test_representation.py::TestPhysicsSphericalRepresentation::test_to_cylindrical_at_the_origin"
] | [
"astropy/coordinates/tests/test_representation.py::TestRadialRepresentation::test_transform",
"astropy/coordinates/tests/test_representation.py::TestSphericalRepresentation::test_name",
"astropy/coordinates/tests/test_representation.py::TestSphericalRepresentation::test_empty_init",
"astropy/coordinates/tests... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: fast-path physicsspherical to cylindrical
- [ ] By checking this box, the PR author has requested that maintainers do **NOT** use the "Squash and Merge" button. Maintainers should respect this when possible; however, the final decision is at the discretion of the maintainer that merges the PR.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in astropy/coordinates/representation/cylindrical.py]
(definition of CylindricalRepresentation.represent_as:)
def represent_as(self, other_class, differential_class=None):
[end of new definitions in astropy/coordinates/representation/cylindrical.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 2d281019494aaebf522f6626c0dae37510c16688 | ||
conan-io__conan-15775 | 15,775 | conan-io/conan | null | 0d09f6bc9d97bea13e2ddfae32295261d49fb8f6 | 2024-02-28T15:22:32Z | diff --git a/conan/internal/internal_tools.py b/conan/internal/internal_tools.py
new file mode 100644
index 00000000000..03a4cc0c01b
--- /dev/null
+++ b/conan/internal/internal_tools.py
@@ -0,0 +1,18 @@
+from conans.errors import ConanException
+
+universal_arch_separator = '|'
+
+
+def is_universal_arch(settings_value, valid_definitions):
+ if settings_value is None or valid_definitions is None or universal_arch_separator not in settings_value:
+ return False
+
+ parts = settings_value.split(universal_arch_separator)
+
+ if parts != sorted(parts):
+ raise ConanException(f"Architectures must be in alphabetical order separated by "
+ f"{universal_arch_separator}")
+
+ valid_macos_values = [val for val in valid_definitions if ("arm" in val or "x86" in val)]
+
+ return all(part in valid_macos_values for part in parts)
diff --git a/conan/tools/cmake/toolchain/blocks.py b/conan/tools/cmake/toolchain/blocks.py
index 01f5dc07437..c1c68a4de9f 100644
--- a/conan/tools/cmake/toolchain/blocks.py
+++ b/conan/tools/cmake/toolchain/blocks.py
@@ -5,7 +5,8 @@
from jinja2 import Template
-from conan.tools.apple.apple import get_apple_sdk_fullname
+from conan.internal.internal_tools import universal_arch_separator, is_universal_arch
+from conan.tools.apple.apple import get_apple_sdk_fullname, _to_apple_arch
from conan.tools.android.utils import android_abi
from conan.tools.apple.apple import is_apple_os, to_apple_arch
from conan.tools.build import build_jobs
@@ -355,10 +356,19 @@ def context(self):
if not is_apple_os(self._conanfile):
return None
+ def to_apple_archs(conanfile, default=None):
+ f"""converts conan-style architectures into Apple-style archs
+ to be used by CMake also supports multiple architectures
+ separated by '{universal_arch_separator}'"""
+ arch_ = conanfile.settings.get_safe("arch") if conanfile else None
+ if arch_ is not None:
+ return ";".join([_to_apple_arch(arch, default) for arch in
+ arch_.split(universal_arch_separator)])
+
# check valid combinations of architecture - os ?
# for iOS a FAT library valid for simulator and device can be generated
# if multiple archs are specified "-DCMAKE_OSX_ARCHITECTURES=armv7;armv7s;arm64;i386;x86_64"
- host_architecture = to_apple_arch(self._conanfile)
+ host_architecture = to_apple_archs(self._conanfile)
host_os_version = self._conanfile.settings.get_safe("os.version")
host_sdk_name = self._conanfile.conf.get("tools.apple:sdk_path") or get_apple_sdk_fullname(self._conanfile)
@@ -815,6 +825,11 @@ def _get_generic_system_name(self):
return cmake_system_name_map.get(os_host, os_host)
def _is_apple_cross_building(self):
+
+ if is_universal_arch(self._conanfile.settings.get_safe("arch"),
+ self._conanfile.settings.possible_values().get("arch")):
+ return False
+
os_host = self._conanfile.settings.get_safe("os")
arch_host = self._conanfile.settings.get_safe("arch")
arch_build = self._conanfile.settings_build.get_safe("arch")
@@ -829,7 +844,9 @@ def _get_cross_build(self):
system_version = self._conanfile.conf.get("tools.cmake.cmaketoolchain:system_version")
system_processor = self._conanfile.conf.get("tools.cmake.cmaketoolchain:system_processor")
- if not user_toolchain: # try to detect automatically
+ # try to detect automatically
+ if not user_toolchain and not is_universal_arch(self._conanfile.settings.get_safe("arch"),
+ self._conanfile.settings.possible_values().get("arch")):
os_host = self._conanfile.settings.get_safe("os")
arch_host = self._conanfile.settings.get_safe("arch")
if arch_host == "armv8":
diff --git a/conans/model/settings.py b/conans/model/settings.py
index c7b3837aa3b..d9cf3e8ed7b 100644
--- a/conans/model/settings.py
+++ b/conans/model/settings.py
@@ -1,5 +1,6 @@
import yaml
+from conan.internal.internal_tools import is_universal_arch
from conans.errors import ConanException
@@ -98,7 +99,8 @@ def __delattr__(self, item):
def _validate(self, value):
value = str(value) if value is not None else None
- if "ANY" not in self._definition and value not in self._definition:
+ is_universal = is_universal_arch(value, self._definition) if self._name == "settings.arch" else False
+ if "ANY" not in self._definition and value not in self._definition and not is_universal:
raise ConanException(bad_value_msg(self._name, value, self._definition))
return value
| diff --git a/conans/test/functional/toolchains/cmake/test_universal_binaries.py b/conans/test/functional/toolchains/cmake/test_universal_binaries.py
new file mode 100644
index 00000000000..b4743c93030
--- /dev/null
+++ b/conans/test/functional/toolchains/cmake/test_universal_binaries.py
@@ -0,0 +1,101 @@
+import os
+import platform
+import textwrap
+
+import pytest
+
+from conans.test.utils.tools import TestClient
+from conans.util.files import rmdir
+
+
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
+@pytest.mark.tool("cmake", "3.23")
+def test_create_universal_binary():
+ client = TestClient()
+
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.cmake import CMake, cmake_layout
+ class mylibraryRecipe(ConanFile):
+ package_type = "library"
+ generators = "CMakeToolchain"
+ settings = "os", "compiler", "build_type", "arch"
+ options = {"shared": [True, False], "fPIC": [True, False]}
+ default_options = {"shared": False, "fPIC": True}
+ exports_sources = "CMakeLists.txt", "src/*", "include/*"
+
+ def layout(self):
+ cmake_layout(self)
+
+ def build(self):
+ cmake = CMake(self)
+ cmake.configure()
+ cmake.build()
+ self.run("lipo -info libmylibrary.a")
+
+ def package(self):
+ cmake = CMake(self)
+ cmake.install()
+
+ def package_info(self):
+ self.cpp_info.libs = ["mylibrary"]
+ """)
+
+ test_conanfile = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.cmake import CMake, cmake_layout
+ from conan.tools.build import can_run
+
+ class mylibraryTestConan(ConanFile):
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "CMakeDeps", "CMakeToolchain"
+
+ def requirements(self):
+ self.requires(self.tested_reference_str)
+
+ def build(self):
+ cmake = CMake(self)
+ cmake.configure()
+ cmake.build()
+
+ def layout(self):
+ cmake_layout(self)
+
+ def test(self):
+ exe = os.path.join(self.cpp.build.bindir, "example")
+ self.run(f"lipo {exe} -info", env="conanrun")
+ """)
+
+ client.run("new cmake_lib -d name=mylibrary -d version=1.0")
+ client.save({"conanfile.py": conanfile, "test_package/conanfile.py": test_conanfile})
+
+ client.run('create . --name=mylibrary --version=1.0 '
+ '-s="arch=armv8|armv8.3|x86_64" --build=missing -tf=""')
+
+ assert "libmylibrary.a are: x86_64 arm64 arm64e" in client.out
+
+ client.run('test test_package mylibrary/1.0 -s="arch=armv8|armv8.3|x86_64"')
+
+ assert "example are: x86_64 arm64 arm64e" in client.out
+
+ client.run('new cmake_exe -d name=foo -d version=1.0 -d requires=mylibrary/1.0 --force')
+
+ client.run('install . -s="arch=armv8|armv8.3|x86_64"')
+
+ client.run_command("cmake --preset conan-release")
+ client.run_command("cmake --build --preset conan-release")
+ client.run_command("lipo -info ./build/Release/foo")
+
+ assert "foo are: x86_64 arm64 arm64e" in client.out
+
+ rmdir(os.path.join(client.current_folder, "build"))
+
+ client.run('install . -s="arch=armv8|armv8.3|x86_64" '
+ '-c tools.cmake.cmake_layout:build_folder_vars=\'["settings.arch"]\'')
+
+ client.run_command("cmake --preset \"conan-armv8|armv8.3|x86_64-release\" ")
+ client.run_command("cmake --build --preset \"conan-armv8|armv8.3|x86_64-release\" ")
+ client.run_command("lipo -info './build/armv8|armv8.3|x86_64/Release/foo'")
+
+ assert "foo are: x86_64 arm64 arm64e" in client.out
diff --git a/conans/test/unittests/tools/apple/test_apple_tools.py b/conans/test/unittests/tools/apple/test_apple_tools.py
index 50e410e3756..ee8586b713f 100644
--- a/conans/test/unittests/tools/apple/test_apple_tools.py
+++ b/conans/test/unittests/tools/apple/test_apple_tools.py
@@ -2,10 +2,13 @@
import pytest
import textwrap
+from conan.internal.internal_tools import is_universal_arch
+from conans.errors import ConanException
from conans.test.utils.mocks import ConanFileMock, MockSettings, MockOptions
from conans.test.utils.test_files import temp_folder
from conan.tools.apple import is_apple_os, to_apple_arch, fix_apple_shared_install_name, XCRun
-from conan.tools.apple.apple import _get_dylib_install_name # testing private function
+from conan.tools.apple.apple import _get_dylib_install_name
+
def test_tools_apple_is_apple_os():
conanfile = ConanFileMock()
@@ -51,6 +54,7 @@ def test_xcrun_public_settings():
assert settings.os == "watchOS"
+
def test_get_dylib_install_name():
# https://github.com/conan-io/conan/issues/13014
single_arch = textwrap.dedent("""
@@ -70,3 +74,25 @@ def test_get_dylib_install_name():
mock_output_runner.return_value = mock_output
install_name = _get_dylib_install_name("otool", "/path/to/libwebp.7.dylib")
assert "/absolute/path/lib/libwebp.7.dylib" == install_name
+
+
+@pytest.mark.parametrize("settings_value,valid_definitions,result", [
+ ("arm64|x86_64", ["arm64", "x86_64", "armv7", "x86"], True),
+ ("x86_64|arm64", ["arm64", "x86_64", "armv7", "x86"], None),
+ ("armv7|x86", ["arm64", "x86_64", "armv7", "x86"], True),
+ ("x86|armv7", ["arm64", "x86_64", "armv7", "x86"], None),
+ (None, ["arm64", "x86_64", "armv7", "x86"], False),
+ ("arm64|armv7|x86_64", ["arm64", "x86_64", "armv7", "x86"], True),
+ ("x86|arm64", ["arm64", "x86_64", "armv7", "x86"], None),
+ ("arm64|ppc32", None, False),
+ (None, None, False),
+ ("armv7|x86", None, False),
+ ("arm64", ["arm64", "x86_64"], False),
+])
+# None is for the exception case
+def test_is_universal_arch(settings_value, valid_definitions, result):
+ if result is None:
+ with pytest.raises(ConanException):
+ is_universal_arch(settings_value, valid_definitions)
+ else:
+ assert is_universal_arch(settings_value, valid_definitions) == result
| [
{
"components": [
{
"doc": "",
"lines": [
6,
18
],
"name": "is_universal_arch",
"signature": "def is_universal_arch(settings_value, valid_definitions):",
"type": "function"
}
],
"file": "conan/internal/internal_tools.py"
}... | [
"conans/test/unittests/tools/apple/test_apple_tools.py::test_tools_apple_is_apple_os",
"conans/test/unittests/tools/apple/test_apple_tools.py::test_tools_apple_to_apple_arch",
"conans/test/unittests/tools/apple/test_apple_tools.py::test_fix_shared_install_name_no_libraries",
"conans/test/unittests/tools/apple... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Minimal proof of concept of universal binaries support for CMakeToolchain
Changelog: Feature: Add basic support in CMakeToolchain for universal binaries.
Docs: https://github.com/conan-io/docs/pull/3642
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/internal/internal_tools.py]
(definition of is_universal_arch:)
def is_universal_arch(settings_value, valid_definitions):
[end of new definitions in conan/internal/internal_tools.py]
[start of new definitions in conan/tools/cmake/toolchain/blocks.py]
(definition of AppleSystemBlock.context.to_apple_archs:)
def to_apple_archs(conanfile, default=None):
[end of new definitions in conan/tools/cmake/toolchain/blocks.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
joke2k__faker-1999 | 1,999 | joke2k/faker | null | a8e137a1fe2bf1d02b4322cc2152a40634b2b3bd | 2024-02-26T22:56:14Z | diff --git a/faker/providers/bank/uk_UA/__init__.py b/faker/providers/bank/uk_UA/__init__.py
index 3b53dd45f4..5f5a560fea 100644
--- a/faker/providers/bank/uk_UA/__init__.py
+++ b/faker/providers/bank/uk_UA/__init__.py
@@ -5,7 +5,83 @@ class Provider(BankProvider):
"""Implement bank provider for ``uk_UA`` locale.
Source for rules for bban format:
https://bank.gov.ua/en/iban
+ Banks list:
+ https://ubanks.com.ua/adr/
"""
bban_format = "#" * 27
country_code = "UA"
+ banks = (
+ 'izibank',
+ 'monobank',
+ 'O.Bank',
+ 'sportbank',
+ 'А-Банк',
+ 'Агропросперіс Банк',
+ 'АкордБанк',
+ 'Альтбанк',
+ 'Асвіо Банк',
+ 'Банк 3/4',
+ 'Банк Авангард',
+ 'Банк Альянс',
+ 'Банк Власний Рахунок',
+ 'Банк Восток',
+ 'Банк інвестицій та заощаджень',
+ 'Банк Кредит Дніпро',
+ 'Банк Портал',
+ 'Банк Український Капітал',
+ 'Банк Фамільний',
+ 'БТА Банк',
+ 'Глобус',
+ 'Грант',
+ 'Дойче Банк ДБУ',
+ 'Європейський Промисловий Банк',
+ 'Ідея Банк',
+ 'ІНГ Банк Україна',
+ 'Індустріалбанк',
+ 'Кліринговий Дім',
+ 'Комінбанк',
+ 'КомІнвестБанк',
+ 'Кредит Європа Банк',
+ 'Кредитвест Банк',
+ 'Креді Агріколь',
+ 'Кредобанк',
+ 'Кристалбанк',
+ 'Львів',
+ 'МетаБанк',
+ 'Міжнародний Інвестиційний Банк',
+ 'Мотор-Банк',
+ 'МТБ Банк',
+ 'Національний банк України',
+ 'Оксі Банк',
+ 'ОТП Банк',
+ 'Ощадбанк',
+ 'Перший Інвестиційний Банк',
+ 'Перший Український Міжнародний Банк',
+ 'Південний',
+ 'Піреус Банк',
+ 'Полікомбанк',
+ 'Полтава-Банк',
+ 'Правекс Банк',
+ 'ПриватБанк',
+ 'ПроКредит Банк',
+ 'Радабанк',
+ 'Райффайзен Банк',
+ 'РВС Банк',
+ 'СЕБ Корпоративний Банк',
+ 'Сенс Банк',
+ 'Сітібанк',
+ 'Скай Банк',
+ 'ТАСкомбанк',
+ 'Траст-капітал',
+ 'Український банк реконструкції та розвитку',
+ 'Укргазбанк',
+ 'Укрексімбанк',
+ 'УкрСиббанк',
+ 'Універсал Банк',
+ 'Юнекс Банк',
+ )
+
+ def bank(self) -> str:
+ """Generate a bank name."""
+ return self.random_element(self.banks)
diff --git a/faker/providers/credit_card/uk_UA/__init__.py b/faker/providers/credit_card/uk_UA/__init__.py
new file mode 100644
index 0000000000..8eee3afd0a
--- /dev/null
+++ b/faker/providers/credit_card/uk_UA/__init__.py
@@ -0,0 +1,56 @@
+from collections import OrderedDict
+from typing import Optional
+
+from faker.providers.person.uk_UA import translit
+from .. import CardType, CreditCard
+from .. import Provider as CreditCardProvider
+
+
+class Provider(CreditCardProvider):
+ """Implement credit card provider for ``uk_UA`` locale.
+ https://blog.ipay.ua/uk/sekrety-bankovskix-kart-kak-identificirovat-bank-po-nomeru-karty/
+ """
+ prefix_visa = ["4"]
+ prefix_mastercard = ["51", "52", "53", "54"]
+ prefix_prostir = ["9"]
+ prefix_maestro = ["6762"]
+
+ credit_card_types = OrderedDict(
+ (
+ ("visa", CreditCard("Visa", prefix_visa, security_code="CVV2")),
+ ("mastercard", CreditCard("Mastercard", prefix_mastercard, security_code="CVC2")),
+ ("prostir", CreditCard("ПРОСТІР", prefix_prostir, security_code="CVC2")),
+ ("maestro", CreditCard("Maestro", prefix_maestro, security_code="CVV")),
+ )
+ )
+
+ def credit_card_full(self, card_type: Optional[CardType] = None) -> str:
+ """ Generate UA Credit Card:
+ Supported card types 'visa', 'mastercard', 'prostir', 'maestro'
+
+ :sample:
+ :sample: card_type="prostir"
+ :sample: card_type="mastercard"
+ """
+ card = self._credit_card_type(card_type)
+ tpl = "{provider}\n" "{owner}\n" "{number} {expire_date}\n" "{security}: {security_nb}\n" "{issuer}"
+ tpl = tpl.format(
+ provider=card.name,
+ owner=translit(
+ self.generator.parse(
+ self.random_element(
+ [
+ "{{first_name_male}} {{last_name_male}}",
+ "{{first_name_female}} {{last_name_female}}",
+ ]
+ )
+ )
+ ),
+ number=self.credit_card_number(card),
+ expire_date=self.credit_card_expire(),
+ security=card.security_code,
+ security_nb=self.credit_card_security_code(card),
+ issuer=self.generator.parse("{{bank}}"),
+ )
+
+ return self.generator.parse(tpl)
diff --git a/faker/providers/person/uk_UA/__init__.py b/faker/providers/person/uk_UA/__init__.py
index c991c0ac0d..56a785cf0e 100644
--- a/faker/providers/person/uk_UA/__init__.py
+++ b/faker/providers/person/uk_UA/__init__.py
@@ -1,7 +1,8 @@
from collections import OrderedDict
from typing import Dict, Optional
-from ....typing import SexLiteral
+from faker.typing import SexLiteral
+
from .. import ElementsType
from .. import Provider as PersonProvider
| diff --git a/tests/providers/test_credit_card.py b/tests/providers/test_credit_card.py
index 6cd379345b..a0c8fab3b6 100644
--- a/tests/providers/test_credit_card.py
+++ b/tests/providers/test_credit_card.py
@@ -4,6 +4,7 @@
from faker.providers.bank.ru_RU import Provider as RuRuBankProvider
from faker.providers.credit_card import Provider as CreditCardProvider
+from faker.providers.bank.uk_UA import Provider as UkUaBankProvider
class TestCreditCardProvider:
@@ -152,3 +153,39 @@ def test_maestro(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("maestro")
assert self.maestro_pattern.fullmatch(number)
+
+
+class TestUkUa:
+ mastercard_pattern: Pattern = re.compile(
+ r"(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}",
+ )
+ visa_pattern: Pattern = re.compile(r"4[0-9]{12}([0-9]{3}){0,2}")
+ maestro_pattern: Pattern = re.compile(r"(67)[0-9]{14}")
+ prostir_pattern: Pattern = re.compile(r"(9)[0-9]{15}")
+
+ def test_mastercard(self, faker, num_samples):
+ for _ in range(num_samples):
+ number = faker.credit_card_number("mastercard")
+ assert self.mastercard_pattern.fullmatch(number)
+
+ def test_visa(self, faker, num_samples):
+ for _ in range(num_samples):
+ number = faker.credit_card_number("visa")
+ assert self.visa_pattern.fullmatch(number)
+
+ def test_maestro(self, faker, num_samples):
+ for _ in range(num_samples):
+ number = faker.credit_card_number("maestro")
+ assert self.maestro_pattern.fullmatch(number)
+
+ def test_prostir(self, faker, num_samples):
+ for _ in range(num_samples):
+ number = faker.credit_card_number("prostir")
+ assert self.prostir_pattern.fullmatch(number)
+
+ def test_credit_card_full(self, faker, num_samples):
+ for _ in range(num_samples):
+ card_data = faker.credit_card_full('prostir').split("\n")
+ assert re.match("[A-Za-z]+", card_data[1])
+ assert card_data[4] in UkUaBankProvider.banks
+ assert card_data[0] == 'ПРОСТІР'
| [
{
"components": [
{
"doc": "Generate a bank name.",
"lines": [
85,
87
],
"name": "Provider.bank",
"signature": "def bank(self) -> str:",
"type": "function"
}
],
"file": "faker/providers/bank/uk_UA/__init__.py"
},
{
"... | [
"tests/providers/test_credit_card.py::TestUkUa::test_maestro",
"tests/providers/test_credit_card.py::TestUkUa::test_prostir",
"tests/providers/test_credit_card.py::TestUkUa::test_credit_card_full"
] | [
"tests/providers/test_credit_card.py::TestCreditCardProvider::test_mastercard",
"tests/providers/test_credit_card.py::TestCreditCardProvider::test_visa13",
"tests/providers/test_credit_card.py::TestCreditCardProvider::test_visa16",
"tests/providers/test_credit_card.py::TestCreditCardProvider::test_visa19",
... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(uk_UA-credit-cards) Add Uk-UA credit card provider
### Add UA Credit card provider
Add tests
Add bank list
Add translit function
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/bank/uk_UA/__init__.py]
(definition of Provider.bank:)
def bank(self) -> str:
"""Generate a bank name."""
[end of new definitions in faker/providers/bank/uk_UA/__init__.py]
[start of new definitions in faker/providers/credit_card/uk_UA/__init__.py]
(definition of Provider:)
class Provider(CreditCardProvider):
"""Implement credit card provider for ``uk_UA`` locale.
https://blog.ipay.ua/uk/sekrety-bankovskix-kart-kak-identificirovat-bank-po-nomeru-karty/"""
(definition of Provider.credit_card_full:)
def credit_card_full(self, card_type: Optional[CardType] = None) -> str:
"""Generate UA Credit Card:
Supported card types 'visa', 'mastercard', 'prostir', 'maestro'
:sample:
:sample: card_type="prostir"
:sample: card_type="mastercard""""
[end of new definitions in faker/providers/credit_card/uk_UA/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
pygments__pygments-2654 | 2,654 | pygments/pygments | null | 41a8a63c993affb665d193222d8da5fdb9ae173a | 2024-02-25T22:46:46Z | diff --git a/AUTHORS b/AUTHORS
index a7928ea88b..4ec64ba1ef 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -116,6 +116,8 @@ Other contributors, listed alphabetically, are:
MSDOS session, BC, WDiff
* Brian R. Jackson -- Tea lexer
* Christian Jann -- ShellSession lexer
+* Jonas Camillus Jeppesen -- Line numbers and line highlighting for
+ RTF-formatter
* Dennis Kaarsemaker -- sources.list lexer
* Dmitri Kabak -- Inferno Limbo lexer
* Igor Kalnitsky -- vhdl lexer
diff --git a/pygments/formatters/rtf.py b/pygments/formatters/rtf.py
index 9905ca0045..ee0e581553 100644
--- a/pygments/formatters/rtf.py
+++ b/pygments/formatters/rtf.py
@@ -8,8 +8,10 @@
:license: BSD, see LICENSE for details.
"""
+from collections import OrderedDict
from pygments.formatter import Formatter
-from pygments.util import get_int_opt, surrogatepair
+from pygments.style import _ansimap
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt, surrogatepair
__all__ = ['RtfFormatter']
@@ -42,6 +44,59 @@ class RtfFormatter(Formatter):
default is 24 half-points, giving a size 12 font.
.. versionadded:: 2.0
+
+ `linenos`
+ Turn on line numbering (default: ``False``).
+
+ .. versionadded:: 2.18
+
+ `lineno_fontsize`
+ Font size for line numbers. Size is specified in half points
+ (default: `fontsize`).
+
+ .. versionadded:: 2.18
+
+ `lineno_padding`
+ Number of spaces between the (inline) line numbers and the
+ source code (default: ``2``).
+
+ .. versionadded:: 2.18
+
+ `linenostart`
+ The line number for the first line (default: ``1``).
+
+ .. versionadded:: 2.18
+
+ `linenostep`
+ If set to a number n > 1, only every nth line number is printed.
+
+ .. versionadded:: 2.18
+
+ `lineno_color`
+ Color for line numbers specified as a hex triplet, e.g. ``'5e5e5e'``.
+ Defaults to the style's line number color if it is a hex triplet,
+ otherwise ansi bright black.
+
+ .. versionadded:: 2.18
+
+ `hl_lines`
+ Specify a list of lines to be highlighted, as line numbers separated by
+ spaces, e.g. ``'3 7 8'``. The line numbers are relative to the input
+ (i.e. the first line is line 1) unless `hl_linenostart` is set.
+
+ .. versionadded:: 2.18
+
+ `hl_color`
+ Color for highlighting the lines specified in `hl_lines`, specified as
+ a hex triplet (default: style's `highlight_color`).
+
+ .. versionadded:: 2.18
+
+ `hl_linenostart`
+ If set to ``True`` line numbers in `hl_lines` are specified
+ relative to `linenostart` (default ``False``).
+
+ .. versionadded:: 2.18
"""
name = 'RTF'
aliases = ['rtf']
@@ -62,6 +117,40 @@ def __init__(self, **options):
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
self.fontsize = get_int_opt(options, 'fontsize', 0)
+ self.linenos = get_bool_opt(options, 'linenos', False)
+ self.lineno_fontsize = get_int_opt(options, 'lineno_fontsize',
+ self.fontsize)
+ self.lineno_padding = get_int_opt(options, 'lineno_padding', 2)
+ self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
+ self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
+ self.hl_linenostart = get_bool_opt(options, 'hl_linenostart', False)
+
+ self.hl_color = options.get('hl_color', '')
+ if not self.hl_color:
+ self.hl_color = self.style.highlight_color
+
+ self.hl_lines = []
+ for lineno in get_list_opt(options, 'hl_lines', []):
+ try:
+ lineno = int(lineno)
+ if self.hl_linenostart:
+ lineno = lineno - self.linenostart + 1
+ self.hl_lines.append(lineno)
+ except ValueError:
+ pass
+
+ self.lineno_color = options.get('lineno_color', '')
+ if not self.lineno_color:
+ if self.style.line_number_color == 'inherit':
+ # style color is the css value 'inherit'
+ # default to ansi bright-black
+ self.lineno_color = _ansimap['ansibrightblack']
+ else:
+ # style color is assumed to be a hex triplet as other
+ # colors in pygments/style.py
+ self.lineno_color = self.style.line_number_color
+
+ self.color_mapping = self._create_color_mapping()
def _escape(self, text):
return text.replace('\\', '\\\\') \
@@ -90,43 +179,147 @@ def _escape_text(self, text):
# Force surrogate pairs
buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
- return ''.join(buf).replace('\n', '\\par\n')
+ return ''.join(buf).replace('\n', '\\par')
- def format_unencoded(self, tokensource, outfile):
- # rtf 1.8 header
- outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
- '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
- '{\\colortbl;' % (self.fontface and
- ' ' + self._escape(self.fontface) or
- ''))
-
- # convert colors and save them in a mapping to access them later.
- color_mapping = {}
+ @staticmethod
+ def hex_to_rtf_color(hex_color):
+ if hex_color[0] == "#":
+ hex_color = hex_color[1:]
+
+ return '\\red%d\\green%d\\blue%d;' % (
+ int(hex_color[0:2], 16),
+ int(hex_color[2:4], 16),
+ int(hex_color[4:6], 16)
+ )
+
+ def _split_tokens_on_newlines(self, tokensource):
+ """
+ Split tokens containing newline characters into multiple token
+ each representing a line of the input file. Needed for numbering
+ lines of e.g. multiline comments.
+ """
+ for ttype, value in tokensource:
+ if value == '\n':
+ yield (ttype, value)
+ elif "\n" in value:
+ lines = value.split("\n")
+ for line in lines[:-1]:
+ yield (ttype, line+"\n")
+ if lines[-1]:
+ yield (ttype, lines[-1])
+ else:
+ yield (ttype, value)
+
+ def _create_color_mapping(self):
+ """
+ Create a mapping of style hex colors to index/offset in
+ the RTF color table.
+ """
+ color_mapping = OrderedDict()
offset = 1
+
+ if self.linenos:
+ color_mapping[self.lineno_color] = offset
+ offset += 1
+
+ if self.hl_lines:
+ color_mapping[self.hl_color] = offset
+ offset += 1
+
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
- outfile.write('\\red%d\\green%d\\blue%d;' % (
- int(color[0:2], 16),
- int(color[2:4], 16),
- int(color[4:6], 16)
- ))
offset += 1
- outfile.write('}\\f0 ')
+
+ return color_mapping
+
+ @property
+ def _lineno_template(self):
+ if self.lineno_fontsize != self.fontsize:
+ return '{\\fs%s \\cf%s %%s%s}' \
+ % (self.lineno_fontsize,
+ self.color_mapping[self.lineno_color],
+ " " * self.lineno_padding)
+
+ return '{\\cf%s %%s%s}' \
+ % (self.color_mapping[self.lineno_color],
+ " " * self.lineno_padding)
+
+ @property
+ def _hl_open_str(self):
+ return r'{\highlight%s ' % self.color_mapping[self.hl_color]
+
+ @property
+ def _rtf_header(self):
+ lines = []
+ # rtf 1.8 header
+ lines.append('{\\rtf1\\ansi\\uc0\\deff0'
+ '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
+ % (self.fontface and ' '
+ + self._escape(self.fontface) or ''))
+
+ # color table
+ lines.append('{\\colortbl;')
+ for color, _ in self.color_mapping.items():
+ lines.append(self.hex_to_rtf_color(color))
+ lines.append('}')
+
+ # font and fontsize
+ lines.append('\\f0')
if self.fontsize:
- outfile.write('\\fs%d' % self.fontsize)
+ lines.append('\\fs%d' % self.fontsize)
+
+ # ensure Libre Office Writer imports and renders consecutive
+ # space characters the same width, needed for line numbering.
+ # https://bugs.documentfoundation.org/show_bug.cgi?id=144050
+ lines.append('\\dntblnsbdb')
+
+ return lines
+
+ def format_unencoded(self, tokensource, outfile):
+ for line in self._rtf_header:
+ outfile.write(line + "\n")
+
+ tokensource = self._split_tokens_on_newlines(tokensource)
+
+ # first pass of tokens to count lines, needed for line numbering
+ if self.linenos:
+ line_count = 0
+ tokens = [] # for copying the token source generator
+ for ttype, value in tokensource:
+ tokens.append((ttype, value))
+ if value.endswith("\n"):
+ line_count += 1
+
+ # width of line number strings (for padding with spaces)
+ linenos_width = len(str(line_count+self.linenostart-1))
+
+ tokensource = tokens
# highlight stream
+ lineno = 1
+ start_new_line = True
for ttype, value in tokensource:
+ if start_new_line and lineno in self.hl_lines:
+ outfile.write(self._hl_open_str)
+
+ if start_new_line and self.linenos:
+ if (lineno-self.linenostart+1)%self.linenostep == 0:
+ current_lineno = lineno + self.linenostart - 1
+ lineno_str = str(current_lineno).rjust(linenos_width)
+ else:
+ lineno_str = "".rjust(linenos_width)
+ outfile.write(self._lineno_template % lineno_str)
+
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
- buf.append('\\cb%d' % color_mapping[style['bgcolor']])
+ buf.append('\\cb%d' % self.color_mapping[style['bgcolor']])
if style['color']:
- buf.append('\\cf%d' % color_mapping[style['color']])
+ buf.append('\\cf%d' % self.color_mapping[style['color']])
if style['bold']:
buf.append('\\b')
if style['italic']:
@@ -135,12 +328,24 @@ def format_unencoded(self, tokensource, outfile):
buf.append('\\ul')
if style['border']:
buf.append('\\chbrdr\\chcfpat%d' %
- color_mapping[style['border']])
+ self.color_mapping[style['border']])
start = ''.join(buf)
if start:
outfile.write('{%s ' % start)
outfile.write(self._escape_text(value))
if start:
outfile.write('}')
+ start_new_line = False
+
+ # complete line of input
+ if value.endswith("\n"):
+ # close line highlighting
+ if lineno in self.hl_lines:
+ outfile.write('}')
+ # newline in RTF file after closing }
+ outfile.write("\n")
+
+ start_new_line = True
+ lineno += 1
- outfile.write('}')
+ outfile.write('}\n')
| diff --git a/tests/test_rtf_formatter.py b/tests/test_rtf_formatter.py
index a21939f043..6379e37d16 100644
--- a/tests/test_rtf_formatter.py
+++ b/tests/test_rtf_formatter.py
@@ -7,12 +7,17 @@
"""
from io import StringIO
+import itertools
+import re
+import pytest
from pygments.formatters import RtfFormatter
+from pygments.lexers import CppLexer, PythonLexer
from pygments.lexers.special import TextLexer
+from pygments.style import _ansimap, Style
+from pygments.token import Name, String, Token
-
-foot = (r'\par' '\n' r'}')
+foot = r'\par' '\n' r'}' + '\n'
def _escape(string):
@@ -26,9 +31,9 @@ def _build_message(*args, **kwargs):
result = _escape(kwargs.get('result', ''))
if string is None:
- string = ("The expected output of '{t}'\n"
- "\t\tShould be '{expected}'\n"
- "\t\tActually outputs '{result}'\n"
+ string = ("The expected output of '{t}'\n\n"
+ "\t\tShould be '{expected}'\n\n"
+ "\t\tActually outputs '{result}'\n\n"
"\t(WARNING: Partial Output of Result!)")
end = -len(_escape(foot))
@@ -39,9 +44,11 @@ def _build_message(*args, **kwargs):
expected = expected)
-def format_rtf(t):
- tokensource = list(TextLexer().get_tokens(t))
- fmt = RtfFormatter()
+def format_rtf(t, options=None, lexer=TextLexer):
+ if options is None:
+ options = {}
+ tokensource = lexer().get_tokens(t)
+ fmt = RtfFormatter(**options)
buf = StringIO()
fmt.format(tokensource, buf)
result = buf.getvalue()
@@ -49,6 +56,17 @@ def format_rtf(t):
return result
+def extract_color_table(rtf):
+ r"""
+ Return af list of \redR\greenG\blueB; color definitions
+ extracted from the input (the color table).
+ """
+ return re.findall((r"\\red[0-9]{1,3}"
+ r"\\green[0-9]{1,3}"
+ r"\\blue[0-9]{1,3};"),
+ rtf)
+
+
def test_rtf_header():
t = ''
result = format_rtf(t)
@@ -72,7 +90,7 @@ def test_rtf_footer():
def test_ascii_characters():
t = 'a b c d ~'
result = format_rtf(t)
- expected = (r'a b c d ~')
+ expected = r'a b c d ~'
msg = _build_message(t=t, result=result, expected=expected)
assert result.endswith(expected+foot), msg
@@ -101,3 +119,434 @@ def test_double_characters():
r'{\u8597}{\u65038} {\u55422}{\u56859}')
msg = _build_message(t=t, result=result, expected=expected)
assert result.endswith(expected+foot), msg
+
+
+def test_linenos_all_defaults():
+ t = 'line1\nline2\n'
+ options = {}
+ result = format_rtf(t, options)
+ expected = (r'line1\par' + '\n'
+ r'line2\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenos_text():
+ t = 'line1\nline2\n'
+ options = dict(linenos=True, lineno_padding=2)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 1 }line1\par' + '\n'
+ r'{\cf1 2 }line2\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenos_newline_characters():
+ t = r'line1\nline2' + '\n'
+ options = dict(linenos=True, lineno_padding=2)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 1 }line1\\nline2\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenos_python():
+ class TestStyle(Style):
+ name = 'rtf_formatter_test'
+ line_number_color = "#ff0000"
+ styles = {Token: '', String: '#00ff00', Name: '#0000ff'}
+
+ t = r's = "line1\nline2"' + '\n'
+ options = dict(linenos=True, lineno_padding=2, style=TestStyle)
+ result = format_rtf(t, options, PythonLexer)
+ expected = (r'{\rtf1\ansi\uc0\deff0{\fonttbl{\f0\fmodern\fprq1\fcharset0;}}' + '\n'
+ r'{\colortbl;' + '\n'
+ r'\red255\green0\blue0;' + '\n'
+ r'\red0\green255\blue0;' + '\n'
+ r'\red0\green0\blue255;' + '\n'
+ r'}' + '\n'
+ r'\f0' + '\n'
+ r'\dntblnsbdb' + '\n'
+ r'{\cf1 1 }{\cf3 s} = {\cf2 "}{\cf2 line1}{\cf2 \\n}{\cf2 line2}{\cf2 "}\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenos_left_padding():
+ t = '0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n'
+ options = dict(linenos=True, lineno_padding=2)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 9 }8\par' + '\n'
+ r'{\cf1 10 }9\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_lineno_padding():
+ t = 'line1\nline2\n'
+ options = dict(linenos=True, lineno_padding=3)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 1 }line1\par' + '\n'
+ r'{\cf1 2 }line2\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenostep():
+ t = 'line1\nline2\nline3\nline4\n'
+ options = dict(linenos=True,
+ linenostep=2,
+ lineno_padding=2)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 }line1\par' + '\n'
+ r'{\cf1 2 }line2\par' + '\n'
+ r'{\cf1 }line3\par' + '\n'
+ r'{\cf1 4 }line4\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenostart():
+ t = 'line1\nline2\nline3\nline4\n'
+ options = dict(linenos=True,
+ linenostart=3,
+ lineno_padding=2)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 3 }line1\par' + '\n'
+ r'{\cf1 4 }line2\par' + '\n'
+ r'{\cf1 5 }line3\par' + '\n'
+ r'{\cf1 6 }line4\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenostart_left_padding():
+ t = 'line1\nline2\nline3\n'
+ options = dict(linenos=True,
+ linenostart=98,
+ lineno_padding=2)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 98 }line1\par' + '\n'
+ r'{\cf1 99 }line2\par' + '\n'
+ r'{\cf1 100 }line3\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenos_hl_lines():
+ t = 'line1\nline2\nline3\n'
+ options = dict(linenos=True,
+ hl_lines="2 3",
+ lineno_padding=2)
+ result = format_rtf(t, options)
+ expected = (r'{\cf1 1 }line1\par' + '\n'
+ r'{\highlight2 {\cf1 2 }line2\par}' + '\n'
+ r'{\highlight2 {\cf1 3 }line3\par}' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_linenos_off_hl_lines():
+ t = 'line1\nline2\nline3\n'
+ options = dict(linenos=False,
+ hl_lines="2 3")
+ result = format_rtf(t, options)
+ expected = (r'line1\par' + '\n'
+ r'{\highlight1 line2\par}' + '\n'
+ r'{\highlight1 line3\par}' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_hl_linenostart_no_lines_highlighted():
+ t = 'line11\nline12\nline13\n'
+ options = dict(linenos=False,
+ hl_lines="2 3",
+ hl_linenostart=True,
+ linenostart=11)
+ result = format_rtf(t, options)
+ expected = (r'line11\par' + '\n'
+ r'line12\par' + '\n'
+ r'line13\par' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_hl_linenostart_lines_highlighted():
+ t = 'line11\nline12\nline13\n'
+ options = dict(linenos=False,
+ hl_lines="12 13",
+ hl_linenostart=True,
+ linenostart=11)
+ result = format_rtf(t, options)
+ expected = (r'line11\par' + '\n'
+ r'{\highlight1 line12\par}' + '\n'
+ r'{\highlight1 line13\par}' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+ assert result.endswith(expected), msg
+
+
+def test_lineno_color_style_specify_hex():
+ class TestStyle(Style):
+ name = 'rtf_formatter_test'
+ line_number_color = "#123456"
+
+ t = 'line1\nline2\n'
+ options = dict(linenos=True,
+ style=TestStyle)
+ result = format_rtf(t, options)
+ rtf_color_str = RtfFormatter.hex_to_rtf_color(TestStyle.line_number_color)
+ color_tbl = extract_color_table(result)
+ msg = (f"Color table {color_tbl} "
+ f"should have '{rtf_color_str}' "
+ "as first entry")
+
+ # With linenos=True the color table should contain:
+ # 1st entry: line number color (hence \cf1)
+ assert color_tbl[0] == rtf_color_str, msg
+
+
+def test_lineno_color_style_specify_inherit():
+ class TestStyle(Style):
+ name = 'rtf_formatter_test'
+ line_number_color = "inherit" # Default from pygments/style.py
+
+ t = 'line1\nline2\n'
+ options = dict(linenos=True,
+ style=TestStyle)
+ result = format_rtf(t, options)
+ rtf_color_str = RtfFormatter.hex_to_rtf_color(_ansimap['ansibrightblack'])
+ color_tbl = extract_color_table(result)
+ msg = (f"Color table {color_tbl} "
+ f"should have '{rtf_color_str}' "
+ "as first entry")
+
+ # With linenos=True the color table should contain:
+ # 1st entry: line number color (hence \cf1)
+ assert color_tbl[0] == rtf_color_str, msg
+
+
+def test_lineno_color_from_cli_option():
+ class TestStyle(Style):
+ name = 'rtf_formatter_test'
+ line_number_color = "#123456" # Default from pygments/style.py
+
+ option_color = "112233"
+ t = 'line1\nline2\n'
+ options = dict(linenos=True,
+ style=TestStyle,
+ lineno_color=option_color)
+ result = format_rtf(t, options)
+ rtf_color_str = RtfFormatter.hex_to_rtf_color(option_color)
+ color_tbl = extract_color_table(result)
+ msg = (f"Color table {color_tbl} "
+ f"should have '{rtf_color_str}' "
+ "as first entry")
+
+ # With linenos=True the color table should contain:
+ # 1st entry: line number color (hence \cf1)
+ assert color_tbl[0] == rtf_color_str, msg
+
+
+def test_hl_color_style():
+ class TestStyle(Style):
+ name = 'rtf_formatter_test'
+ line_number_color = "#123456"
+ highlight_color = "#abcdef"
+
+ t = 'line1\nline2\n'
+ options = dict(linenos=True,
+ lineno_padding=2,
+ style=TestStyle,
+ hl_lines="1 2")
+ result = format_rtf(t, options)
+
+ rtf_color = RtfFormatter.hex_to_rtf_color(TestStyle.highlight_color)
+
+ color_tbl = extract_color_table(result)
+ msg = (f"Color table {color_tbl} "
+ f"should have '{rtf_color}' "
+ "as second entry")
+
+ # With linenos=True and hl_lines="1 2" the color table should contain:
+ # 1st entry: line number color (hence \cf1)
+ # 2nd entry: highlight color (hence \highlight2)
+ assert color_tbl[1] == rtf_color, msg
+
+ expected = (r'{\highlight2 {\cf1 1 }line1\par}' + '\n'
+ r'{\highlight2 {\cf1 2 }line2\par}' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+
+ assert result.endswith(expected), msg
+
+
+def test_hl_color_style_no_linenos():
+ class TestStyle(Style):
+ name = 'rtf_formatter_test'
+ line_number_color = "#123456"
+ highlight_color = "#abcdef"
+
+ t = 'line1\nline2\n'
+ options = dict(linenos=False,
+ style=TestStyle,
+ hl_lines="1 2")
+ result = format_rtf(t, options)
+
+ rtf_color = RtfFormatter.hex_to_rtf_color(TestStyle.highlight_color)
+
+ color_tbl = extract_color_table(result)
+ msg = (f"Color table {color_tbl} "
+ f"should have '{rtf_color}' "
+ "as second entry")
+
+ # With linenos=False and hl_lines="1 2" the color table should contain:
+ # 1st entry: highlight color (hence \highlight1)
+ assert rtf_color in color_tbl and color_tbl[0] == rtf_color, msg
+
+ expected = (r'{\highlight1 line1\par}' + '\n'
+ r'{\highlight1 line2\par}' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+
+ assert result.endswith(expected), msg
+
+
+def test_hl_color_option():
+ class TestStyle(Style):
+ name = 'rtf_formatter_test'
+ line_number_color = "#123456"
+ highlight_color = "#abcdef"
+
+ t = 'line1\nline2\n'
+ hl_color = "aabbcc"
+ options = dict(linenos=False,
+ style=TestStyle,
+ hl_lines="1 2",
+ hl_color=hl_color)
+ result = format_rtf(t, options)
+
+ rtf_color = RtfFormatter.hex_to_rtf_color(hl_color)
+
+ color_tbl = extract_color_table(result)
+ msg = (f"Color table {color_tbl} "
+ f"should have '{rtf_color}' "
+ "as second entry")
+
+ # With linenos=False and hl_lines="1 2" the color table should contain:
+ # 1st entry: highlight color (hence \highlight1)
+ assert rtf_color in color_tbl and color_tbl[0] == rtf_color, msg
+
+ expected = (r'{\highlight1 line1\par}' + '\n'
+ r'{\highlight1 line2\par}' + '\n'
+ r'}' + '\n')
+ msg = _build_message(t=t, result=result, expected=expected)
+
+ assert result.endswith(expected), msg
+
+
+def test_all_options():
+ # Test if all combinations of options (given values and defaults)
+ # produce output:
+ #
+ # - No uncaught exceptions
+ # - Output contains one \par control word per input line
+
+ def get_option_combinations(options):
+ for _, values in options.items():
+ values.append('default')
+ # https://stackoverflow.com/a/40623158
+ combinations = (dict(zip(options.keys(), x))
+ for x in itertools.product(*options.values()))
+ for c in combinations:
+ yield {opt:val for opt,val in c.items() if val!='default'}
+
+ options = {'linenos': [True],
+ 'lineno_fontsize': [36],
+ 'fontsize': [36],
+ 'lineno_padding': [4],
+ 'linenostart': [10],
+ 'linenostep': [3],
+ 'lineno_color': ['ff0000'],
+ 'hl_lines': ['2'],
+ 'hl_linenostart': [True],
+ 'hl_color': ['00ff00']
+ }
+
+ t_cpp = [r'#include <iostream>',
+ r'int main(int argc, char** argv) {',
+ r' /* Multi-line comment',
+ r' with \n escape sequence */'
+ r' for (int i = 0; i < argc; i++){',
+ r' std::cout << i << ": " << argv[i] << "\n";',
+ r' }',
+ r' return 0;',
+ r'}'
+ ]
+
+ t_python = [r'# Description of program',
+ r'def add(a, b):',
+ r' """ Add numbers a and b.',
+ r' Newline \n in docstring."""',
+ r' return a+b',
+ r'if __name__ == "__main__":',
+ r'result = add(2,2)',
+ r'print(f"Result:\n{result}")'
+ ]
+
+ t_text = [r'Header1;"Long',
+ r'Header2";Header3',
+ r'1,2;Single Line;20/02/2024',
+ r'1,3;"Multiple',
+ r'Lines";21/02/2024']
+
+
+ for opts in get_option_combinations(options):
+
+ opt_strs = '\n'.join([f"{k}: {v}" for k,v in opts.items()])
+ opt_str_for_copying = "-O " + ",".join([f"{k}={v}" for k,v in opts.items()])
+
+ for t, lexer in [(t_cpp, CppLexer),
+ (t_python, PythonLexer),
+ (t_text, TextLexer)]:
+
+ input_text = '\n'.join(t) + '\n' # Last line should end in \n
+
+ try:
+ result = format_rtf(input_text, opts,lexer)
+ except Exception as e:
+ msg = (f"RTF-formatting caused an exception with options\n\n"
+ f"{opt_strs}\n\n"
+ f"{opt_str_for_copying}\n\n"
+ f"Lexer: {lexer.__name__}\n\n"
+ f"Input:\n"
+ f"{input_text}\n"
+ f"{type(e)}: {e}\n")
+
+ pytest.fail(msg)
+
+ num_input_lines = len(t)
+ num_of_pars = result.count(r'\par')
+
+ msg = (f"Different of number of input lines and formatted lines:\n"
+ f"{opt_strs}\n\n"
+ f"{opt_str_for_copying}\n\n"
+ f"\\par control words: {num_of_pars}\n"
+ f"Input lines: {num_input_lines}\n\n"
+ f"Input:\n"
+ f"{input_text}\n")
+
+ assert num_of_pars == num_input_lines, msg
| diff --git a/AUTHORS b/AUTHORS
index a7928ea88b..4ec64ba1ef 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -116,6 +116,8 @@ Other contributors, listed alphabetically, are:
MSDOS session, BC, WDiff
* Brian R. Jackson -- Tea lexer
* Christian Jann -- ShellSession lexer
+* Jonas Camillus Jeppesen -- Line numbers and line highlighting for
+ RTF-formatter
* Dennis Kaarsemaker -- sources.list lexer
* Dmitri Kabak -- Inferno Limbo lexer
* Igor Kalnitsky -- vhdl lexer
| [
{
"components": [
{
"doc": "",
"lines": [
185,
192
],
"name": "RtfFormatter.hex_to_rtf_color",
"signature": "def hex_to_rtf_color(hex_color):",
"type": "function"
},
{
"doc": "Split tokens containing newline characters... | [
"tests/test_rtf_formatter.py::test_rtf_footer",
"tests/test_rtf_formatter.py::test_ascii_characters",
"tests/test_rtf_formatter.py::test_escape_characters",
"tests/test_rtf_formatter.py::test_single_characters",
"tests/test_rtf_formatter.py::test_double_characters",
"tests/test_rtf_formatter.py::test_line... | [
"tests/test_rtf_formatter.py::test_rtf_header",
"tests/test_rtf_formatter.py::test_all_options"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add line numbers and line highlighting to the RTF-formatter
Finally implemented line numbers and line highlighting as discussed in issue #1217.

If desirable, I could use f-strings instead of `%`-based string formatting while we're at it (maybe capitalize comments). I chose not to because it is not directly related to the issue.
Merging this would cause https://github.com/pygments/pygments/pull/2607 to need a minor rewrite. `\\sa0` would need to be added to [rtf.py#L269](https://github.com/jonascj/pygments/blob/83019842402a743b3b6828b73b61840c25bf0ae3/pygments/formatters/rtf.py#L269)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pygments/formatters/rtf.py]
(definition of RtfFormatter.hex_to_rtf_color:)
def hex_to_rtf_color(hex_color):
(definition of RtfFormatter._split_tokens_on_newlines:)
def _split_tokens_on_newlines(self, tokensource):
"""Split tokens containing newline characters into multiple token
each representing a line of the input file. Needed for numbering
lines of e.g. multiline comments."""
(definition of RtfFormatter._create_color_mapping:)
def _create_color_mapping(self):
"""Create a mapping of style hex colors to index/offset in
the RTF color table."""
(definition of RtfFormatter._lineno_template:)
def _lineno_template(self):
(definition of RtfFormatter._hl_open_str:)
def _hl_open_str(self):
(definition of RtfFormatter._rtf_header:)
def _rtf_header(self):
[end of new definitions in pygments/formatters/rtf.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | e08bdbba2fa78270dba5ca700d053569f85d0351 | |
tobymao__sqlglot-3010 | 3,010 | tobymao/sqlglot | null | 9079ead97701b32bde0b2d704bbf8f9b67f5a740 | 2024-02-22T14:27:04Z | diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 570b92782e..849239f6b7 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -79,6 +79,21 @@ def _build_date_diff(args: t.List) -> exp.Expression:
return exp.DateDiff(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))
+def _build_generate_series(end_exclusive: bool = False) -> t.Callable[[t.List], exp.GenerateSeries]:
+ def _builder(args: t.List) -> exp.GenerateSeries:
+ # Check https://duckdb.org/docs/sql/functions/nested.html#range-functions
+ if len(args) == 1:
+ # DuckDB uses 0 as a default for the series' start when it's omitted
+ args.insert(0, exp.Literal.number("0"))
+
+ gen_series = exp.GenerateSeries.from_arg_list(args)
+ gen_series.set("is_end_exclusive", end_exclusive)
+
+ return gen_series
+
+ return _builder
+
+
def _build_make_timestamp(args: t.List) -> exp.Expression:
if len(args) == 1:
return exp.UnixToTime(this=seq_get(args, 0), scale=exp.UnixToTime.MICROS)
@@ -267,6 +282,8 @@ class Parser(parser.Parser):
"TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
"UNNEST": exp.Explode.from_arg_list,
"XOR": binary_from_function(exp.BitwiseXor),
+ "GENERATE_SERIES": _build_generate_series(),
+ "RANGE": _build_generate_series(end_exclusive=True),
}
FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
@@ -548,3 +565,11 @@ def join_sql(self, expression: exp.Join) -> str:
return super().join_sql(expression.on(exp.true()))
return super().join_sql(expression)
+
+ def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
+ # GENERATE_SERIES(a, b) -> [a, b], RANGE(a, b) -> [a, b)
+ if expression.args.get("is_end_exclusive"):
+ expression.set("is_end_exclusive", None)
+ return rename_func("RANGE")(self, expression)
+
+ return super().generateseries_sql(expression)
diff --git a/sqlglot/dialects/sqlite.py b/sqlglot/dialects/sqlite.py
index 6596c5bb88..2b17ff9d9c 100644
--- a/sqlglot/dialects/sqlite.py
+++ b/sqlglot/dialects/sqlite.py
@@ -92,6 +92,7 @@ class Generator(generator.Generator):
NVL2_SUPPORTED = False
JSON_PATH_BRACKETED_KEY_SUPPORTED = False
SUPPORTS_CREATE_TABLE_LIKE = False
+ SUPPORTS_TABLE_ALIAS_COLUMNS = False
SUPPORTED_JSON_PATH_PARTS = {
exp.JSONPathKey,
@@ -173,6 +174,21 @@ def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) ->
return super().cast_sql(expression)
+ def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
+ parent = expression.parent
+ alias = parent and parent.args.get("alias")
+
+ if isinstance(alias, exp.TableAlias) and alias.columns:
+ column_alias = alias.columns[0]
+ alias.set("columns", None)
+ sql = self.sql(
+ exp.select(exp.alias_("value", column_alias)).from_(expression).subquery()
+ )
+ else:
+ sql = super().generateseries_sql(expression)
+
+ return sql
+
def datediff_sql(self, expression: exp.DateDiff) -> str:
unit = expression.args.get("unit")
unit = unit.name.upper() if unit else "DAY"
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 8cc2a02506..553c91d0ff 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4434,7 +4434,7 @@ class ToChar(Func):
class GenerateSeries(Func):
- arg_types = {"start": True, "end": True, "step": False}
+ arg_types = {"start": True, "end": True, "step": False, "is_end_exclusive": False}
class ArrayAgg(AggFunc):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index ae974fc241..b4fdbcadf2 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -3454,3 +3454,7 @@ def _ensure_string_if_null(self, values: t.List[exp.Expression]) -> t.List[exp.E
for value in values
if value
]
+
+ def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
+ expression.set("is_end_exclusive", None)
+ return self.function_fallback_sql(expression)
| diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 5a81320e9b..fe7cc3d53e 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -174,7 +174,6 @@ def test_duckdb(self):
},
)
- self.validate_identity("SELECT i FROM RANGE(5) AS _(i) ORDER BY i ASC")
self.validate_identity("INSERT INTO x BY NAME SELECT 1 AS y")
self.validate_identity("SELECT 1 AS x UNION ALL BY NAME SELECT 2 AS x")
self.validate_identity("SELECT SUM(x) FILTER (x = 1)", "SELECT SUM(x) FILTER(WHERE x = 1)")
@@ -626,6 +625,27 @@ def test_duckdb(self):
},
)
+ self.validate_identity("SELECT * FROM RANGE(1, 5, 10)")
+ self.validate_identity("SELECT * FROM GENERATE_SERIES(2, 13, 4)")
+
+ self.validate_all(
+ "WITH t AS (SELECT i, i * i * i * i * i AS i5 FROM RANGE(1, 5) t(i)) SELECT * FROM t",
+ write={
+ "duckdb": "WITH t AS (SELECT i, i * i * i * i * i AS i5 FROM RANGE(1, 5) AS t(i)) SELECT * FROM t",
+ "sqlite": "WITH t AS (SELECT i, i * i * i * i * i AS i5 FROM (SELECT value AS i FROM GENERATE_SERIES(1, 5)) AS t) SELECT * FROM t",
+ },
+ )
+
+ self.validate_identity(
+ """SELECT i FROM RANGE(5) AS _(i) ORDER BY i ASC""",
+ """SELECT i FROM RANGE(0, 5) AS _(i) ORDER BY i ASC""",
+ )
+
+ self.validate_identity(
+ """SELECT i FROM GENERATE_SERIES(12) AS _(i) ORDER BY i ASC""",
+ """SELECT i FROM GENERATE_SERIES(0, 12) AS _(i) ORDER BY i ASC""",
+ )
+
def test_array_index(self):
with self.assertLogs(helper_logger) as cm:
self.validate_all(
diff --git a/tests/dialects/test_sqlite.py b/tests/dialects/test_sqlite.py
index f7a3dd7802..2421987bf6 100644
--- a/tests/dialects/test_sqlite.py
+++ b/tests/dialects/test_sqlite.py
@@ -1,5 +1,7 @@
from tests.dialects.test_dialect import Validator
+from sqlglot.helper import logger as helper_logger
+
class TestSQLite(Validator):
dialect = "sqlite"
@@ -76,6 +78,7 @@ def test_sqlite(self):
self.validate_identity(
"""SELECT item AS "item", some AS "some" FROM data WHERE (item = 'value_1' COLLATE NOCASE) AND (some = 't' COLLATE NOCASE) ORDER BY item ASC LIMIT 1 OFFSET 0"""
)
+ self.validate_identity("SELECT * FROM GENERATE_SERIES(1, 5)")
self.validate_all("SELECT LIKE(y, x)", write={"sqlite": "SELECT x LIKE y"})
self.validate_all("SELECT GLOB('*y*', 'xyz')", write={"sqlite": "SELECT 'xyz' GLOB '*y*'"})
@@ -178,3 +181,12 @@ def test_longvarchar_dtype(self):
"CREATE TABLE foo (bar LONGVARCHAR)",
write={"sqlite": "CREATE TABLE foo (bar TEXT)"},
)
+
+ def test_warnings(self):
+ with self.assertLogs(helper_logger) as cm:
+ self.validate_identity(
+ "SELECT * FROM t AS t(c1, c2)",
+ "SELECT * FROM t AS t",
+ )
+
+ self.assertIn("Named columns are not supported in table alias.", cm.output[0])
| [] | [
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_sqlite.py::TestSQLite::test_warnings"
] | [
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array_index",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat: Supporting RANGE <-> GENERATE_SERIES between DuckDB & SQLite
Hi,
The following PR aims to address [discussion#2995 ](https://github.com/tobymao/sqlglot/discussions/2995)
Key changes:
- SQLite now explicitly rejects table alias columns. In order to implement the sample query from the discussion which uses this DuckDB feature, SQLite now wraps the output of `exp.GenerateSeries` in a subquery with the proper (i.e first) column reference if a TableAlias is binding to the function call.
- The AST node `exp.GenerateSeries` now carries one more argument (state) to mark whether the end is inclusive/exclusive, as the same function appears on other dialects and/or in other flavors e.g. DuckDB supports `RANGE` (exclusive end) _and_ `GENERATE_SERIES` (inclusive end).
Future PRs should hopefully:
- Aim to support the flavors of `GENERATE_SERIES / RANGE` across all dialects through the `exp.GenerateSeries`
- Transform the range appropriately (wherever possible) e.g in DuckDB the proper transpilation should be `RANGE(1, 5) -> GENERATE_SERIES(1, 4)` to match the data output
Docs
---------
- [DuckDB Range Functions](https://duckdb.org/docs/sql/functions/nested.html#range-functions)
- [SQLite GENERATE_SERIES Function](https://www.sqlite.org/series.html)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
conan-io__conan-15731 | 15,731 | conan-io/conan | null | 1763159dc74a54cb4920a55a7620557687e1dc25 | 2024-02-22T07:51:29Z | diff --git a/conan/tools/build/__init__.py b/conan/tools/build/__init__.py
index 829886a8de3..da2223ff12e 100644
--- a/conan/tools/build/__init__.py
+++ b/conan/tools/build/__init__.py
@@ -3,3 +3,4 @@
from conan.tools.build.cppstd import check_min_cppstd, valid_min_cppstd, default_cppstd, \
supported_cppstd
from conan.tools.build.stdcpp_library import stdcpp_library
+from conan.tools.build.flags import cppstd_flag
diff --git a/conan/tools/build/flags.py b/conan/tools/build/flags.py
new file mode 100644
index 00000000000..9cd9f06385a
--- /dev/null
+++ b/conan/tools/build/flags.py
@@ -0,0 +1,16 @@
+from conan.tools._compilers import cppstd_flag as cppstd_flag_settings
+
+
+def cppstd_flag(conanfile):
+ """
+ Returns flags specific to the C++ standard based on the ``conanfile.settings.compiler``,
+ ``conanfile.settings.compiler.version`` and ``conanfile.settings.compiler.cppstd``.
+ It also considers when using GNU extension in ``settings.compiler.cppstd``, reflecting it in the
+ compiler flag. Currently, it supports GCC, Clang, AppleClang, MSVC, Intel, MCST-LCC.
+ In case there is no ``settings.compiler`` or ``settings.cppstd`` in the profile, the result will
+ be an **empty string**.
+ :param conanfile: The current recipe object. Always use ``self``.
+ :return: ``str`` with the standard C++ flag used by the compiler. e.g. "-std=c++11", "/std:c++latest"
+ """
+ settings = conanfile.settings
+ return cppstd_flag_settings(settings)
| diff --git a/conans/test/unittests/client/build/cpp_std_flags_test.py b/conans/test/unittests/client/build/cpp_std_flags_test.py
index 1badf2c90ab..3c30e83717a 100644
--- a/conans/test/unittests/client/build/cpp_std_flags_test.py
+++ b/conans/test/unittests/client/build/cpp_std_flags_test.py
@@ -1,9 +1,11 @@
import unittest
from conans.client.build.cppstd_flags import cppstd_default
-from conans.test.utils.mocks import MockSettings
+from conans.test.utils.mocks import MockSettings, ConanFileMock
from conans.tools import cppstd_flag
+from conan.tools.build import cppstd_flag as cppstd_flag_conanfile
+
def _make_cppstd_flag(compiler, compiler_version, cppstd=None, compiler_base=None):
settings = MockSettings({"compiler": compiler,
@@ -399,3 +401,12 @@ def test_mcst_lcc_cppstd_flag(self):
self.assertEqual(_make_cppstd_flag("mcst-lcc", "1.25", "14", "gcc"), "-std=c++14")
self.assertEqual(_make_cppstd_flag("mcst-lcc", "1.25", "17", "gcc"), "-std=c++17")
self.assertEqual(_make_cppstd_flag("mcst-lcc", "1.25", "20", "gcc"), "-std=c++2a")
+
+ def test_cppstd_flag_conanfile(self):
+ """The conan.tools.build.cppstd_flag should work when passing a ConanFile instance
+ """
+ conanfile = ConanFileMock()
+ conanfile.settings = MockSettings({"compiler": "gcc",
+ "compiler.version": "9",
+ "compiler.cppstd": "17"})
+ self.assertEqual(cppstd_flag_conanfile(conanfile), "-std=c++17")
| [
{
"components": [
{
"doc": "Returns flags specific to the C++ standard based on the ``conanfile.settings.compiler``,\n``conanfile.settings.compiler.version`` and ``conanfile.settings.compiler.cppstd``.\nIt also considers when using GNU extension in ``settings.compiler.cppstd``, reflecting it in th... | [
"conans/test/unittests/client/build/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_defaults",
"conans/test/unittests/client/build/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_flags",
"conans/test/unittests/client/build/cpp_std_flags_test.py::CompilerFlagsTest::test_clang_... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Backport cppstd_flag from Conan 2.x
Related to the PR #15710, this PR backports the method available in Conan 2.x
In Conan 1.x we have [cppstd_flag](https://docs.conan.io/1/reference/tools.html#tools-cppstd-flag) available already, but it receives `settings` only is under `conans.tools`.
This change creates a wrapper to make it available under `conan.tools.build` and should pass `conanfile` instead.
Changelog: Feature: Promote cppstd_flag in the new conan.tools.build module.
Docs: https://github.com/conan-io/docs/pull/3602
- [x] Refer to the issue that supports this Pull Request.
- [x] If the issue has missing info, explain the purpose/use case/pain/need that covers this Pull Request.
- [x] I've read the [Contributing guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
- [x] I've followed the PEP8 style guides for Python code.
- [x] I've opened another PR in the Conan docs repo to the ``develop`` branch, documenting this one.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/build/flags.py]
(definition of cppstd_flag:)
def cppstd_flag(conanfile):
"""Returns flags specific to the C++ standard based on the ``conanfile.settings.compiler``,
``conanfile.settings.compiler.version`` and ``conanfile.settings.compiler.cppstd``.
It also considers when using GNU extension in ``settings.compiler.cppstd``, reflecting it in the
compiler flag. Currently, it supports GCC, Clang, AppleClang, MSVC, Intel, MCST-LCC.
In case there is no ``settings.compiler`` or ``settings.cppstd`` in the profile, the result will
be an **empty string**.
:param conanfile: The current recipe object. Always use ``self``.
:return: ``str`` with the standard C++ flag used by the compiler. e.g. "-std=c++11", "/std:c++latest""""
[end of new definitions in conan/tools/build/flags.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
deepset-ai__haystack-7050 | 7,050 | deepset-ai/haystack | null | 0507fce2567b0e8e48ed334469bad16260a8709e | 2024-02-21T09:20:17Z | diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
new file mode 100644
index 0000000000..9550a5f42d
--- /dev/null
+++ b/haystack/components/evaluators/__init__.py
@@ -0,0 +1,3 @@
+from .answer_exact_match import AnswerExactMatchEvaluator
+
+__all__ = ["AnswerExactMatchEvaluator"]
diff --git a/haystack/components/evaluators/answer_exact_match.py b/haystack/components/evaluators/answer_exact_match.py
new file mode 100644
index 0000000000..eb509e8bed
--- /dev/null
+++ b/haystack/components/evaluators/answer_exact_match.py
@@ -0,0 +1,49 @@
+from typing import Any, Dict, List
+
+from haystack import default_from_dict, default_to_dict
+from haystack.core.component import component
+
+
+@component
+class AnswerExactMatchEvaluator:
+ """
+ Evaluator that checks if the predicted answers matches any of the ground truth answers exactly.
+ The result is a number from 0.0 to 1.0, it represents the proportion of questions where any predicted answer
+ matched one of the ground truth answers.
+ Each question can have multiple ground truth answers and multiple predicted answers.
+ """
+
+ def to_dict(self) -> Dict[str, Any]:
+ return default_to_dict(self)
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "AnswerExactMatchEvaluator":
+ return default_from_dict(cls, data)
+
+ @component.output_types(result=float)
+ def run(
+ self, questions: List[str], ground_truth_answers: List[List[str]], predicted_answers: List[List[str]]
+ ) -> Dict[str, float]:
+ """
+ Run the AnswerExactMatchEvaluator on the given inputs.
+ All lists must have the same length.
+
+ :param questions: A list of questions.
+ :param ground_truth_answers: A list of expected answers for each question.
+ :param predicted_answers: A list of predicted answers for each question.
+ :returns: A dictionary with the following outputs:
+ * `result` - A number from 0.0 to 1.0 that represents the proportion of questions where any predicted
+ answer matched one of the ground truth answers.
+ """
+ if not len(questions) == len(ground_truth_answers) == len(predicted_answers):
+ raise ValueError("The length of questions, ground_truth_answers, and predicted_answers must be the same.")
+
+ matches = 0
+ for truths, extracted in zip(ground_truth_answers, predicted_answers):
+ if set(truths) & set(extracted):
+ matches += 1
+
+ # The proportion of questions where any predicted answer matched one of the ground truth answers
+ result = matches / len(questions)
+
+ return {"result": result}
diff --git a/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
new file mode 100644
index 0000000000..ad380617d9
--- /dev/null
+++ b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `AnswerExactMatchEvaluator`, a Component that can be used to calculate the Exact Match metric
+ given a list of questions, a list of expected answers for each question and the list of predicted
+ answers for each question.
| diff --git a/test/components/evaluators/test_answer_exact_match.py b/test/components/evaluators/test_answer_exact_match.py
new file mode 100644
index 0000000000..c179c74a25
--- /dev/null
+++ b/test/components/evaluators/test_answer_exact_match.py
@@ -0,0 +1,61 @@
+import pytest
+
+from haystack.components.evaluators import AnswerExactMatchEvaluator
+
+
+def test_run_with_all_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["Paris"]],
+ )
+
+ assert result["result"] == 1.0
+
+
+def test_run_with_no_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Paris"], ["London"]],
+ )
+
+ assert result["result"] == 0.0
+
+
+def test_run_with_partial_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ assert result["result"] == 0.5
+
+
+def test_run_with_different_lengths():
+ evaluator = AnswerExactMatchEvaluator()
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"]],
+ )
| diff --git a/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
new file mode 100644
index 0000000000..ad380617d9
--- /dev/null
+++ b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `AnswerExactMatchEvaluator`, a Component that can be used to calculate the Exact Match metric
+ given a list of questions, a list of expected answers for each question and the list of predicted
+ answers for each question.
| [
{
"components": [
{
"doc": "Evaluator that checks if the predicted answers matches any of the ground truth answers exactly.\nThe result is a number from 0.0 to 1.0, it represents the proportion of questions where any predicted answer\nmatched one of the ground truth answers.\nEach question can hav... | [
"test/components/evaluators/test_answer_exact_match.py::test_run_with_all_matching",
"test/components/evaluators/test_answer_exact_match.py::test_run_with_no_matching",
"test/components/evaluators/test_answer_exact_match.py::test_run_with_partial_matching",
"test/components/evaluators/test_answer_exact_match.... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `AnswerExactMatchEvaluator`
### Related Issues
- fixes #6067
### Proposed Changes:
Add `AnswerExactMatchEvaluator`. This Component calculates the Exact Match metrics given a list of questions, a list of expected answers for each question and the list of predicted answers for each question.
### How did you test it?
I added unit tests.
### Notes for the reviewer
N/A
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/answer_exact_match.py]
(definition of AnswerExactMatchEvaluator:)
class AnswerExactMatchEvaluator:
"""Evaluator that checks if the predicted answers matches any of the ground truth answers exactly.
The result is a number from 0.0 to 1.0, it represents the proportion of questions where any predicted answer
matched one of the ground truth answers.
Each question can have multiple ground truth answers and multiple predicted answers."""
(definition of AnswerExactMatchEvaluator.to_dict:)
def to_dict(self) -> Dict[str, Any]:
(definition of AnswerExactMatchEvaluator.from_dict:)
def from_dict(cls, data: Dict[str, Any]) -> "AnswerExactMatchEvaluator":
(definition of AnswerExactMatchEvaluator.run:)
def run( self, questions: List[str], ground_truth_answers: List[List[str]], predicted_answers: List[List[str]] ) -> Dict[str, float]:
"""Run the AnswerExactMatchEvaluator on the given inputs.
All lists must have the same length.
:param questions: A list of questions.
:param ground_truth_answers: A list of expected answers for each question.
:param predicted_answers: A list of predicted answers for each question.
:returns: A dictionary with the following outputs:
* `result` - A number from 0.0 to 1.0 that represents the proportion of questions where any predicted
answer matched one of the ground truth answers."""
[end of new definitions in haystack/components/evaluators/answer_exact_match.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement function to calculate Exact Match metric
As specified in proposal #5794 we need to implement a function to calculate the Exact Match metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_em()` could be a nice name.
For more detailed information check out the original proposal.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
deepset-ai__haystack-7046 | 7,046 | deepset-ai/haystack | null | 7f4d11c38e9aefdc2152653151bb42f808d87c9b | 2024-02-20T16:07:55Z | diff --git a/haystack/core/pipeline/pipeline.py b/haystack/core/pipeline/pipeline.py
index b4dfebe942..bc33b8a920 100644
--- a/haystack/core/pipeline/pipeline.py
+++ b/haystack/core/pipeline/pipeline.py
@@ -26,6 +26,7 @@
from haystack.marshal import Marshaller, YamlMarshaller
from haystack.telemetry import pipeline_running
from haystack.utils import is_in_jupyter
+from haystack import tracing
from .descriptions import find_pipeline_inputs, find_pipeline_outputs
from .draw import _to_mermaid_image
@@ -744,168 +745,191 @@ def run(self, word: str):
# The waiting_for_input list is used to keep track of components that are waiting for input.
waiting_for_input: List[Tuple[str, Component]] = []
- # This is what we'll return at the end
- final_outputs = {}
- while len(to_run) > 0:
- name, comp = to_run.pop(0)
-
- if any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) and not getattr( # type: ignore
- comp, "is_greedy", False
- ):
- there_are_non_variadics = False
- for _, other_comp in to_run:
- if not any(socket.is_variadic for socket in other_comp.__haystack_input__._sockets_dict.values()): # type: ignore
- there_are_non_variadics = True
- break
-
- if there_are_non_variadics:
- if (name, comp) not in waiting_for_input:
- waiting_for_input.append((name, comp))
- continue
-
- if name in last_inputs and len(comp.__haystack_input__._sockets_dict) == len(last_inputs[name]): # type: ignore
- if self.graph.nodes[name]["visits"] > self.max_loops_allowed:
- msg = f"Maximum loops count ({self.max_loops_allowed}) exceeded for component '{name}'"
- raise PipelineMaxLoops(msg)
- # This component has all the inputs it needs to run
- res = comp.run(**last_inputs[name])
- self.graph.nodes[name]["visits"] += 1
-
- if not isinstance(res, Mapping):
- raise PipelineRuntimeError(
- f"Component '{name}' didn't return a dictionary. "
- "Components must always return dictionaries: check the the documentation."
- )
+ with tracing.tracer.trace(
+ "haystack.pipeline.run",
+ tags={
+ "haystack.pipeline.debug": debug,
+ "haystack.pipeline.metadata": self.metadata,
+ "haystack.pipeline.max_loops_allowed": self.max_loops_allowed,
+ },
+ ):
+ # This is what we'll return at the end
+ final_outputs = {}
+ while len(to_run) > 0:
+ name, comp = to_run.pop(0)
+
+ if any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) and not getattr( # type: ignore
+ comp, "is_greedy", False
+ ):
+ there_are_non_variadics = False
+ for _, other_comp in to_run:
+ if not any(socket.is_variadic for socket in other_comp.__haystack_input__._sockets_dict.values()): # type: ignore
+ there_are_non_variadics = True
+ break
- # Reset the waiting for input previous states, we managed to run a component
- before_last_waiting_for_input = None
- last_waiting_for_input = None
+ if there_are_non_variadics:
+ if (name, comp) not in waiting_for_input:
+ waiting_for_input.append((name, comp))
+ continue
- if (name, comp) in waiting_for_input:
- # We manage to run this component that was in the waiting list, we can remove it.
- # This happens when a component was put in the waiting list but we reached it from another edge.
- waiting_for_input.remove((name, comp))
+ if name in last_inputs and len(comp.__haystack_input__._sockets_dict) == len(last_inputs[name]): # type: ignore
+ if self.graph.nodes[name]["visits"] > self.max_loops_allowed:
+ msg = f"Maximum loops count ({self.max_loops_allowed}) exceeded for component '{name}'"
+ raise PipelineMaxLoops(msg)
+ # This component has all the inputs it needs to run
+ with tracing.tracer.trace(
+ "haystack.component.run",
+ tags={
+ "haystack.component.name": name,
+ "haystack.component.type": comp.__class__.__name__,
+ "haystack.component.inputs": {k: type(v).__name__ for k, v in last_inputs[name].items()},
+ },
+ ) as span:
+ res = comp.run(**last_inputs[name])
+ self.graph.nodes[name]["visits"] += 1
+
+ if not isinstance(res, Mapping):
+ raise PipelineRuntimeError(
+ f"Component '{name}' didn't return a dictionary. "
+ "Components must always return dictionaries: check the the documentation."
+ )
- # We keep track of which keys to remove from res at the end of the loop.
- # This is done after the output has been distributed to the next components, so that
- # we're sure all components that need this output have received it.
- to_remove_from_res = set()
- for sender_component_name, receiver_component_name, edge_data in self.graph.edges(data=True):
- if receiver_component_name == name and edge_data["to_socket"].is_variadic:
- # Delete variadic inputs that were already consumed
- last_inputs[name][edge_data["to_socket"].name] = []
+ span.set_tags(
+ tags={
+ "haystack.component.outputs": {k: type(v).__name__ for k, v in res.items()},
+ "haystack.component.visits": self.graph.nodes[name]["visits"],
+ }
+ )
+
+ # Reset the waiting for input previous states, we managed to run a component
+ before_last_waiting_for_input = None
+ last_waiting_for_input = None
+
+ if (name, comp) in waiting_for_input:
+ # We manage to run this component that was in the waiting list, we can remove it.
+ # This happens when a component was put in the waiting list but we reached it from another edge.
+ waiting_for_input.remove((name, comp))
+
+ # We keep track of which keys to remove from res at the end of the loop.
+ # This is done after the output has been distributed to the next components, so that
+ # we're sure all components that need this output have received it.
+ to_remove_from_res = set()
+ for sender_component_name, receiver_component_name, edge_data in self.graph.edges(data=True):
+ if receiver_component_name == name and edge_data["to_socket"].is_variadic:
+ # Delete variadic inputs that were already consumed
+ last_inputs[name][edge_data["to_socket"].name] = []
+
+ if name != sender_component_name:
+ continue
- if name != sender_component_name:
- continue
+ if edge_data["from_socket"].name not in res:
+ # This output has not been produced by the component, skip it
+ continue
- if edge_data["from_socket"].name not in res:
- # This output has not been produced by the component, skip it
- continue
+ if receiver_component_name not in last_inputs:
+ last_inputs[receiver_component_name] = {}
+ to_remove_from_res.add(edge_data["from_socket"].name)
+ value = res[edge_data["from_socket"].name]
+
+ if edge_data["to_socket"].is_variadic:
+ if edge_data["to_socket"].name not in last_inputs[receiver_component_name]:
+ last_inputs[receiver_component_name][edge_data["to_socket"].name] = []
+ # Add to the list of variadic inputs
+ last_inputs[receiver_component_name][edge_data["to_socket"].name].append(value)
+ else:
+ last_inputs[receiver_component_name][edge_data["to_socket"].name] = value
+
+ pair = (receiver_component_name, self.graph.nodes[receiver_component_name]["instance"])
+ if pair not in waiting_for_input and pair not in to_run:
+ to_run.append(pair)
+
+ res = {k: v for k, v in res.items() if k not in to_remove_from_res}
+
+ if len(res) > 0:
+ final_outputs[name] = res
+ else:
+ # This component doesn't have enough inputs so we can't run it yet
+ if (name, comp) not in waiting_for_input:
+ waiting_for_input.append((name, comp))
- if receiver_component_name not in last_inputs:
- last_inputs[receiver_component_name] = {}
- to_remove_from_res.add(edge_data["from_socket"].name)
- value = res[edge_data["from_socket"].name]
+ if len(to_run) == 0 and len(waiting_for_input) > 0:
+ # Check if we're stuck in a loop.
+ # It's important to check whether previous waitings are None as it could be that no
+ # Component has actually been run yet.
+ if (
+ before_last_waiting_for_input is not None
+ and last_waiting_for_input is not None
+ and before_last_waiting_for_input == last_waiting_for_input
+ ):
+ # Are we actually stuck or there's a lazy variadic waiting for input?
+ # This is our last resort, if there's no lazy variadic waiting for input
+ # we're stuck for real and we can't make any progress.
+ for name, comp in waiting_for_input:
+ is_variadic = any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) # type: ignore
+ if is_variadic and not comp.__haystack_is_greedy__: # type: ignore[attr-defined]
+ break
+ else:
+ # We're stuck in a loop for real, we can't make any progress.
+ # BAIL!
+ break
- if edge_data["to_socket"].is_variadic:
- if edge_data["to_socket"].name not in last_inputs[receiver_component_name]:
- last_inputs[receiver_component_name][edge_data["to_socket"].name] = []
- # Add to the list of variadic inputs
- last_inputs[receiver_component_name][edge_data["to_socket"].name].append(value)
- else:
- last_inputs[receiver_component_name][edge_data["to_socket"].name] = value
+ if len(waiting_for_input) == 1:
+ # We have a single component with variadic input waiting for input.
+ # If we're at this point it means it has been waiting for input for at least 2 iterations.
+ # This will never run.
+ # BAIL!
+ break
- pair = (receiver_component_name, self.graph.nodes[receiver_component_name]["instance"])
- if pair not in waiting_for_input and pair not in to_run:
- to_run.append(pair)
+ # There was a lazy variadic waiting for input, we can run it
+ waiting_for_input.remove((name, comp))
+ to_run.append((name, comp))
+ continue
- res = {k: v for k, v in res.items() if k not in to_remove_from_res}
+ before_last_waiting_for_input = (
+ last_waiting_for_input.copy() if last_waiting_for_input is not None else None
+ )
+ last_waiting_for_input = {item[0] for item in waiting_for_input}
- if len(res) > 0:
- final_outputs[name] = res
- else:
- # This component doesn't have enough inputs so we can't run it yet
- if (name, comp) not in waiting_for_input:
- waiting_for_input.append((name, comp))
-
- if len(to_run) == 0 and len(waiting_for_input) > 0:
- # Check if we're stuck in a loop.
- # It's important to check whether previous waitings are None as it could be that no
- # Component has actually been run yet.
- if (
- before_last_waiting_for_input is not None
- and last_waiting_for_input is not None
- and before_last_waiting_for_input == last_waiting_for_input
- ):
- # Are we actually stuck or there's a lazy variadic waiting for input?
- # This is our last resort, if there's no lazy variadic waiting for input
- # we're stuck for real and we can't make any progress.
+ # Remove from waiting only if there is actually enough input to run
for name, comp in waiting_for_input:
+ if name not in last_inputs:
+ last_inputs[name] = {}
+
+ # Lazy variadics must be removed only if there's nothing else to run at this stage
is_variadic = any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) # type: ignore
if is_variadic and not comp.__haystack_is_greedy__: # type: ignore[attr-defined]
- break
- else:
- # We're stuck in a loop for real, we can't make any progress.
- # BAIL!
- break
-
- if len(waiting_for_input) == 1:
- # We have a single component with variadic input waiting for input.
- # If we're at this point it means it has been waiting for input for at least 2 iterations.
- # This will never run.
- # BAIL!
- break
-
- # There was a lazy variadic waiting for input, we can run it
- waiting_for_input.remove((name, comp))
- to_run.append((name, comp))
- continue
+ there_are_only_lazy_variadics = True
+ for other_name, other_comp in waiting_for_input:
+ if name == other_name:
+ continue
+ there_are_only_lazy_variadics &= (
+ any(
+ socket.is_variadic for socket in other_comp.__haystack_input__._sockets_dict.values() # type: ignore
+ )
+ and not other_comp.__haystack_is_greedy__ # type: ignore[attr-defined]
+ )
- before_last_waiting_for_input = (
- last_waiting_for_input.copy() if last_waiting_for_input is not None else None
- )
- last_waiting_for_input = {item[0] for item in waiting_for_input}
-
- # Remove from waiting only if there is actually enough input to run
- for name, comp in waiting_for_input:
- if name not in last_inputs:
- last_inputs[name] = {}
-
- # Lazy variadics must be removed only if there's nothing else to run at this stage
- is_variadic = any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) # type: ignore
- if is_variadic and not comp.__haystack_is_greedy__: # type: ignore[attr-defined]
- there_are_only_lazy_variadics = True
- for other_name, other_comp in waiting_for_input:
- if name == other_name:
+ if not there_are_only_lazy_variadics:
continue
- there_are_only_lazy_variadics &= (
- any(
- socket.is_variadic for socket in other_comp.__haystack_input__._sockets_dict.values() # type: ignore
- )
- and not other_comp.__haystack_is_greedy__ # type: ignore[attr-defined]
- )
- if not there_are_only_lazy_variadics:
- continue
+ # Find the first component that has all the inputs it needs to run
+ has_enough_inputs = True
+ for input_socket in comp.__haystack_input__._sockets_dict.values(): # type: ignore
+ if input_socket.is_mandatory and input_socket.name not in last_inputs[name]:
+ has_enough_inputs = False
+ break
+ if input_socket.is_mandatory:
+ continue
- # Find the first component that has all the inputs it needs to run
- has_enough_inputs = True
- for input_socket in comp.__haystack_input__._sockets_dict.values(): # type: ignore
- if input_socket.is_mandatory and input_socket.name not in last_inputs[name]:
- has_enough_inputs = False
+ if input_socket.name not in last_inputs[name]:
+ last_inputs[name][input_socket.name] = input_socket.default_value
+ if has_enough_inputs:
break
- if input_socket.is_mandatory:
- continue
-
- if input_socket.name not in last_inputs[name]:
- last_inputs[name][input_socket.name] = input_socket.default_value
- if has_enough_inputs:
- break
- waiting_for_input.remove((name, comp))
- to_run.append((name, comp))
+ waiting_for_input.remove((name, comp))
+ to_run.append((name, comp))
- return final_outputs
+ return final_outputs
def _prepare_component_input_data(self, data: Dict[str, Any]) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, Any]]:
"""
diff --git a/haystack/tracing/__init__.py b/haystack/tracing/__init__.py
new file mode 100644
index 0000000000..c4461b43ce
--- /dev/null
+++ b/haystack/tracing/__init__.py
@@ -0,0 +1,1 @@
+from .tracer import Tracer, Span, enable_tracing, disable_tracing, tracer, is_tracing_enabled
diff --git a/haystack/tracing/tracer.py b/haystack/tracing/tracer.py
new file mode 100644
index 0000000000..e1732dbeaf
--- /dev/null
+++ b/haystack/tracing/tracer.py
@@ -0,0 +1,118 @@
+import abc
+import contextlib
+from typing import Dict, Any, Optional, Iterator
+
+
+class Span(abc.ABC):
+ """Interface for an instrumented operation."""
+
+ @abc.abstractmethod
+ def set_tag(self, key: str, value: Any) -> None:
+ """Set a single tag on the span.
+
+ Note that the value will be serialized to a string, so it's best to use simple types like strings, numbers, or
+ booleans.
+
+ :param key: the name of the tag.
+ :param value: the value of the tag.
+ """
+ pass
+
+ def set_tags(self, tags: Dict[str, Any]) -> None:
+ """Set multiple tags on the span.
+
+ :param tags: a mapping of tag names to tag values.
+ """
+ for key, value in tags.items():
+ self.set_tag(key, value)
+
+ def raw_span(self) -> Any:
+ """Provides access to the underlying span object of the tracer.
+
+ Use this if you need full access to the underlying span object.
+
+ :return: The underlying span object.
+ """
+ return self
+
+
+class Tracer(abc.ABC):
+ """Interface for instrumenting code by creating and submitting spans."""
+
+ @abc.abstractmethod
+ @contextlib.contextmanager
+ def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
+ """Trace the execution of a block of code.
+
+ :param operation_name: the name of the operation being traced.
+ :param tags: tags to apply to the newly created span.
+ :return: the newly created span.
+ """
+ pass
+
+ @abc.abstractmethod
+ def current_span(self) -> Optional[Span]:
+ """Returns the currently active span. If no span is active, returns `None`.
+
+ :return: Currently active span or `None` if no span is active.
+ """
+ pass
+
+
+class ProxyTracer(Tracer):
+ """Container for the actual tracer instance.
+
+ This eases
+ - replacing the actual tracer instance without having to change the global tracer instance
+ - implementing default behavior for the tracer
+ """
+
+ def __init__(self, provided_tracer: Tracer) -> None:
+ self.actual_tracer: Tracer = provided_tracer
+
+ @contextlib.contextmanager
+ def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
+ with self.actual_tracer.trace(operation_name, tags=tags) as span:
+ yield span
+
+ def current_span(self) -> Optional[Span]:
+ return self.actual_tracer.current_span()
+
+
+class NullSpan(Span):
+ """A no-op implementation of the `Span` interface. This is used when tracing is disabled."""
+
+ def set_tag(self, key: str, value: Any) -> None:
+ pass
+
+
+class NullTracer(Tracer):
+ """A no-op implementation of the `Tracer` interface. This is used when tracing is disabled."""
+
+ @contextlib.contextmanager
+ def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
+ yield NullSpan()
+
+ def current_span(self) -> Optional[Span]:
+ return NullSpan()
+
+
+# We use the proxy pattern to allow for easy enabling and disabling of tracing without having to change the global
+# tracer instance. That's especially convenient if users import the object directly
+# (in that case we'd have to monkey-patch it in all of these modules).
+tracer: ProxyTracer = ProxyTracer(provided_tracer=NullTracer())
+
+
+def enable_tracing(provided_tracer: Tracer) -> None:
+ """Enable tracing by setting the global tracer instance."""
+ tracer.actual_tracer = provided_tracer
+
+
+def disable_tracing() -> None:
+ """Disable tracing by setting the global tracer instance to a no-op tracer."""
+ tracer.actual_tracer = NullTracer()
+
+
+def is_tracing_enabled() -> bool:
+ """Return whether tracing is enabled."""
+ return not isinstance(tracer.actual_tracer, NullTracer)
diff --git a/haystack/tracing/utils.py b/haystack/tracing/utils.py
new file mode 100644
index 0000000000..2a980a3cdf
--- /dev/null
+++ b/haystack/tracing/utils.py
@@ -0,0 +1,30 @@
+import json
+import logging
+from typing import Any, Union
+
+
+logger = logging.getLogger(__name__)
+
+
+def coerce_tag_value(value: Any) -> Union[bool, str, int, float]:
+ """Coerces span tag values to compatible types for the tracing backend.
+
+ Most tracing libraries don't support sending complex types to the backend. Hence, we need to convert them to
+ compatible types.
+
+ :param value: an arbitrary value which should be coerced to a compatible type
+ :return: the value coerced to a compatible type
+ """
+ if isinstance(value, (bool, str, int, float)):
+ return value
+
+ if value is None:
+ return ""
+
+ try:
+ return json.dumps(value)
+ except Exception as error:
+ logger.debug("Failed to coerce tag value to string: %s", error, exc_info=True)
+
+ # Our last resort is to convert the value to a string
+ return str(value)
diff --git a/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml b/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml
new file mode 100644
index 0000000000..d3709c2e21
--- /dev/null
+++ b/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml
@@ -0,0 +1,89 @@
+---
+features:
+ - |
+ Added option to instrument pipeline and component runs.
+ This allows users to observe their pipeline runs and component runs in real-time via their chosen observability
+ tool. Out-of-the-box support for OpenTelemetry and Datadog will be added in separate contributions.
+
+ Example usage for [OpenTelemetry](https://opentelemetry.io/docs/languages/python/):
+
+ 1. Install OpenTelemetry SDK and exporter:
+ ```bash
+ pip install opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+ ```
+
+ 2. Configure OpenTelemetry SDK with your tracing provider and exporter:
+ ```python
+ from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+
+ from opentelemetry import trace
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+ # Service name is required for most backends
+ resource = Resource(attributes={
+ SERVICE_NAME: "haystack"
+ })
+
+ traceProvider = TracerProvider(resource=resource)
+ processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
+ traceProvider.add_span_processor(processor)
+ trace.set_tracer_provider(traceProvider)
+
+ tracer = traceProvider.get_tracer("my_application")
+
+
+ 3. Create tracer
+ ```python
+ import contextlib
+ from typing import Optional, Dict, Any, Iterator
+
+ from opentelemetry import trace
+ from opentelemetry.trace import NonRecordingSpan
+
+ from haystack.tracing import Tracer, Span
+ from haystack.tracing import utils as tracing_utils
+ import opentelemetry.trace
+
+
+ class OpenTelemetrySpan(Span):
+ def __init__(self, span: opentelemetry.trace.Span) -> None:
+ self._span = span
+
+ def set_tag(self, key: str, value: Any) -> None:
+ coerced_value = tracing_utils.coerce_tag_value(value)
+ self._span.set_attribute(key, coerced_value)
+
+
+ class OpenTelemetryTracer(Tracer):
+ def __init__(self, tracer: opentelemetry.trace.Tracer) -> None:
+ self._tracer = tracer
+
+ @contextlib.contextmanager
+ def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
+ with self._tracer.start_as_current_span(operation_name) as span:
+ span = OpenTelemetrySpan(span)
+ if tags:
+ span.set_tags(tags)
+
+ yield span
+
+ def current_span(self) -> Optional[Span]:
+ current_span = trace.get_current_span()
+ if isinstance(current_span, NonRecordingSpan):
+ return None
+
+ return OpenTelemetrySpan(current_span)
+
+ ```
+
+ 4. Use the tracer with Haystack:
+ ```python
+ from haystack import tracing
+
+ haystack_tracer = OpenTelemetryTracer(tracer)
+ tracing.enable_tracing(haystack_tracer)
+ ```
+
+ 5. Run your pipeline
| diff --git a/test/conftest.py b/test/conftest.py
index c3400260cb..4ad832f42b 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,12 +1,15 @@
from datetime import datetime
from pathlib import Path
+from typing import Generator
from unittest.mock import Mock, patch
import pytest
from openai.types.chat import ChatCompletion, ChatCompletionMessage
from openai.types.chat.chat_completion import Choice
+from haystack import tracing
from haystack.testing.test_utils import set_all_seeds
+from test.tracing.utils import SpyingTracer
set_all_seeds(0)
@@ -68,3 +71,14 @@ def urlopen_mock(self, method, url, *args, **kwargs):
raise RuntimeError(f"The test was about to {method} {self.scheme}://{self.host}{url}")
monkeypatch.setattr("urllib3.connectionpool.HTTPConnectionPool.urlopen", urlopen_mock)
+
+
+@pytest.fixture()
+def spying_tracer() -> Generator[SpyingTracer, None, None]:
+ tracer = SpyingTracer()
+ tracing.enable_tracing(tracer)
+
+ yield tracer
+
+ # Make sure to disable tracing after the test to avoid affecting other tests
+ tracing.disable_tracing()
diff --git a/test/core/pipeline/test_tracing.py b/test/core/pipeline/test_tracing.py
new file mode 100644
index 0000000000..c05f00abbb
--- /dev/null
+++ b/test/core/pipeline/test_tracing.py
@@ -0,0 +1,63 @@
+import pytest
+
+from haystack import component, Pipeline
+from haystack.tracing.tracer import enable_tracing
+from test.tracing.utils import SpyingSpan, SpyingTracer
+
+
+@component
+class Hello:
+ @component.output_types(output=str)
+ def run(self, word: str):
+ """
+ Takes a string in input and returns "Hello, <string>!"
+ in output.
+ """
+ return {"output": f"Hello, {word}!"}
+
+
+@pytest.fixture()
+def pipeline() -> Pipeline:
+ pipeline = Pipeline()
+ pipeline.add_component("hello", Hello())
+ pipeline.add_component("hello2", Hello())
+ pipeline.connect("hello.output", "hello2.word")
+ return pipeline
+
+
+class TestTracing:
+ def test_with_enabled_tracing(self, pipeline: Pipeline, spying_tracer: SpyingTracer) -> None:
+ pipeline.run(data={"word": "world"})
+
+ assert len(spying_tracer.spans) == 3
+
+ assert spying_tracer.spans == [
+ SpyingSpan(
+ operation_name="haystack.pipeline.run",
+ tags={
+ "haystack.pipeline.debug": False,
+ "haystack.pipeline.metadata": {},
+ "haystack.pipeline.max_loops_allowed": 100,
+ },
+ ),
+ SpyingSpan(
+ operation_name="haystack.component.run",
+ tags={
+ "haystack.component.name": "hello",
+ "haystack.component.type": "Hello",
+ "haystack.component.inputs": {"word": "str"},
+ "haystack.component.outputs": {"output": "str"},
+ "haystack.component.visits": 1,
+ },
+ ),
+ SpyingSpan(
+ operation_name="haystack.component.run",
+ tags={
+ "haystack.component.name": "hello2",
+ "haystack.component.type": "Hello",
+ "haystack.component.inputs": {"word": "str"},
+ "haystack.component.outputs": {"output": "str"},
+ "haystack.component.visits": 1,
+ },
+ ),
+ ]
diff --git a/test/tracing/__init__.py b/test/tracing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/tracing/test_tracer.py b/test/tracing/test_tracer.py
new file mode 100644
index 0000000000..92016e61fb
--- /dev/null
+++ b/test/tracing/test_tracer.py
@@ -0,0 +1,63 @@
+from unittest.mock import Mock
+
+from haystack.tracing.tracer import (
+ NullTracer,
+ NullSpan,
+ enable_tracing,
+ Tracer,
+ disable_tracing,
+ is_tracing_enabled,
+ ProxyTracer,
+ tracer,
+)
+from test.tracing.utils import SpyingTracer
+
+
+class TestNullTracer:
+ def test_tracing(self) -> None:
+ assert isinstance(tracer.actual_tracer, NullTracer)
+
+ # None of this raises
+ with tracer.trace("operation", {"key": "value"}) as span:
+ span.set_tag("key", "value")
+ span.set_tags({"key": "value"})
+
+ assert isinstance(tracer.current_span(), NullSpan)
+ assert isinstance(tracer.current_span().raw_span(), NullSpan)
+
+
+class TestProxyTracer:
+ def test_tracing(self) -> None:
+ spying_tracer = SpyingTracer()
+ my_tracer = ProxyTracer(provided_tracer=spying_tracer)
+
+ enable_tracing(spying_tracer)
+
+ with my_tracer.trace("operation", {"key": "value"}) as span:
+ span.set_tag("key", "value")
+ span.set_tags({"key2": "value2"})
+
+ assert len(spying_tracer.spans) == 1
+ assert spying_tracer.spans[0].operation_name == "operation"
+ assert spying_tracer.spans[0].tags == {"key": "value", "key2": "value2"}
+
+
+class TestConfigureTracer:
+ def test_enable_tracer(self) -> None:
+ my_tracer = Mock(spec=Tracer) # anything else than `NullTracer` works for this test
+
+ enable_tracing(my_tracer)
+
+ assert isinstance(tracer, ProxyTracer)
+ assert tracer.actual_tracer is my_tracer
+ assert is_tracing_enabled()
+
+ def test_disable_tracing(self) -> None:
+ my_tracker = Mock(spec=Tracer) # anything else than `NullTracer` works for this test
+
+ enable_tracing(my_tracker)
+ assert tracer.actual_tracer is my_tracker
+
+ disable_tracing()
+ assert isinstance(tracer.actual_tracer, NullTracer)
+ assert is_tracing_enabled() is False
diff --git a/test/tracing/test_utils.py b/test/tracing/test_utils.py
new file mode 100644
index 0000000000..86965e0ff1
--- /dev/null
+++ b/test/tracing/test_utils.py
@@ -0,0 +1,30 @@
+from typing import Any, Union
+
+import pytest
+
+from haystack.tracing import utils
+
+
+class NonSerializableClass:
+ def __str__(self) -> str:
+ return "NonSerializableClass"
+
+
+class TestTypeCoercion:
+ @pytest.mark.parametrize(
+ "raw_value,expected_tag_value",
+ [
+ (1, 1),
+ (1.0, 1.0),
+ (True, True),
+ (None, ""),
+ ("string", "string"),
+ ([1, 2, 3], "[1, 2, 3]"),
+ ({"key": "value"}, '{"key": "value"}'),
+ (NonSerializableClass(), "NonSerializableClass"),
+ ],
+ )
+ def test_type_coercion(self, raw_value: Any, expected_tag_value: Union[bool, str, int, float]) -> None:
+ coerced_value = utils.coerce_tag_value(raw_value)
+
+ assert coerced_value == expected_tag_value
diff --git a/test/tracing/utils.py b/test/tracing/utils.py
new file mode 100644
index 0000000000..a8970005e2
--- /dev/null
+++ b/test/tracing/utils.py
@@ -0,0 +1,33 @@
+import contextlib
+import dataclasses
+from typing import Dict, Any, Optional, List, Iterator
+
+from haystack.tracing import Span, Tracer
+
+
+@dataclasses.dataclass
+class SpyingSpan(Span):
+ operation_name: str
+ tags: Dict[str, Any] = dataclasses.field(default_factory=dict)
+
+ def set_tag(self, key: str, value: Any) -> None:
+ self.tags[key] = value
+
+
+class SpyingTracer(Tracer):
+ def current_span(self) -> Optional[Span]:
+ return self.spans[-1] if self.spans else None
+
+ def __init__(self) -> None:
+ self.spans: List[SpyingSpan] = []
+
+ @contextlib.contextmanager
+ def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
+ new_span = SpyingSpan(operation_name)
+
+ for key, value in (tags or {}).items():
+ new_span.set_tag(key, value)
+
+ self.spans.append(new_span)
+
+ yield new_span
| diff --git a/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml b/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml
new file mode 100644
index 0000000000..d3709c2e21
--- /dev/null
+++ b/releasenotes/notes/code-instrumentation-9ef657728bec3508.yaml
@@ -0,0 +1,89 @@
+---
+features:
+ - |
+ Added option to instrument pipeline and component runs.
+ This allows users to observe their pipeline runs and component runs in real-time via their chosen observability
+ tool. Out-of-the-box support for OpenTelemetry and Datadog will be added in separate contributions.
+
+ Example usage for [OpenTelemetry](https://opentelemetry.io/docs/languages/python/):
+
+ 1. Install OpenTelemetry SDK and exporter:
+ ```bash
+ pip install opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+ ```
+
+ 2. Configure OpenTelemetry SDK with your tracing provider and exporter:
+ ```python
+ from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+
+ from opentelemetry import trace
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+ # Service name is required for most backends
+ resource = Resource(attributes={
+ SERVICE_NAME: "haystack"
+ })
+
+ traceProvider = TracerProvider(resource=resource)
+ processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
+ traceProvider.add_span_processor(processor)
+ trace.set_tracer_provider(traceProvider)
+
+ tracer = traceProvider.get_tracer("my_application")
+
+
+ 3. Create tracer
+ ```python
+ import contextlib
+ from typing import Optional, Dict, Any, Iterator
+
+ from opentelemetry import trace
+ from opentelemetry.trace import NonRecordingSpan
+
+ from haystack.tracing import Tracer, Span
+ from haystack.tracing import utils as tracing_utils
+ import opentelemetry.trace
+
+
+ class OpenTelemetrySpan(Span):
+ def __init__(self, span: opentelemetry.trace.Span) -> None:
+ self._span = span
+
+ def set_tag(self, key: str, value: Any) -> None:
+ coerced_value = tracing_utils.coerce_tag_value(value)
+ self._span.set_attribute(key, coerced_value)
+
+
+ class OpenTelemetryTracer(Tracer):
+ def __init__(self, tracer: opentelemetry.trace.Tracer) -> None:
+ self._tracer = tracer
+
+ @contextlib.contextmanager
+ def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
+ with self._tracer.start_as_current_span(operation_name) as span:
+ span = OpenTelemetrySpan(span)
+ if tags:
+ span.set_tags(tags)
+
+ yield span
+
+ def current_span(self) -> Optional[Span]:
+ current_span = trace.get_current_span()
+ if isinstance(current_span, NonRecordingSpan):
+ return None
+
+ return OpenTelemetrySpan(current_span)
+
+ ```
+
+ 4. Use the tracer with Haystack:
+ ```python
+ from haystack import tracing
+
+ haystack_tracer = OpenTelemetryTracer(tracer)
+ tracing.enable_tracing(haystack_tracer)
+ ```
+
+ 5. Run your pipeline
| [
{
"components": [
{
"doc": "Interface for an instrumented operation.",
"lines": [
6,
36
],
"name": "Span",
"signature": "class Span(abc.ABC):",
"type": "class"
},
{
"doc": "Set a single tag on the span.\n\nNote that th... | [
"test/core/pipeline/test_tracing.py::TestTracing::test_with_enabled_tracing",
"test/tracing/test_tracer.py::TestNullTracer::test_tracing",
"test/tracing/test_tracer.py::TestProxyTracer::test_tracing",
"test/tracing/test_tracer.py::TestConfigureTracer::test_enable_tracer",
"test/tracing/test_tracer.py::TestC... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: implement pipeline tracing
### Related Issues
- related to https://github.com/deepset-ai/haystack/issues/7026
### Proposed Changes:
- add tracing to pipeline runs and components
- add interface to implement custom tracer
- allow enabling / disabling it
### How did you test it?
- added unit tests
### Notes for the reviewer
- I'd implement the datadog / opentelemetry tracer in separate PRs to keep them concise ⚠️
- I'd implement content tracing (actual input / output) in separate PRs ⚠️
- To test
1. Install libs
```bash
pip install opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
pip install opentelemetry-instrumentation-urllib3
pip install opentelemetry-instrumentation-openai
```
2. Set up OpenTelemetry
```python
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry import metrics
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
# Service name is required for most backends
resource = Resource(attributes={
SERVICE_NAME: "haystack"
})
traceProvider = TracerProvider(resource=resource)
processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
traceProvider.add_span_processor(processor)
trace.set_tracer_provider(traceProvider)
tracer = traceProvider.get_tracer("my_application")
from opentelemetry.instrumentation.urllib3 import URLLib3Instrumentor
URLLib3Instrumentor().instrument()
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
OpenAIInstrumentor().instrument()
```
3. Start tracing backend
```bash
docker run --rm -d --name jaeger \
-e COLLECTOR_ZIPKIN_HOST_PORT=:9411 \
-p 6831:6831/udp \
-p 6832:6832/udp \
-p 5778:5778 \
-p 16686:16686 \
-p 4317:4317 \
-p 4318:4318 \
-p 14250:14250 \
-p 14268:14268 \
-p 14269:14269 \
-p 9411:9411 \
jaegertracing/all-in-one:1
```
2. Create tracer
```python
import contextlib
from typing import Optional, Dict, Any, Iterator
from opentelemetry import trace
from opentelemetry.trace import NonRecordingSpan
from haystack.tracing import Tracer, Span
from haystack.tracing import utils as tracing_utils
import opentelemetry.trace
class OpenTelemetrySpan(Span):
def __init__(self, span: opentelemetry.trace.Span) -> None:
self._span = span
def set_tag(self, key: str, value: Any) -> None:
coerced_value = tracing_utils.coerce_tag_value(value)
self._span.set_attribute(key, coerced_value)
class OpenTelemetryTracer(Tracer):
def __init__(self, tracer: opentelemetry.trace.Tracer) -> None:
self._tracer = tracer
@contextlib.contextmanager
def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
with self._tracer.start_as_current_span(operation_name) as span:
span = OpenTelemetrySpan(span)
if tags:
span.set_tags(tags)
yield span
def current_span(self) -> Optional[Span]:
current_span = trace.get_current_span()
if isinstance(current_span, NonRecordingSpan):
return None
return OpenTelemetrySpan(current_span)
```
5. Enable tracing
```
from haystack import tracing
haystack_tracer = OpenTelemetryTracer(tracer)
tracing.enable_tracing(haystack_tracer)
```
6. Run any Haystack pipeline via `pipeline.run`
7. Look at trace in Jaeger UI: http://localhost:16686/search
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue

----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/tracing/tracer.py]
(definition of Span:)
class Span(abc.ABC):
"""Interface for an instrumented operation."""
(definition of Span.set_tag:)
def set_tag(self, key: str, value: Any) -> None:
"""Set a single tag on the span.
Note that the value will be serialized to a string, so it's best to use simple types like strings, numbers, or
booleans.
:param key: the name of the tag.
:param value: the value of the tag."""
(definition of Span.set_tags:)
def set_tags(self, tags: Dict[str, Any]) -> None:
"""Set multiple tags on the span.
:param tags: a mapping of tag names to tag values."""
(definition of Span.raw_span:)
def raw_span(self) -> Any:
"""Provides access to the underlying span object of the tracer.
Use this if you need full access to the underlying span object.
:return: The underlying span object."""
(definition of Tracer:)
class Tracer(abc.ABC):
"""Interface for instrumenting code by creating and submitting spans."""
(definition of Tracer.trace:)
def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
"""Trace the execution of a block of code.
:param operation_name: the name of the operation being traced.
:param tags: tags to apply to the newly created span.
:return: the newly created span."""
(definition of Tracer.current_span:)
def current_span(self) -> Optional[Span]:
"""Returns the currently active span. If no span is active, returns `None`.
:return: Currently active span or `None` if no span is active."""
(definition of ProxyTracer:)
class ProxyTracer(Tracer):
"""Container for the actual tracer instance.
This eases
- replacing the actual tracer instance without having to change the global tracer instance
- implementing default behavior for the tracer"""
(definition of ProxyTracer.__init__:)
def __init__(self, provided_tracer: Tracer) -> None:
(definition of ProxyTracer.trace:)
def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
(definition of ProxyTracer.current_span:)
def current_span(self) -> Optional[Span]:
(definition of NullSpan:)
class NullSpan(Span):
"""A no-op implementation of the `Span` interface. This is used when tracing is disabled."""
(definition of NullSpan.set_tag:)
def set_tag(self, key: str, value: Any) -> None:
(definition of NullTracer:)
class NullTracer(Tracer):
"""A no-op implementation of the `Tracer` interface. This is used when tracing is disabled."""
(definition of NullTracer.trace:)
def trace(self, operation_name: str, tags: Optional[Dict[str, Any]] = None) -> Iterator[Span]:
(definition of NullTracer.current_span:)
def current_span(self) -> Optional[Span]:
(definition of enable_tracing:)
def enable_tracing(provided_tracer: Tracer) -> None:
"""Enable tracing by setting the global tracer instance."""
(definition of disable_tracing:)
def disable_tracing() -> None:
"""Disable tracing by setting the global tracer instance to a no-op tracer."""
(definition of is_tracing_enabled:)
def is_tracing_enabled() -> bool:
"""Return whether tracing is enabled."""
[end of new definitions in haystack/tracing/tracer.py]
[start of new definitions in haystack/tracing/utils.py]
(definition of coerce_tag_value:)
def coerce_tag_value(value: Any) -> Union[bool, str, int, float]:
"""Coerces span tag values to compatible types for the tracing backend.
Most tracing libraries don't support sending complex types to the backend. Hence, we need to convert them to
compatible types.
:param value: an arbitrary value which should be coerced to a compatible type
:return: the value coerced to a compatible type"""
[end of new definitions in haystack/tracing/utils.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
deepset-ai__haystack-7042 | 7,042 | deepset-ai/haystack | null | 05af9c3439b05c8e730a3185fc84e2b45cda22b3 | 2024-02-20T08:45:55Z | diff --git a/haystack/components/evaluators/statistical_evaluator.py b/haystack/components/evaluators/statistical_evaluator.py
index a6c0096410..abf3386872 100644
--- a/haystack/components/evaluators/statistical_evaluator.py
+++ b/haystack/components/evaluators/statistical_evaluator.py
@@ -19,6 +19,7 @@ class StatisticalMetric(Enum):
EM = "exact_match"
RECALL_SINGLE_HIT = "recall_single_hit"
RECALL_MULTI_HIT = "recall_multi_hit"
+ MRR = "mean_reciprocal_rank"
@classmethod
def from_str(cls, metric: str) -> "StatisticalMetric":
@@ -55,6 +56,7 @@ def __init__(self, metric: Union[str, StatisticalMetric]):
StatisticalMetric.EM: self._exact_match,
StatisticalMetric.RECALL_SINGLE_HIT: self._recall_single_hit,
StatisticalMetric.RECALL_MULTI_HIT: self._recall_multi_hit,
+ StatisticalMetric.MRR: self._mrr,
}[self._metric]
def to_dict(self) -> Dict[str, Any]:
@@ -111,7 +113,7 @@ def _f1(labels: List[str], predictions: List[str]):
@staticmethod
def _exact_match(labels: List[str], predictions: List[str]) -> float:
"""
- Measure the proportion of cases where predictiond is identical to the the expected label.
+ Measure the proportion of cases where prediction is identical to the the expected label.
"""
if len(labels) != len(predictions):
raise ValueError("The number of predictions and labels must be the same.")
@@ -150,3 +152,20 @@ def _recall_multi_hit(labels: List[str], predictions: List[str]) -> float:
correct_retrievals += 1
return correct_retrievals / len(labels)
+
+ @staticmethod
+ def _mrr(labels: List[str], predictions: List[str]) -> float:
+ """
+ Measures the mean reciprocal rank of times a label is present in at least one or more predictions.
+ """
+ if len(labels) == 0:
+ return 0.0
+
+ mrr_sum = 0.0
+ for label in labels:
+ for rank, prediction in enumerate(predictions):
+ if label in prediction:
+ mrr_sum += 1 / (rank + 1)
+ break
+
+ return mrr_sum / len(labels)
diff --git a/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml b/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml
new file mode 100644
index 0000000000..2048fcf8d7
--- /dev/null
+++ b/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add support for Mean Reciprocal Rank (MRR) Metric to `StatisticalEvaluator`.
+ MRR measures the mean reciprocal rank of times a label is present in at least one or more predictions.
| diff --git a/test/components/evaluators/test_statistical_evaluator.py b/test/components/evaluators/test_statistical_evaluator.py
index 619b258433..51efb1e98d 100644
--- a/test/components/evaluators/test_statistical_evaluator.py
+++ b/test/components/evaluators/test_statistical_evaluator.py
@@ -189,3 +189,37 @@ def test_run_with_empty_predictions(self):
result = evaluator.run(labels=labels, predictions=[])
assert len(result) == 1
assert result["result"] == 0.0
+
+
+class TestStatisticalEvaluatorMRR:
+ def test_run(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.MRR)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=labels, predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 1 / 3
+
+ def test_run_with_empty_labels(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.MRR)
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=[], predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 0.0
+
+ def test_run_with_empty_predictions(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.MRR)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ result = evaluator.run(labels=labels, predictions=[])
+ assert len(result) == 1
+ assert result["result"] == 0.0
| diff --git a/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml b/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml
new file mode 100644
index 0000000000..2048fcf8d7
--- /dev/null
+++ b/releasenotes/notes/add-mrr-metric-362527e55e21c24c.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add support for Mean Reciprocal Rank (MRR) Metric to `StatisticalEvaluator`.
+ MRR measures the mean reciprocal rank of times a label is present in at least one or more predictions.
| [
{
"components": [
{
"doc": "Measures the mean reciprocal rank of times a label is present in at least one or more predictions.",
"lines": [
157,
171
],
"name": "StatisticalEvaluator._mrr",
"signature": "def _mrr(labels: List[str], predictions: Li... | [
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorMRR::test_run",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorMRR::test_run_with_empty_labels",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorMRR::test_run... | [
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_init_default",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_init_with_string",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_to_dict"... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add Mean Reciprocal Rank (MRR) metric to `StatisticalEvaluator`
### Related Issues
fixes #6065
### Proposed Changes:
Add support for Mean Reciprocal Rank (MRR) Metric to `StatisticalEvaluator`.
MRR measures the mean reciprocal rank of times a label is present in at least one or more predictions.
### How did you test it?
Unit tests were added to `test_statistical_evaluator.py`.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/statistical_evaluator.py]
(definition of StatisticalEvaluator._mrr:)
def _mrr(labels: List[str], predictions: List[str]) -> float:
"""Measures the mean reciprocal rank of times a label is present in at least one or more predictions."""
[end of new definitions in haystack/components/evaluators/statistical_evaluator.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement function to calculate Mean Reciprocal Rank metric
As specified in proposal #5794 we need to implement a function to calculate the Mean Reciprocal Rank metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_mrr()` could be a nice name.
For more detailed information check out the original proposal.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
deepset-ai__haystack-7038 | 7,038 | deepset-ai/haystack | null | 5910b4adc9b2688155abb8d2290e5cf56833eb0b | 2024-02-19T15:37:23Z | diff --git a/haystack/components/evaluators/statistical_evaluator.py b/haystack/components/evaluators/statistical_evaluator.py
index 6f65fc105b..a6c0096410 100644
--- a/haystack/components/evaluators/statistical_evaluator.py
+++ b/haystack/components/evaluators/statistical_evaluator.py
@@ -1,4 +1,5 @@
import collections
+import itertools
from enum import Enum
from typing import Any, Dict, List, Union
@@ -16,6 +17,8 @@ class StatisticalMetric(Enum):
F1 = "f1"
EM = "exact_match"
+ RECALL_SINGLE_HIT = "recall_single_hit"
+ RECALL_MULTI_HIT = "recall_multi_hit"
@classmethod
def from_str(cls, metric: str) -> "StatisticalMetric":
@@ -47,7 +50,12 @@ def __init__(self, metric: Union[str, StatisticalMetric]):
metric = StatisticalMetric.from_str(metric)
self._metric = metric
- self._metric_function = {StatisticalMetric.F1: self._f1, StatisticalMetric.EM: self._exact_match}[self._metric]
+ self._metric_function = {
+ StatisticalMetric.F1: self._f1,
+ StatisticalMetric.EM: self._exact_match,
+ StatisticalMetric.RECALL_SINGLE_HIT: self._recall_single_hit,
+ StatisticalMetric.RECALL_MULTI_HIT: self._recall_multi_hit,
+ }[self._metric]
def to_dict(self) -> Dict[str, Any]:
return default_to_dict(self, metric=self._metric.value)
@@ -68,9 +76,6 @@ def run(self, labels: List[str], predictions: List[str]) -> Dict[str, Any]:
:returns: A dictionary with the following outputs:
* `result` - Calculated result of the chosen metric.
"""
- if len(labels) != len(predictions):
- raise ValueError("The number of predictions and labels must be the same.")
-
return {"result": self._metric_function(labels, predictions)}
@staticmethod
@@ -78,6 +83,9 @@ def _f1(labels: List[str], predictions: List[str]):
"""
Measure word overlap between predictions and labels.
"""
+ if len(labels) != len(predictions):
+ raise ValueError("The number of predictions and labels must be the same.")
+
if len(predictions) == 0:
# We expect callers of this function already checked if predictions and labels are equal length
return 0.0
@@ -105,8 +113,40 @@ def _exact_match(labels: List[str], predictions: List[str]) -> float:
"""
Measure the proportion of cases where predictiond is identical to the the expected label.
"""
+ if len(labels) != len(predictions):
+ raise ValueError("The number of predictions and labels must be the same.")
+
if len(predictions) == 0:
# We expect callers of this function already checked if predictions and labels are equal length
return 0.0
score_list = np_array(predictions) == np_array(labels)
return np_mean(score_list)
+
+ @staticmethod
+ def _recall_single_hit(labels: List[str], predictions: List[str]) -> float:
+ """
+ Measures how many times a label is present in at least one prediction.
+ If the same label is found in multiple predictions it is only counted once.
+ """
+ if len(labels) == 0:
+ return 0.0
+
+ # In Recall Single Hit we only consider if a label is present in at least one prediction.
+ # No need to count multiple occurrences of the same label in different predictions
+ retrieved_labels = {l for l, p in itertools.product(labels, predictions) if l in p}
+ return len(retrieved_labels) / len(labels)
+
+ @staticmethod
+ def _recall_multi_hit(labels: List[str], predictions: List[str]) -> float:
+ """
+ Measures how many times a label is present in at least one or more predictions.
+ """
+ if len(labels) == 0:
+ return 0.0
+
+ correct_retrievals = 0
+ for label, prediction in itertools.product(labels, predictions):
+ if label in prediction:
+ correct_retrievals += 1
+
+ return correct_retrievals / len(labels)
| diff --git a/test/components/evaluators/test_statistical_evaluator.py b/test/components/evaluators/test_statistical_evaluator.py
index e98899cb71..619b258433 100644
--- a/test/components/evaluators/test_statistical_evaluator.py
+++ b/test/components/evaluators/test_statistical_evaluator.py
@@ -121,3 +121,71 @@ def test_run_with_mismatched_predictions(self):
result = evaluator.run(labels=labels, predictions=predictions)
assert len(result) == 1
assert result["result"] == 2 / 3
+
+
+class TestStatisticalEvaluatorRecallSingleHit:
+ def test_run(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_SINGLE_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=labels, predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 2 / 4
+
+ def test_run_with_empty_labels(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_SINGLE_HIT)
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=[], predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 0.0
+
+ def test_run_with_empty_predictions(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_SINGLE_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ result = evaluator.run(labels=labels, predictions=[])
+ assert len(result) == 1
+ assert result["result"] == 0.0
+
+
+class TestStatisticalEvaluatorRecallMultiHit:
+ def test_run(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_MULTI_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=labels, predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 0.75
+
+ def test_run_with_empty_labels(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_MULTI_HIT)
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=[], predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 0.0
+
+ def test_run_with_empty_predictions(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_MULTI_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ result = evaluator.run(labels=labels, predictions=[])
+ assert len(result) == 1
+ assert result["result"] == 0.0
| [
{
"components": [
{
"doc": "Measures how many times a label is present in at least one prediction.\nIf the same label is found in multiple predictions it is only counted once.",
"lines": [
126,
137
],
"name": "StatisticalEvaluator._recall_single_hit",
... | [
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallSingleHit::test_run",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallSingleHit::test_run_with_empty_labels",
"test/components/evaluators/test_statistical_evaluator.py::TestStatistic... | [
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_init_default",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_init_with_string",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_to_dict"... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add Recall Multi Hit and Single Hit metric in `StatisticalEvaluator`
### Related Issues
- fixes #6064
### Proposed Changes:
Add support for Recall Single Hit and Recall Multi Hit metrics in `StatisticalEvaluator` Component.
They both measure how many times a label appears in a list of predictions, the first though takes into account only the first match it finds.
### How did you test it?
I added new unit tests.
### Notes for the reviewer
I chose to ignore the release notes as the `StatisticalEvaluator` still has not been release in any beta version and the PR that introduced it already has a release note.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/evaluators/statistical_evaluator.py]
(definition of StatisticalEvaluator._recall_single_hit:)
def _recall_single_hit(labels: List[str], predictions: List[str]) -> float:
"""Measures how many times a label is present in at least one prediction.
If the same label is found in multiple predictions it is only counted once."""
(definition of StatisticalEvaluator._recall_multi_hit:)
def _recall_multi_hit(labels: List[str], predictions: List[str]) -> float:
"""Measures how many times a label is present in at least one or more predictions."""
[end of new definitions in haystack/components/evaluators/statistical_evaluator.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement function to calculate Recall metric
As specified in proposal #5794 we need to implement a function to calculate the Recall metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_recall()` could be a nice name.
For more detailed information check out the original proposal.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
sphinx-doc__sphinx-11989 | 11,989 | sphinx-doc/sphinx | 7.4 | e7beb8bc5c647d15fef9b5a2a9136b6a605d35db | 2024-02-18T23:36:36Z | diff --git a/CHANGES.rst b/CHANGES.rst
index 1f178f5c200..ab609469f98 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -74,6 +74,9 @@ Features added
* #11592: Add :confval:`coverage_modules` to the coverage builder
to allow explicitly specifying which modules should be documented.
Patch by Stephen Finucane.
+* #7896, #11989: Add a :rst:dir:`py:type` directiv for documenting type aliases,
+ and a :rst:role:`py:type` role for linking to them.
+ Patch by Ashley Whetter.
Bugs fixed
----------
diff --git a/doc/usage/domains/python.rst b/doc/usage/domains/python.rst
index 96982f12e32..5667bd7a9b6 100644
--- a/doc/usage/domains/python.rst
+++ b/doc/usage/domains/python.rst
@@ -124,8 +124,9 @@ The following directives are provided for module and class contents:
.. rst:directive:: .. py:data:: name
Describes global data in a module, including both variables and values used
- as "defined constants." Class and object attributes are not documented
- using this environment.
+ as "defined constants."
+ Consider using :rst:dir:`py:type` for type aliases instead
+ and :rst:dir:`py:attribute` for class variables and instance attributes.
.. rubric:: options
@@ -259,6 +260,7 @@ The following directives are provided for module and class contents:
Describes an object data attribute. The description should include
information about the type of the data to be expected and whether it may be
changed directly.
+ Type aliases should be documented with :rst:dir:`py:type`.
.. rubric:: options
@@ -315,6 +317,55 @@ The following directives are provided for module and class contents:
Describe the location where the object is defined. The default value is
the module specified by :rst:dir:`py:currentmodule`.
+.. rst:directive:: .. py:type:: name
+
+ Describe a :ref:`type alias <python:type-aliases>`.
+
+ The type that the alias represents should be described
+ with the :rst:dir:`!canonical` option.
+ This directive supports an optional description body.
+
+ For example:
+
+ .. code-block:: rst
+
+ .. py:type:: UInt64
+
+ Represent a 64-bit positive integer.
+
+ will be rendered as follows:
+
+ .. py:type:: UInt64
+ :no-contents-entry:
+ :no-index-entry:
+
+ Represent a 64-bit positive integer.
+
+ .. rubric:: options
+
+ .. rst:directive:option:: canonical
+ :type: text
+
+ The canonical type represented by this alias, for example:
+
+ .. code-block:: rst
+
+ .. py:type:: StrPattern
+ :canonical: str | re.Pattern[str]
+
+ Represent a regular expression or a compiled pattern.
+
+ This is rendered as:
+
+ .. py:type:: StrPattern
+ :no-contents-entry:
+ :no-index-entry:
+ :canonical: str | re.Pattern[str]
+
+ Represent a regular expression or a compiled pattern.
+
+ .. versionadded:: 7.4
+
.. rst:directive:: .. py:method:: name(parameters)
.. py:method:: name[type parameters](parameters)
@@ -649,6 +700,10 @@ a matching identifier is found:
.. note:: The role is also able to refer to property.
+.. rst:role:: py:type
+
+ Reference a type alias.
+
.. rst:role:: py:exc
Reference an exception. A dotted name may be used.
diff --git a/sphinx/domains/python/__init__.py b/sphinx/domains/python/__init__.py
index 75c2cddfba5..8f1c7d6d22d 100644
--- a/sphinx/domains/python/__init__.py
+++ b/sphinx/domains/python/__init__.py
@@ -389,6 +389,45 @@ def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
return _('%s (%s property)') % (attrname, clsname)
+class PyTypeAlias(PyObject):
+ """Description of a type alias."""
+
+ option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()
+ option_spec.update({
+ 'canonical': directives.unchanged,
+ })
+
+ def get_signature_prefix(self, sig: str) -> list[nodes.Node]:
+ return [nodes.Text('type'), addnodes.desc_sig_space()]
+
+ def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
+ fullname, prefix = super().handle_signature(sig, signode)
+ if canonical := self.options.get('canonical'):
+ canonical_annotations = _parse_annotation(canonical, self.env)
+ signode += addnodes.desc_annotation(
+ canonical, '',
+ addnodes.desc_sig_space(),
+ addnodes.desc_sig_punctuation('', '='),
+ addnodes.desc_sig_space(),
+ *canonical_annotations,
+ )
+ return fullname, prefix
+
+ def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
+ name, cls = name_cls
+ try:
+ clsname, attrname = name.rsplit('.', 1)
+ if modname and self.env.config.add_module_names:
+ clsname = f'{modname}.{clsname}'
+ except ValueError:
+ if modname:
+ return _('%s (in module %s)') % (name, modname)
+ else:
+ return name
+
+ return _('%s (type alias in %s)') % (attrname, clsname)
+
+
class PyModule(SphinxDirective):
"""
Directive to mark description of a new module.
@@ -590,6 +629,7 @@ class PythonDomain(Domain):
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'property': ObjType(_('property'), 'attr', '_prop', 'obj'),
+ 'type': ObjType(_('type alias'), 'type', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
}
@@ -603,6 +643,7 @@ class PythonDomain(Domain):
'staticmethod': PyStaticMethod,
'attribute': PyAttribute,
'property': PyProperty,
+ 'type': PyTypeAlias,
'module': PyModule,
'currentmodule': PyCurrentModule,
'decorator': PyDecoratorFunction,
@@ -615,6 +656,7 @@ class PythonDomain(Domain):
'class': PyXRefRole(),
'const': PyXRefRole(),
'attr': PyXRefRole(),
+ 'type': PyXRefRole(),
'meth': PyXRefRole(fix_parens=True),
'mod': PyXRefRole(),
'obj': PyXRefRole(),
| diff --git a/tests/roots/test-domain-py/index.rst b/tests/roots/test-domain-py/index.rst
index b24bbea244a..71e45f744a6 100644
--- a/tests/roots/test-domain-py/index.rst
+++ b/tests/roots/test-domain-py/index.rst
@@ -8,3 +8,4 @@ test-domain-py
module_option
abbr
canonical
+ type_alias
diff --git a/tests/roots/test-domain-py/module.rst b/tests/roots/test-domain-py/module.rst
index 70098f68752..307e786e3ea 100644
--- a/tests/roots/test-domain-py/module.rst
+++ b/tests/roots/test-domain-py/module.rst
@@ -64,3 +64,6 @@ module
.. py:data:: test2
:type: typing.Literal[-2]
+
+.. py:type:: MyType1
+ :canonical: list[int | str]
diff --git a/tests/roots/test-domain-py/roles.rst b/tests/roots/test-domain-py/roles.rst
index 6bff2d2ca1b..d3492ceefb9 100644
--- a/tests/roots/test-domain-py/roles.rst
+++ b/tests/roots/test-domain-py/roles.rst
@@ -5,14 +5,19 @@ roles
.. py:method:: top_level
+.. py:type:: TopLevelType
+
* :py:class:`TopLevel`
* :py:meth:`top_level`
+* :py:type:`TopLevelType`
.. py:class:: NestedParentA
* Link to :py:meth:`child_1`
+ .. py:type:: NestedTypeA
+
.. py:method:: child_1()
* Link to :py:meth:`NestedChildA.subchild_2`
@@ -46,3 +51,4 @@ roles
* Link to :py:class:`NestedParentB`
* :py:class:`NestedParentA.NestedChildA`
+* :py:type:`NestedParentA.NestedTypeA`
diff --git a/tests/roots/test-domain-py/type_alias.rst b/tests/roots/test-domain-py/type_alias.rst
new file mode 100644
index 00000000000..6a3df44daae
--- /dev/null
+++ b/tests/roots/test-domain-py/type_alias.rst
@@ -0,0 +1,15 @@
+Type Alias
+==========
+
+.. py:module:: module_two
+
+ .. py:class:: SomeClass
+
+:py:type:`.MyAlias`
+:any:`MyAlias`
+:any:`module_one.MyAlias`
+
+.. py:module:: module_one
+
+ .. py:type:: MyAlias
+ :canonical: list[int | module_two.SomeClass]
diff --git a/tests/test_domains/test_domain_py.py b/tests/test_domains/test_domain_py.py
index e653c80fcb1..3f45842d8b8 100644
--- a/tests/test_domains/test_domain_py.py
+++ b/tests/test_domains/test_domain_py.py
@@ -92,19 +92,21 @@ def assert_refnode(node, module_name, class_name, target, reftype=None,
refnodes = list(doctree.findall(pending_xref))
assert_refnode(refnodes[0], None, None, 'TopLevel', 'class')
assert_refnode(refnodes[1], None, None, 'top_level', 'meth')
- assert_refnode(refnodes[2], None, 'NestedParentA', 'child_1', 'meth')
- assert_refnode(refnodes[3], None, 'NestedParentA', 'NestedChildA.subchild_2', 'meth')
- assert_refnode(refnodes[4], None, 'NestedParentA', 'child_2', 'meth')
- assert_refnode(refnodes[5], False, 'NestedParentA', 'any_child', domain='')
- assert_refnode(refnodes[6], None, 'NestedParentA', 'NestedChildA', 'class')
- assert_refnode(refnodes[7], None, 'NestedParentA.NestedChildA', 'subchild_2', 'meth')
- assert_refnode(refnodes[8], None, 'NestedParentA.NestedChildA',
+ assert_refnode(refnodes[2], None, None, 'TopLevelType', 'type')
+ assert_refnode(refnodes[3], None, 'NestedParentA', 'child_1', 'meth')
+ assert_refnode(refnodes[4], None, 'NestedParentA', 'NestedChildA.subchild_2', 'meth')
+ assert_refnode(refnodes[5], None, 'NestedParentA', 'child_2', 'meth')
+ assert_refnode(refnodes[6], False, 'NestedParentA', 'any_child', domain='')
+ assert_refnode(refnodes[7], None, 'NestedParentA', 'NestedChildA', 'class')
+ assert_refnode(refnodes[8], None, 'NestedParentA.NestedChildA', 'subchild_2', 'meth')
+ assert_refnode(refnodes[9], None, 'NestedParentA.NestedChildA',
'NestedParentA.child_1', 'meth')
- assert_refnode(refnodes[9], None, 'NestedParentA', 'NestedChildA.subchild_1', 'meth')
- assert_refnode(refnodes[10], None, 'NestedParentB', 'child_1', 'meth')
- assert_refnode(refnodes[11], None, 'NestedParentB', 'NestedParentB', 'class')
- assert_refnode(refnodes[12], None, None, 'NestedParentA.NestedChildA', 'class')
- assert len(refnodes) == 13
+ assert_refnode(refnodes[10], None, 'NestedParentA', 'NestedChildA.subchild_1', 'meth')
+ assert_refnode(refnodes[11], None, 'NestedParentB', 'child_1', 'meth')
+ assert_refnode(refnodes[12], None, 'NestedParentB', 'NestedParentB', 'class')
+ assert_refnode(refnodes[13], None, None, 'NestedParentA.NestedChildA', 'class')
+ assert_refnode(refnodes[14], None, None, 'NestedParentA.NestedTypeA', 'type')
+ assert len(refnodes) == 15
doctree = app.env.get_doctree('module')
refnodes = list(doctree.findall(pending_xref))
@@ -135,7 +137,10 @@ def assert_refnode(node, module_name, class_name, target, reftype=None,
assert_refnode(refnodes[15], False, False, 'index', 'doc', domain='std')
assert_refnode(refnodes[16], False, False, 'typing.Literal', 'obj', domain='py')
assert_refnode(refnodes[17], False, False, 'typing.Literal', 'obj', domain='py')
- assert len(refnodes) == 18
+ assert_refnode(refnodes[18], False, False, 'list', 'class', domain='py')
+ assert_refnode(refnodes[19], False, False, 'int', 'class', domain='py')
+ assert_refnode(refnodes[20], False, False, 'str', 'class', domain='py')
+ assert len(refnodes) == 21
doctree = app.env.get_doctree('module_option')
refnodes = list(doctree.findall(pending_xref))
@@ -191,7 +196,9 @@ def test_domain_py_objects(app, status, warning):
assert objects['TopLevel'][2] == 'class'
assert objects['top_level'][2] == 'method'
+ assert objects['TopLevelType'][2] == 'type'
assert objects['NestedParentA'][2] == 'class'
+ assert objects['NestedParentA.NestedTypeA'][2] == 'type'
assert objects['NestedParentA.child_1'][2] == 'method'
assert objects['NestedParentA.any_child'][2] == 'method'
assert objects['NestedParentA.NestedChildA'][2] == 'class'
@@ -233,6 +240,9 @@ def find_obj(modname, prefix, obj_name, obj_type, searchmode=0):
assert (find_obj(None, None, 'NONEXISTANT', 'class') == [])
assert (find_obj(None, None, 'NestedParentA', 'class') ==
[('NestedParentA', ('roles', 'NestedParentA', 'class', False))])
+ assert (find_obj(None, None, 'NestedParentA.NestedTypeA', 'type') ==
+ [('NestedParentA.NestedTypeA',
+ ('roles', 'NestedParentA.NestedTypeA', 'type', False))])
assert (find_obj(None, None, 'NestedParentA.NestedChildA', 'class') ==
[('NestedParentA.NestedChildA',
('roles', 'NestedParentA.NestedChildA', 'class', False))])
diff --git a/tests/test_domains/test_domain_py_pyobject.py b/tests/test_domains/test_domain_py_pyobject.py
index 04f934102e1..adc0453818f 100644
--- a/tests/test_domains/test_domain_py_pyobject.py
+++ b/tests/test_domains/test_domain_py_pyobject.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import pytest
from docutils import nodes
from sphinx import addnodes
@@ -362,6 +363,76 @@ def test_pyproperty(app):
assert domain.objects['Class.prop2'] == ('index', 'Class.prop2', 'property', False)
+def test_py_type_alias(app):
+ text = (".. py:module:: example\n"
+ ".. py:type:: Alias1\n"
+ " :canonical: list[str | int]\n"
+ "\n"
+ ".. py:class:: Class\n"
+ "\n"
+ " .. py:type:: Alias2\n"
+ " :canonical: int\n")
+ domain = app.env.get_domain('py')
+ doctree = restructuredtext.parse(app, text)
+ assert_node(doctree, (addnodes.index,
+ addnodes.index,
+ nodes.target,
+ [desc, ([desc_signature, ([desc_annotation, ('type', desc_sig_space)],
+ [desc_addname, 'example.'],
+ [desc_name, 'Alias1'],
+ [desc_annotation, (desc_sig_space,
+ [desc_sig_punctuation, '='],
+ desc_sig_space,
+ [pending_xref, 'list'],
+ [desc_sig_punctuation, '['],
+ [pending_xref, 'str'],
+ desc_sig_space,
+ [desc_sig_punctuation, '|'],
+ desc_sig_space,
+ [pending_xref, 'int'],
+ [desc_sig_punctuation, ']'],
+ )])],
+ [desc_content, ()])],
+ addnodes.index,
+ [desc, ([desc_signature, ([desc_annotation, ('class', desc_sig_space)],
+ [desc_addname, 'example.'],
+ [desc_name, 'Class'])],
+ [desc_content, (addnodes.index,
+ desc)])]))
+ assert_node(doctree[5][1][0], addnodes.index,
+ entries=[('single', 'Alias2 (type alias in example.Class)', 'example.Class.Alias2', '', None)])
+ assert_node(doctree[5][1][1], ([desc_signature, ([desc_annotation, ('type', desc_sig_space)],
+ [desc_name, 'Alias2'],
+ [desc_annotation, (desc_sig_space,
+ [desc_sig_punctuation, '='],
+ desc_sig_space,
+ [pending_xref, 'int'])])],
+ [desc_content, ()]))
+ assert 'example.Alias1' in domain.objects
+ assert domain.objects['example.Alias1'] == ('index', 'example.Alias1', 'type', False)
+ assert 'example.Class.Alias2' in domain.objects
+ assert domain.objects['example.Class.Alias2'] == ('index', 'example.Class.Alias2', 'type', False)
+
+
+@pytest.mark.sphinx('html', testroot='domain-py', freshenv=True)
+def test_domain_py_type_alias(app, status, warning):
+ app.build(force_all=True)
+
+ content = (app.outdir / 'type_alias.html').read_text(encoding='utf8')
+ assert ('<em class="property"><span class="pre">type</span><span class="w"> </span></em>'
+ '<span class="sig-prename descclassname"><span class="pre">module_one.</span></span>'
+ '<span class="sig-name descname"><span class="pre">MyAlias</span></span>'
+ '<em class="property"><span class="w"> </span><span class="p"><span class="pre">=</span></span>'
+ '<span class="w"> </span><span class="pre">list</span>'
+ '<span class="p"><span class="pre">[</span></span>'
+ '<span class="pre">int</span><span class="w"> </span>'
+ '<span class="p"><span class="pre">|</span></span><span class="w"> </span>'
+ '<a class="reference internal" href="#module_two.SomeClass" title="module_two.SomeClass">'
+ '<span class="pre">module_two.SomeClass</span></a>'
+ '<span class="p"><span class="pre">]</span></span></em>' in content)
+ assert warning.getvalue() == ''
+
+
def test_pydecorator_signature(app):
text = ".. py:decorator:: deco"
domain = app.env.get_domain('py')
| diff --git a/CHANGES.rst b/CHANGES.rst
index 1f178f5c200..ab609469f98 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -74,6 +74,9 @@ Features added
* #11592: Add :confval:`coverage_modules` to the coverage builder
to allow explicitly specifying which modules should be documented.
Patch by Stephen Finucane.
+* #7896, #11989: Add a :rst:dir:`py:type` directiv for documenting type aliases,
+ and a :rst:role:`py:type` role for linking to them.
+ Patch by Ashley Whetter.
Bugs fixed
----------
diff --git a/doc/usage/domains/python.rst b/doc/usage/domains/python.rst
index 96982f12e32..5667bd7a9b6 100644
--- a/doc/usage/domains/python.rst
+++ b/doc/usage/domains/python.rst
@@ -124,8 +124,9 @@ The following directives are provided for module and class contents:
.. rst:directive:: .. py:data:: name
Describes global data in a module, including both variables and values used
- as "defined constants." Class and object attributes are not documented
- using this environment.
+ as "defined constants."
+ Consider using :rst:dir:`py:type` for type aliases instead
+ and :rst:dir:`py:attribute` for class variables and instance attributes.
.. rubric:: options
@@ -259,6 +260,7 @@ The following directives are provided for module and class contents:
Describes an object data attribute. The description should include
information about the type of the data to be expected and whether it may be
changed directly.
+ Type aliases should be documented with :rst:dir:`py:type`.
.. rubric:: options
@@ -315,6 +317,55 @@ The following directives are provided for module and class contents:
Describe the location where the object is defined. The default value is
the module specified by :rst:dir:`py:currentmodule`.
+.. rst:directive:: .. py:type:: name
+
+ Describe a :ref:`type alias <python:type-aliases>`.
+
+ The type that the alias represents should be described
+ with the :rst:dir:`!canonical` option.
+ This directive supports an optional description body.
+
+ For example:
+
+ .. code-block:: rst
+
+ .. py:type:: UInt64
+
+ Represent a 64-bit positive integer.
+
+ will be rendered as follows:
+
+ .. py:type:: UInt64
+ :no-contents-entry:
+ :no-index-entry:
+
+ Represent a 64-bit positive integer.
+
+ .. rubric:: options
+
+ .. rst:directive:option:: canonical
+ :type: text
+
+ The canonical type represented by this alias, for example:
+
+ .. code-block:: rst
+
+ .. py:type:: StrPattern
+ :canonical: str | re.Pattern[str]
+
+ Represent a regular expression or a compiled pattern.
+
+ This is rendered as:
+
+ .. py:type:: StrPattern
+ :no-contents-entry:
+ :no-index-entry:
+ :canonical: str | re.Pattern[str]
+
+ Represent a regular expression or a compiled pattern.
+
+ .. versionadded:: 7.4
+
.. rst:directive:: .. py:method:: name(parameters)
.. py:method:: name[type parameters](parameters)
@@ -649,6 +700,10 @@ a matching identifier is found:
.. note:: The role is also able to refer to property.
+.. rst:role:: py:type
+
+ Reference a type alias.
+
.. rst:role:: py:exc
Reference an exception. A dotted name may be used.
| [
{
"components": [
{
"doc": "Description of a type alias.",
"lines": [
392,
428
],
"name": "PyTypeAlias",
"signature": "class PyTypeAlias(PyObject):",
"type": "class"
},
{
"doc": "",
"lines": [
400,
... | [
"tests/test_domains/test_domain_py.py::test_domain_py_xrefs",
"tests/test_domains/test_domain_py.py::test_domain_py_objects",
"tests/test_domains/test_domain_py.py::test_domain_py_find_obj",
"tests/test_domains/test_domain_py_pyobject.py::test_py_type_alias",
"tests/test_domains/test_domain_py_pyobject.py::... | [
"tests/test_domains/test_domain_py.py::test_function_signatures",
"tests/test_domains/test_domain_py.py::test_domain_py_xrefs_abbreviations",
"tests/test_domains/test_domain_py.py::test_resolve_xref_for_properties",
"tests/test_domains/test_domain_py.py::test_get_full_qualified_name",
"tests/test_domains/te... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add py:type directive and role for documenting type aliases
### Feature or Bugfix
- Feature
### Purpose
This pull request adds a `py:type` directive for documenting type aliases (https://typing.readthedocs.io/en/latest/spec/aliases.html#type-aliases), and a `py:type` role for linking to them.
```rst
.. py:type:: Alias1
:value: list[str | int]
This is my type alias.
:py:type:`Alias1` is my type alias.
```
### Detail
- I've chosen to make a new directive for documenting type aliases, rather than add a new option to the `data` or `attribute` directives, because not all of the options on the `data` and `attribute` directives apply to a type alias (eg. `type`). In addition, type aliases seem to be classified differently from variable assignments by Python. Python documentation treats type aliases almost like first class citizens.
- Documenting with a new directive means that if a user is currently using `type` in their code, but they're documenting it using the workaround of documenting it as an assignment (eg. `.. py:data:: Url\n :type: TypeAlias\n :value: str`), the user will likely want to update their documentation to use this new directive. I'm assuming that the number of users using `type` in their code is quite low, and therefore this trade off seemed worth it.
- I've chosen to name the directive and role "type" because that's also the keyword used to define a type alias in Python. I considered using "typealias", "alias", or some thing similar, but it seemed safer to choose the same word used in Python syntax because CPython may change the usage of the `type` keyword in the future to define more than just type aliases.
- This overlaps with the `:type:` field, which potentially makes understanding rST syntax more confusing for users who are learning rST for the first time. But this trade-off seemed worth it.
- I've chosen to have users specify the type being aliased in a `:value:` option, rather than in the signature (eg. `.. py:type:: MyAlias = int`), because this is consistent with how the `data` and `attribute` directives work. So I think it makes the syntax more intuitive for users.
- I've chosen to make specifying a value of the alias optional because there could be cases where a user wants to declare the alias, but not make the type that is aliased public. For example:
```python
type MyAlias = str
def generate() -> MyAlias:
...
def accept(arg: MyAlias) -> None:
...
# To be used like the following:
accept(generate())
```
In the above example, a user may want to allow users to pass this type between parts of the API, without letting the user do anything else with instance of `MyAlias`, by not documenting what `MyAlias` is aliased from. So the user would document this as follows:
```rst
.. py:type:: MyAlias
.. py:function:: generate() -> MyAlias
.. py:function:: accept(arg: MyAlias) -> None
```
Users can currently do something similar with classes, where they might define a class but not document any of the attributes on it.
```rst
.. py:class:: MyClass
.. py:function:: generate() -> MyClass
.. py:function:: accept(arg: MyClass) -> None
```
- I've chosen to name the option "value" because this is consistent with the `data` and `attribute` directives. In addition, this is the terminology used in `typing.TypeAliasType` and in the ast node.
```pycon
>>> import ast
>>> class A:
... type MyAlias = int
...
>>> type(A.MyAlias)
<class 'typing.TypeAliasType'>
>>> A.MyAlias.__value__ # Note the attribute name "__value__"
<class 'int'>
>>> m = ast.parse("type PuzzleInput = list[tuple[list[str], int]]")
>>> m.body[0].value # Note the attribute name "value"
<ast.Subscript object at 0x7dd5ba91afd0>
```
### Relates
- Closes #7896
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sphinx/domains/python/__init__.py]
(definition of PyTypeAlias:)
class PyTypeAlias(PyObject):
"""Description of a type alias."""
(definition of PyTypeAlias.get_signature_prefix:)
def get_signature_prefix(self, sig: str) -> list[nodes.Node]:
(definition of PyTypeAlias.handle_signature:)
def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
(definition of PyTypeAlias.get_index_text:)
def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
[end of new definitions in sphinx/domains/python/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
introduce directive for type alias
There's no clear way to document a type alias as distinct from a module-level variable
I suggest adding a `:type:` directive that distinguishes an alias, like
```
DEFAULT_NUMBER_FACTORY = int
""" :var: zero-argument constructor for default numbers. """
Number = int
""" :type: Type used for numbers. """
def foo(a: Number) -> None:
...
```
It's possible we could use the fact that [type aliases aren't meant to have annotations themselves](https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases). I'm not sure which is better.
----------
At present, [PEP-613 (Explicit Type Aliases)](https://www.python.org/dev/peps/pep-0613/) has been discussed. So we should follow the spec in the future.
BTW, the PEP will be introduced since python-3.10 or later (maybe). It means we don't have a way to indicate one variable is a kind of type-alias until then. I understand a need for the workaround to do that.
Pros.
* We can indicate a variable is a type-alias.
* We can indicate it before PEP adopted.
Cons.
* Sphinx adopts a non-standard way as a workaround.
* We need to keep backward compatible for a while even after PEP adopted.
@tk0miya those are some good points. I assume there will be a desire to keep compatibility with <=3.9 for a long while now, esp since 3.9 isn't released yet. Is it therefore feasible to wait for that PEP?
Can we draw conclusions from sphinx's experience with the `:rtype:` directive, which is no longer needed with full type hinting in 3.6?
>Is it therefore feasible to wait for that PEP?
This is the first request for type-alias. So nobody in a hurry now as far as I know. So we can wait for the release of the PEP.
In addition, there are no special representation for type-aliases. So we need to consider how Sphinx represent them in output.
>Can we draw conclusions from sphinx's experience with the :rtype: directive, which is no longer needed with full type hinting in 3.6?
If I can design autodoc from scratch now, I'll drop the `:rtype:` field from spec because it is duplicated with the type annotation. (It might be useful for C extensions. So I can't say I'll surely drop it).
I would like this and I look forward to seeing it in a future release!
It would be great to just click on a custom type alias in the documentation and have it take you to the type definition. It complements the autodoc_type_aliases dictionary well - currently this lets you remove the explicit content of the definition in a place where it adds unnecessary clutter, I am interested in where I will put this explicit definition instead.
--------------------
</issues> | 35e7bfc347f845deff50787f0cd0340ea2ea0a5d |
deepset-ai__haystack-7009 | 7,009 | deepset-ai/haystack | null | a7209f64136d7cc8bd446f6801d8695fc367608f | 2024-02-16T09:06:38Z | diff --git a/haystack/dataclasses/byte_stream.py b/haystack/dataclasses/byte_stream.py
index 80b1c50c3b..ee736c001d 100644
--- a/haystack/dataclasses/byte_stream.py
+++ b/haystack/dataclasses/byte_stream.py
@@ -49,3 +49,13 @@ def from_string(
:param meta: Additional metadata to be stored with the ByteStream.
"""
return cls(data=text.encode(encoding), mime_type=mime_type, meta=meta or {})
+
+ def to_string(self, encoding: str = "utf-8") -> str:
+ """
+ Convert the ByteStream to a string, metadata will not be included.
+
+ :param encoding: The encoding used to convert the bytes to a string. Defaults to "utf-8".
+ :return: The string representation of the ByteStream.
+ :raises UnicodeDecodeError: If the ByteStream data cannot be decoded with the specified encoding.
+ """
+ return self.data.decode(encoding)
| diff --git a/test/dataclasses/test_byte_stream.py b/test/dataclasses/test_byte_stream.py
index 57d444b038..4e4199ba19 100644
--- a/test/dataclasses/test_byte_stream.py
+++ b/test/dataclasses/test_byte_stream.py
@@ -1,3 +1,5 @@
+import pytest
+
from haystack.dataclasses import ByteStream
@@ -35,6 +37,30 @@ def test_from_string():
assert b.meta == {"foo": "bar"}
+def test_to_string():
+ test_string = "Hello, world!"
+ b = ByteStream.from_string(test_string)
+ assert b.to_string() == test_string
+
+
+def test_to_from_string_encoding():
+ test_string = "Hello Baščaršija!"
+ with pytest.raises(UnicodeEncodeError):
+ ByteStream.from_string(test_string, encoding="ISO-8859-1")
+
+ bs = ByteStream.from_string(test_string) # default encoding is utf-8
+
+ assert bs.to_string(encoding="ISO-8859-1") != test_string
+ assert bs.to_string(encoding="utf-8") == test_string
+
+
+def test_to_string_encoding_error():
+ # test that it raises ValueError if the encoding is not valid
+ b = ByteStream.from_string("Hello, world!")
+ with pytest.raises(UnicodeDecodeError):
+ b.to_string("utf-16")
+
+
def test_to_file(tmp_path, request):
test_str = "Hello, world!\n"
test_path = tmp_path / request.node.name
| [
{
"components": [
{
"doc": "Convert the ByteStream to a string, metadata will not be included.\n\n:param encoding: The encoding used to convert the bytes to a string. Defaults to \"utf-8\".\n:return: The string representation of the ByteStream.\n:raises UnicodeDecodeError: If the ByteStream data c... | [
"test/dataclasses/test_byte_stream.py::test_to_string",
"test/dataclasses/test_byte_stream.py::test_to_from_string_encoding",
"test/dataclasses/test_byte_stream.py::test_to_string_encoding_error"
] | [
"test/dataclasses/test_byte_stream.py::test_from_file_path",
"test/dataclasses/test_byte_stream.py::test_from_string",
"test/dataclasses/test_byte_stream.py::test_to_file"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add ByteStream to_string method
### Why:
Adds `to_string` method in the `ByteStream` class which arises from the requirement to convert byte data back into its original string format for certain use cases. This enhancement provides a convenient and intuitive way to achieve this, enriching the class's functionalities.
In addition, we already have a method from_string that creates a ByteStream instance from a string by encoding it to bytes. A to_string method provides the symmetric operation, converting the byte stream to a string. This symmetry in API design improves intuitiveness and usability.
### What:
* A new method named `to_string` has been added to the `ByteStream` class in `haystack/dataclasses/byte_stream.py`, enabling conversion of byte data to its original string representation.
* The method accepts an optional encoding parameter for controlling the decoding process and defaults to `utf-8`.
* Custom error handling, specifically a `UnicodeDecodeError`, is implemented to ensure graceful failure in case of encoding issues.
### How can it be used:
* The `to_string` method can be applied to `ByteStream` instances when users wish to extract the original text content.
Example usage:
```python
bs = ByteStream.from_string('hello, world!')
text = bs.to_string()
assert text == 'hello, world!'
```
### How did you test it:
* Unit tests have been added to `test/dataclasses/test_byte_stream.py` verifying correct functionality of the `to_string` method in different scenarios, including valid and invalid encoding cases.
### Notes for the reviewer:
* The changes mainly concern the `ByteStream` class and its test class, keeping the impact localized.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/dataclasses/byte_stream.py]
(definition of ByteStream.to_string:)
def to_string(self, encoding: str = "utf-8") -> str:
"""Convert the ByteStream to a string, metadata will not be included.
:param encoding: The encoding used to convert the bytes to a string. Defaults to "utf-8".
:return: The string representation of the ByteStream.
:raises UnicodeDecodeError: If the ByteStream data cannot be decoded with the specified encoding."""
[end of new definitions in haystack/dataclasses/byte_stream.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | ||
roboflow__supervision-910 | 910 | roboflow/supervision | null | 0c0685987a57ffbcff8eea89bb16aff70846403e | 2024-02-15T21:29:25Z | diff --git a/supervision/detection/tools/polygon_zone.py b/supervision/detection/tools/polygon_zone.py
index c6f5b80e6..1357b03c2 100644
--- a/supervision/detection/tools/polygon_zone.py
+++ b/supervision/detection/tools/polygon_zone.py
@@ -1,5 +1,5 @@
from dataclasses import replace
-from typing import Optional, Tuple
+from typing import Iterable, Optional, Tuple
import cv2
import numpy as np
@@ -10,6 +10,7 @@
from supervision.draw.utils import draw_polygon, draw_text
from supervision.geometry.core import Position
from supervision.geometry.utils import get_polygon_center
+from supervision.utils.internal import deprecated_parameter
class PolygonZone:
@@ -20,21 +21,33 @@ class PolygonZone:
polygon (np.ndarray): A polygon represented by a numpy array of shape
`(N, 2)`, containing the `x`, `y` coordinates of the points.
frame_resolution_wh (Tuple[int, int]): The frame resolution (width, height)
- triggering_position (Position): The position within the bounding
- box that triggers the zone (default: Position.BOTTOM_CENTER)
+ triggering_anchors (Iterable[sv.Position]): A list of positions specifying
+ which anchors of the detections bounding box to consider when deciding on
+ whether the detection fits within the PolygonZone
+ (default: (sv.Position.BOTTOM_CENTER,)).
current_count (int): The current count of detected objects within the zone
mask (np.ndarray): The 2D bool mask for the polygon zone
"""
+ @deprecated_parameter(
+ old_parameter="triggering_position",
+ new_parameter="triggering_anchors",
+ map_function=lambda x: [x],
+ warning_message="Warning: '{old_parameter}' in '{function_name}' "
+ "is deprecated and will be remove in "
+ "'{removal_sv_version}: use '{new_parameter}' instead.",
+ removal_sv_version="supervision-0.23.0",
+ )
def __init__(
self,
polygon: np.ndarray,
frame_resolution_wh: Tuple[int, int],
- triggering_position: Position = Position.BOTTOM_CENTER,
+ triggering_anchors: Iterable[Position] = (Position.BOTTOM_CENTER,),
):
self.polygon = polygon.astype(int)
self.frame_resolution_wh = frame_resolution_wh
- self.triggering_position = triggering_position
+ self.triggering_anchors = triggering_anchors
+
self.current_count = 0
width, height = frame_resolution_wh
@@ -59,10 +72,20 @@ def trigger(self, detections: Detections) -> np.ndarray:
xyxy=detections.xyxy, resolution_wh=self.frame_resolution_wh
)
clipped_detections = replace(detections, xyxy=clipped_xyxy)
- clipped_anchors = np.ceil(
- clipped_detections.get_anchors_coordinates(anchor=self.triggering_position)
- ).astype(int)
- is_in_zone = self.mask[clipped_anchors[:, 1], clipped_anchors[:, 0]]
+ all_clipped_anchors = np.array(
+ [
+ np.ceil(clipped_detections.get_anchors_coordinates(anchor)).astype(int)
+ for anchor in self.triggering_anchors
+ ]
+ )
+
+ is_in_zone = (
+ self.mask[all_clipped_anchors[:, :, 1], all_clipped_anchors[:, :, 0]]
+ .transpose()
+ .astype(bool)
+ )
+ is_in_zone = np.all(is_in_zone, axis=1)
+
self.current_count = int(np.sum(is_in_zone))
return is_in_zone.astype(bool)
diff --git a/supervision/utils/internal.py b/supervision/utils/internal.py
index 031f5be15..2749e360d 100644
--- a/supervision/utils/internal.py
+++ b/supervision/utils/internal.py
@@ -1,5 +1,78 @@
import functools
import warnings
+from typing import Callable
+
+
+def deprecated_parameter(
+ old_parameter: str,
+ new_parameter: str,
+ map_function: Callable = lambda x: x,
+ warning_message: str = "Warning: '{old_parameter}' in '{function_name}' "
+ "is deprecated: use '{new_parameter}' instead.",
+ **message_kwargs,
+):
+ """
+ A decorator to mark a function's parameter as deprecated
+ and issue a warning when used.
+
+ Parameters:
+ - old_parameter (str): The name of the deprecated parameter.
+ - new_parameter (str): The name of the parameter that should be used instead.
+ - map_function (Callable, optional): A function used to map the value of the old
+ parameter to the new parameter. Defaults to the identity function.
+ - warn_message (str, optional): The warning message to be displayed when the
+ deprecated parameter is used. Defaults to a generic warning message with
+ placeholders for the old parameter, new parameter, and function name.
+ - **message_kwargs: Additional keyword arguments that can be used to customize
+ the warning message.
+
+ Returns:
+ Callable: A decorator function that can be applied to mark a function's
+ parameter as deprecated.
+
+ Usage Example:
+ ```python
+ @deprecated_parameter(old_parameter="old_param", new_parameter="new_param")
+ def example_function(new_param):
+ print(f"Function called with new_param: {new_param}")
+
+ # When calling the function with the deprecated parameter:
+ example_function(old_param="deprecated_value")
+ ```
+ This will trigger a deprecation warning and execute the function with the mapped
+ value of the deprecated parameter.
+ """
+
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if old_parameter in kwargs:
+ # In case of a method, display also the class name.
+ if args and hasattr(args[0], "__class__"):
+ class_name = args[0].__class__.__name__
+ function_name = f"{class_name}.{func.__name__}"
+ else:
+ function_name = func.__name__
+
+ # Display deprecation warning
+ warnings.warn(
+ message=warning_message.format(
+ function_name=function_name,
+ old_parameter=old_parameter,
+ new_parameter=new_parameter,
+ **message_kwargs,
+ ),
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
+ # Map old_param to new_param
+ kwargs[new_parameter] = map_function(kwargs.pop(old_parameter))
+
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorator
def deprecated(reason: str):
| diff --git a/test/detection/test_polygonzone.py b/test/detection/test_polygonzone.py
new file mode 100644
index 000000000..2aae955f7
--- /dev/null
+++ b/test/detection/test_polygonzone.py
@@ -0,0 +1,92 @@
+from contextlib import ExitStack as DoesNotRaise
+from test.test_utils import mock_detections
+
+import numpy as np
+import pytest
+
+import supervision as sv
+
+DETECTION_BOXES = np.array(
+ [
+ [35.0, 35.0, 65.0, 65.0],
+ [60.0, 60.0, 90.0, 90.0],
+ [85.0, 85.0, 115.0, 115.0],
+ [110.0, 110.0, 140.0, 140.0],
+ [135.0, 135.0, 165.0, 165.0],
+ [160.0, 160.0, 190.0, 190.0],
+ [185.0, 185.0, 215.0, 215.0],
+ [210.0, 210.0, 240.0, 240.0],
+ [235.0, 235.0, 265.0, 265.0],
+ ],
+ dtype=np.float32,
+)
+
+DETECTIONS = mock_detections(
+ xyxy=DETECTION_BOXES, class_id=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
+)
+
+POLYGON = np.array([[100, 100], [200, 100], [200, 200], [100, 200]])
+FRAME_RESOLUTION = (300, 300)
+
+
+@pytest.mark.parametrize(
+ "detections, polygon_zone, expected_results, exception",
+ [
+ (
+ DETECTIONS,
+ sv.PolygonZone(
+ POLYGON,
+ FRAME_RESOLUTION,
+ triggering_anchors=(
+ sv.Position.TOP_LEFT,
+ sv.Position.TOP_RIGHT,
+ sv.Position.BOTTOM_LEFT,
+ sv.Position.BOTTOM_RIGHT,
+ ),
+ ),
+ np.array(
+ [False, False, False, True, True, True, False, False, False], dtype=bool
+ ),
+ DoesNotRaise(),
+ ), # Test all four corners
+ (
+ DETECTIONS,
+ sv.PolygonZone(
+ POLYGON,
+ FRAME_RESOLUTION,
+ ),
+ np.array(
+ [False, False, True, True, True, True, False, False, False], dtype=bool
+ ),
+ DoesNotRaise(),
+ ), # Test default behaviour when no anchors are provided
+ (
+ DETECTIONS,
+ sv.PolygonZone(
+ POLYGON, FRAME_RESOLUTION, triggering_position=sv.Position.BOTTOM_CENTER
+ ),
+ np.array(
+ [False, False, True, True, True, True, False, False, False], dtype=bool
+ ),
+ DoesNotRaise(),
+ ), # Test default behaviour with deprecated api.
+ (
+ sv.Detections.empty(),
+ sv.PolygonZone(
+ POLYGON,
+ FRAME_RESOLUTION,
+ ),
+ np.array([], dtype=bool),
+ DoesNotRaise(),
+ ), # Test empty detections
+ ],
+)
+def test_polygon_zone_trigger(
+ detections: sv.Detections,
+ polygon_zone: sv.PolygonZone,
+ expected_results: np.ndarray,
+ exception: Exception,
+) -> None:
+ with exception:
+ in_zone = polygon_zone.trigger(detections)
+ assert np.all(in_zone == expected_results)
| [
{
"components": [
{
"doc": "A decorator to mark a function's parameter as deprecated\nand issue a warning when used.\n\nParameters:\n- old_parameter (str): The name of the deprecated parameter.\n- new_parameter (str): The name of the parameter that should be used instead.\n- map_function (Callable... | [
"test/detection/test_polygonzone.py::test_polygon_zone_trigger[detections0-polygon_zone0-expected_results0-exception0]",
"test/detection/test_polygonzone.py::test_polygon_zone_trigger[detections1-polygon_zone1-expected_results1-exception1]",
"test/detection/test_polygonzone.py::test_polygon_zone_trigger[detecti... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Polygonzone add multiple anchors support
# Description
This PR addresses [Issue #844](https://github.com/roboflow/supervision/issues/844).
- Rename `triggering_position` to `triggering_anchors` to be consistent with the `LineZone` naming convention.
- Update type of argument from `Position` to `Iterable[Position]`.
- Maintain the default behavior. The zone should, by default, be triggered by `Position.BOTTOM_CENTER`.
- Adds unit tests for `PolygonZone`.
- Updates documentation.
## Type of change
- [ x ] New feature (non-breaking change which adds functionality)
- [ x ] This change requires a documentation update
## How has this change been tested, please provide a testcase or example of how you tested the change?
For a minimalist setup, I'm doing the following:
1- Create mock detections simulating an object moving in a straight line.
2- Instantiate a `PolygonZone` object with polygon partially intersecting the line.
3- Compute the `PolygonZone.trigger(mock_detections)` and annotate the detections triggering the `PolygonZone` object as green, and red otherwise.
The setup used here was also used as base for the unit tests.
Google colab: [https://colab.research.google.com/drive/1fcsVrprMuf7hm_bxitSqmeL5L7KBVGHh?usp=sharing](https://colab.research.google.com/drive/1fcsVrprMuf7hm_bxitSqmeL5L7KBVGHh?usp=sharing)
## Deployment Concerns
### API Backward Compatibility
#### Option 1: Maintaining Deprecation Logic Within the Function
To ensure backward compatibility after renaming a parameter in this PR, one approach is to maintain deprecation logic within the function itself. The implementation could look something like this:
```python
class PolygonZone:
def __init__(self, polygon, frame_resolution_wh, triggering_anchors, **kwargs):
if 'triggering_position' in kwargs:
warning.warn('deprecated argument...')
''' logic to handle deprecated parameter'''
```
While effective for specific cases, this approach has some drawbacks:
1. **Documentation Readability**: Inserting `**kwargs` might make the documentation less clear.
2. **Code Visibility**: Deprecated code may become challenging to locate, especially with scale.
3. **Lack of Precedence**: I couldn't find any examples of this pattern in the codebase.
#### Option 2: Adding a `@deprecated_parameter` Annotator (Adopted Solution)
The chosen solution involves creating a `@deprecated_parameter` annotator using pseudocode like this:
```python
def deprecated_parameter(old_param: str, new_param: str, map_func: Callable = lambda x: x):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if old_param in kwargs:
print('warning message...')
kwargs[new_param] = map_func(kwargs.pop(old_param))
return func(*args, **kwargs)
return wrapper
return decorator
```
While this solution may not be suitable for highly complex parameter changes, it aligns with existing practices, such as the use of the `@deprecated` annotator, making it easier to track deprecated code references.
Please share your thoughts on this proposed solution.
### Deprecated Examples
Some examples, like `examples/traffic_analysis`, explicitly set `triggering_position` for the `PolygonZone` class. This PR renders it deprecated.
Should we address these changes within this PR, or would it be more appropriate to handle them in a separate one? I am open to incorporating the necessary modifications here if that is the preferred approach.
Let me know your preferences regarding these considerations.
## Docs
- [ x ] Docs updated? What were the changes:
[Changed the documentation for class `PolygonZone`](https://github.com/roboflow/supervision/pull/910/files#diff-738bb6b6510e46b8633ab062a4c980fa338f7f3ccf7c9792b91a1bd589e8bf01L23-R27).
Fix : #844
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in supervision/utils/internal.py]
(definition of deprecated_parameter:)
def deprecated_parameter( old_parameter: str, new_parameter: str, map_function: Callable = lambda x: x, warning_message: str = "Warning: '{old_parameter}' in '{function_name}' " "is deprecated: use '{new_parameter}' instead.", **message_kwargs, ):
"""A decorator to mark a function's parameter as deprecated
and issue a warning when used.
Parameters:
- old_parameter (str): The name of the deprecated parameter.
- new_parameter (str): The name of the parameter that should be used instead.
- map_function (Callable, optional): A function used to map the value of the old
parameter to the new parameter. Defaults to the identity function.
- warn_message (str, optional): The warning message to be displayed when the
deprecated parameter is used. Defaults to a generic warning message with
placeholders for the old parameter, new parameter, and function name.
- **message_kwargs: Additional keyword arguments that can be used to customize
the warning message.
Returns:
Callable: A decorator function that can be applied to mark a function's
parameter as deprecated.
Usage Example:
```python
@deprecated_parameter(old_parameter="old_param", new_parameter="new_param")
def example_function(new_param):
print(f"Function called with new_param: {new_param}")
# When calling the function with the deprecated parameter:
example_function(old_param="deprecated_value")
```
This will trigger a deprecation warning and execute the function with the mapped
value of the deprecated parameter."""
(definition of deprecated_parameter.decorator:)
def decorator(func): @functools.wraps(func)
(definition of deprecated_parameter.decorator.wrapper:)
def wrapper(*args, **kwargs):
[end of new definitions in supervision/utils/internal.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3eb5c0b024e3e46877b7fe4fd66e6177d1308ba0 | ||
deepset-ai__haystack-6997 | 6,997 | deepset-ai/haystack | null | cf221a970196f3768002378127f6618e1f3d97d0 | 2024-02-15T09:58:16Z | diff --git a/haystack/components/preprocessors/__init__.py b/haystack/components/preprocessors/__init__.py
index d1ed7a96ec..bf24cf2cb2 100644
--- a/haystack/components/preprocessors/__init__.py
+++ b/haystack/components/preprocessors/__init__.py
@@ -1,4 +1,5 @@
-from haystack.components.preprocessors.document_cleaner import DocumentCleaner
-from haystack.components.preprocessors.document_splitter import DocumentSplitter
+from .document_cleaner import DocumentCleaner
+from .document_splitter import DocumentSplitter
+from .text_cleaner import TextCleaner
-__all__ = ["DocumentSplitter", "DocumentCleaner"]
+__all__ = ["DocumentSplitter", "DocumentCleaner", "TextCleaner"]
diff --git a/haystack/components/preprocessors/text_cleaner.py b/haystack/components/preprocessors/text_cleaner.py
new file mode 100644
index 0000000000..752e2a2cb6
--- /dev/null
+++ b/haystack/components/preprocessors/text_cleaner.py
@@ -0,0 +1,67 @@
+import re
+import string
+from typing import Any, Dict, List, Optional
+
+from haystack import component
+
+
+@component
+class TextCleaner:
+ """
+ A preprocessor component to clean text data. It can remove substrings matching a list of regular expressions,
+ convert text to lowercase, remove punctuation, and remove numbers.
+ This is useful to cleanup text data before evaluation.
+ """
+
+ def __init__(
+ self,
+ remove_regexps: Optional[List[str]] = None,
+ convert_to_lowercase: bool = False,
+ remove_punctuation: bool = False,
+ remove_numbers: bool = False,
+ ):
+ """
+ Creates a new instance of TextCleaner.
+
+ :param remove_regexps: A list of regular expressions. If provided, it removes substrings
+ matching these regular expressions from the text. Defaults to None.
+ :param convert_to_lowercase: If True, converts all characters to lowercase. Defaults to False.
+ :param remove_punctuation: If True, removes punctuation from the text. Defaults to False.
+ :param remove_numbers: If True, removes numerical digits from the text. Defaults to False.
+ """
+ self._remove_regexps = remove_regexps
+ self._convert_to_lowercase = convert_to_lowercase
+ self._remove_punctuation = remove_punctuation
+ self._remove_numbers = remove_numbers
+
+ self._regex = None
+ if remove_regexps:
+ self._regex = re.compile("|".join(remove_regexps), flags=re.IGNORECASE)
+ to_remove = ""
+ if remove_punctuation:
+ to_remove = string.punctuation
+ if remove_numbers:
+ to_remove += string.digits
+
+ self._translator = str.maketrans("", "", to_remove) if to_remove else None
+
+ @component.output_types(texts=List[str])
+ def run(self, texts: List[str]) -> Dict[str, Any]:
+ r"""
+ Run the TextCleaner on the given list of strings.
+
+ :param texts: List of strings to clean.
+ :returns: A dictionary with the following outputs:
+ * `texts` - The cleaned list of strings.
+ """
+
+ if self._regex:
+ texts = [self._regex.sub("", text) for text in texts]
+
+ if self._convert_to_lowercase:
+ texts = [text.lower() for text in texts]
+
+ if self._translator:
+ texts = [text.translate(self._translator) for text in texts]
+
+ return {"texts": texts}
diff --git a/releasenotes/notes/text-cleaner-eee0eecbdec21427.yaml b/releasenotes/notes/text-cleaner-eee0eecbdec21427.yaml
new file mode 100644
index 0000000000..3e28b27ca5
--- /dev/null
+++ b/releasenotes/notes/text-cleaner-eee0eecbdec21427.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `TextCleaner` Component to clean list of strings. It can remove substrings matching a list of regular expressions,
+ convert text to lowercase, remove punctuation, and remove numbers.
+ This is mostly useful to clean generator predictions before evaluation.
| diff --git a/test/components/preprocessors/test_text_cleaner.py b/test/components/preprocessors/test_text_cleaner.py
new file mode 100644
index 0000000000..dea47e6b37
--- /dev/null
+++ b/test/components/preprocessors/test_text_cleaner.py
@@ -0,0 +1,70 @@
+from haystack.components.preprocessors import TextCleaner
+
+
+def test_init_default():
+ cleaner = TextCleaner()
+ assert cleaner._remove_regexps is None
+ assert not cleaner._convert_to_lowercase
+ assert not cleaner._remove_punctuation
+ assert not cleaner._remove_numbers
+ assert cleaner._regex is None
+ assert cleaner._translator is None
+
+
+def test_run():
+ cleaner = TextCleaner()
+ texts = ["Some text", "Some other text", "Yet another text"]
+ result = cleaner.run(texts=texts)
+ assert len(result) == 1
+ assert result["texts"] == texts
+
+
+def test_run_with_empty_inputs():
+ cleaner = TextCleaner()
+ result = cleaner.run(texts=[])
+ assert len(result) == 1
+ assert result["texts"] == []
+
+
+def test_run_with_regex():
+ cleaner = TextCleaner(remove_regexps=[r"\d+"])
+ result = cleaner.run(texts=["Open123 Source", "HaystackAI"])
+ assert len(result) == 1
+ assert result["texts"] == ["Open Source", "HaystackAI"]
+
+
+def test_run_with_multiple_regexps():
+ cleaner = TextCleaner(remove_regexps=[r"\d+", r"[^\w\s]"])
+ result = cleaner.run(texts=["Open123! Source", "Haystack.AI"])
+ assert len(result) == 1
+ assert result["texts"] == ["Open Source", "HaystackAI"]
+
+
+def test_run_with_convert_to_lowercase():
+ cleaner = TextCleaner(convert_to_lowercase=True)
+ result = cleaner.run(texts=["Open123! Source", "Haystack.AI"])
+ assert len(result) == 1
+ assert result["texts"] == ["open123! source", "haystack.ai"]
+
+
+def test_run_with_remove_punctuation():
+ cleaner = TextCleaner(remove_punctuation=True)
+ result = cleaner.run(texts=["Open123! Source", "Haystack.AI"])
+ assert len(result) == 1
+ assert result["texts"] == ["Open123 Source", "HaystackAI"]
+
+
+def test_run_with_remove_numbers():
+ cleaner = TextCleaner(remove_numbers=True)
+ result = cleaner.run(texts=["Open123! Source", "Haystack.AI"])
+ assert len(result) == 1
+ assert result["texts"] == ["Open! Source", "Haystack.AI"]
+
+
+def test_run_with_multiple_parameters():
+ cleaner = TextCleaner(
+ remove_regexps=[r"\d+", r"[^\w\s]"], convert_to_lowercase=True, remove_punctuation=True, remove_numbers=True
+ )
+ result = cleaner.run(texts=["Open%123. !$Source", "Haystack.AI##"])
+ assert len(result) == 1
+ assert result["texts"] == ["open source", "haystackai"]
| diff --git a/releasenotes/notes/text-cleaner-eee0eecbdec21427.yaml b/releasenotes/notes/text-cleaner-eee0eecbdec21427.yaml
new file mode 100644
index 0000000000..3e28b27ca5
--- /dev/null
+++ b/releasenotes/notes/text-cleaner-eee0eecbdec21427.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `TextCleaner` Component to clean list of strings. It can remove substrings matching a list of regular expressions,
+ convert text to lowercase, remove punctuation, and remove numbers.
+ This is mostly useful to clean generator predictions before evaluation.
| [
{
"components": [
{
"doc": "A preprocessor component to clean text data. It can remove substrings matching a list of regular expressions,\nconvert text to lowercase, remove punctuation, and remove numbers.\nThis is useful to cleanup text data before evaluation.",
"lines": [
9,
... | [
"test/components/preprocessors/test_text_cleaner.py::test_init_default",
"test/components/preprocessors/test_text_cleaner.py::test_run",
"test/components/preprocessors/test_text_cleaner.py::test_run_with_empty_inputs",
"test/components/preprocessors/test_text_cleaner.py::test_run_with_regex",
"test/componen... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add `TextCleaner` component
### Related Issues
- Part of #6903
### Proposed Changes:
Add `TextCleaner` component. This is mostly useful between a generator and an evaluator to cleanup the generated LLM response.
### How did you test it?
I added unit tests.
### Notes for the reviewer
This stems from feedback received in PR #6980.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/preprocessors/text_cleaner.py]
(definition of TextCleaner:)
class TextCleaner:
"""A preprocessor component to clean text data. It can remove substrings matching a list of regular expressions,
convert text to lowercase, remove punctuation, and remove numbers.
This is useful to cleanup text data before evaluation."""
(definition of TextCleaner.__init__:)
def __init__( self, remove_regexps: Optional[List[str]] = None, convert_to_lowercase: bool = False, remove_punctuation: bool = False, remove_numbers: bool = False, ):
"""Creates a new instance of TextCleaner.
:param remove_regexps: A list of regular expressions. If provided, it removes substrings
matching these regular expressions from the text. Defaults to None.
:param convert_to_lowercase: If True, converts all characters to lowercase. Defaults to False.
:param remove_punctuation: If True, removes punctuation from the text. Defaults to False.
:param remove_numbers: If True, removes numerical digits from the text. Defaults to False."""
(definition of TextCleaner.run:)
def run(self, texts: List[str]) -> Dict[str, Any]:
"""Run the TextCleaner on the given list of strings.
:param texts: List of strings to clean.
:returns: A dictionary with the following outputs:
* `texts` - The cleaned list of strings."""
[end of new definitions in haystack/components/preprocessors/text_cleaner.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
deepset-ai__haystack-6995 | 6,995 | deepset-ai/haystack | null | a7209f64136d7cc8bd446f6801d8695fc367608f | 2024-02-15T06:44:23Z | diff --git a/haystack/components/rankers/__init__.py b/haystack/components/rankers/__init__.py
index d4aeab1d16..bb8c7dd999 100644
--- a/haystack/components/rankers/__init__.py
+++ b/haystack/components/rankers/__init__.py
@@ -1,4 +1,5 @@
+from haystack.components.rankers.lost_in_the_middle import LostInTheMiddleRanker
from haystack.components.rankers.meta_field import MetaFieldRanker
from haystack.components.rankers.transformers_similarity import TransformersSimilarityRanker
-__all__ = ["MetaFieldRanker", "TransformersSimilarityRanker"]
+__all__ = ["LostInTheMiddleRanker", "MetaFieldRanker", "TransformersSimilarityRanker"]
diff --git a/haystack/components/rankers/lost_in_the_middle.py b/haystack/components/rankers/lost_in_the_middle.py
new file mode 100644
index 0000000000..cbbe166d91
--- /dev/null
+++ b/haystack/components/rankers/lost_in_the_middle.py
@@ -0,0 +1,109 @@
+from typing import Any, Dict, List, Optional
+
+from haystack import Document, component, default_to_dict
+
+
+@component
+class LostInTheMiddleRanker:
+ """
+ The LostInTheMiddleRanker implements a ranker that reorders documents based on the "lost in the middle" order.
+ "Lost in the Middle: How Language Models Use Long Contexts" paper by Liu et al. aims to lay out paragraphs into LLM
+ context so that the relevant paragraphs are at the beginning or end of the input context, while the least relevant
+ information is in the middle of the context.
+
+ See https://arxiv.org/abs/2307.03172 for more details.
+ """
+
+ def __init__(self, word_count_threshold: Optional[int] = None, top_k: Optional[int] = None):
+ """
+ If 'word_count_threshold' is specified, this ranker includes all documents up until the point where adding
+ another document would exceed the 'word_count_threshold'. The last document that causes the threshold to
+ be breached will be included in the resulting list of documents, but all subsequent documents will be
+ discarded.
+
+ :param word_count_threshold: The maximum total number of words across all documents selected by the ranker.
+ :param top_k: The maximum number of documents to return.
+ """
+ if isinstance(word_count_threshold, int) and word_count_threshold <= 0:
+ raise ValueError(
+ f"Invalid value for word_count_threshold: {word_count_threshold}. " f"word_count_threshold must be > 0."
+ )
+ if isinstance(top_k, int) and top_k <= 0:
+ raise ValueError(f"top_k must be > 0, but got {top_k}")
+
+ self.word_count_threshold = word_count_threshold
+ self.top_k = top_k
+
+ def to_dict(self) -> Dict[str, Any]:
+ """
+ Serialize object to a dictionary.
+ """
+ return default_to_dict(self, word_count_threshold=self.word_count_threshold, top_k=self.top_k)
+
+ def run(
+ self, documents: List[Document], top_k: Optional[int] = None, word_count_threshold: Optional[int] = None
+ ) -> Dict[str, List[Document]]:
+ """
+ Reranks documents based on the "lost in the middle" order.
+ Returns a list of Documents reordered based on the input query.
+ :param documents: List of Documents to reorder.
+ :param top_k: The number of documents to return.
+ :param word_count_threshold: The maximum total number of words across all documents selected by the ranker.
+
+ :return: The reordered documents.
+ """
+ if isinstance(word_count_threshold, int) and word_count_threshold <= 0:
+ raise ValueError(
+ f"Invalid value for word_count_threshold: {word_count_threshold}. " f"word_count_threshold must be > 0."
+ )
+ if isinstance(top_k, int) and top_k <= 0:
+ raise ValueError(f"top_k must be > 0, but got {top_k}")
+
+ if not documents:
+ return {"documents": []}
+
+ top_k = top_k or self.top_k
+ word_count_threshold = word_count_threshold or self.word_count_threshold
+
+ documents_to_reorder = documents[:top_k] if top_k else documents
+
+ # If there's only one document, return it as is
+ if len(documents_to_reorder) == 1:
+ return {"documents": documents_to_reorder}
+
+ # Raise an error if any document is not textual
+ if any(not doc.content_type == "text" for doc in documents_to_reorder):
+ raise ValueError("Some provided documents are not textual; LostInTheMiddleRanker can process only text.")
+
+ # Initialize word count and indices for the "lost in the middle" order
+ word_count = 0
+ document_index = list(range(len(documents_to_reorder)))
+ lost_in_the_middle_indices = [0]
+
+ # If word count threshold is set and the first document has content, calculate word count for the first document
+ if word_count_threshold and documents_to_reorder[0].content:
+ word_count = len(documents_to_reorder[0].content.split())
+
+ # If the first document already meets the word count threshold, return it
+ if word_count >= word_count_threshold:
+ return {"documents": [documents_to_reorder[0]]}
+
+ # Start from the second document and create "lost in the middle" order
+ for doc_idx in document_index[1:]:
+ # Calculate the index at which the current document should be inserted
+ insertion_index = len(lost_in_the_middle_indices) // 2 + len(lost_in_the_middle_indices) % 2
+
+ # Insert the document index at the calculated position
+ lost_in_the_middle_indices.insert(insertion_index, doc_idx)
+
+ # If word count threshold is set and the document has content, calculate the total word count
+ if word_count_threshold and documents_to_reorder[doc_idx].content:
+ word_count += len(documents_to_reorder[doc_idx].content.split()) # type: ignore[union-attr]
+
+ # If the total word count meets the threshold, stop processing further documents
+ if word_count >= word_count_threshold:
+ break
+
+ # Documents in the "lost in the middle" order
+ ranked_docs = [documents_to_reorder[idx] for idx in lost_in_the_middle_indices]
+ return {"documents": ranked_docs}
diff --git a/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml b/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml
new file mode 100644
index 0000000000..83c9e57682
--- /dev/null
+++ b/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml
@@ -0,0 +1,8 @@
+---
+
+features:
+ - |
+ Add LostInTheMiddleRanker.
+ It reorders documents based on the "Lost in the Middle" order, a strategy that
+ places the most relevant paragraphs at the beginning or end of the context,
+ while less relevant paragraphs are positioned in the middle.
| diff --git a/test/components/rankers/test_lost_in_the_middle.py b/test/components/rankers/test_lost_in_the_middle.py
new file mode 100644
index 0000000000..d1bae7669d
--- /dev/null
+++ b/test/components/rankers/test_lost_in_the_middle.py
@@ -0,0 +1,104 @@
+import pytest
+from haystack import Document
+from haystack.components.rankers.lost_in_the_middle import LostInTheMiddleRanker
+
+
+class TestLostInTheMiddleRanker:
+ def test_lost_in_the_middle_order_odd(self):
+ # tests that lost_in_the_middle order works with an odd number of documents
+ docs = [Document(content=str(i)) for i in range(1, 10)]
+ ranker = LostInTheMiddleRanker()
+ result = ranker.run(documents=docs)
+ assert result["documents"]
+ expected_order = "1 3 5 7 9 8 6 4 2".split()
+ assert all(doc.content == expected_order[idx] for idx, doc in enumerate(result["documents"]))
+
+ def test_lost_in_the_middle_order_even(self):
+ # tests that lost_in_the_middle order works with an even number of documents
+ docs = [Document(content=str(i)) for i in range(1, 11)]
+ ranker = LostInTheMiddleRanker()
+ result = ranker.run(documents=docs)
+ expected_order = "1 3 5 7 9 10 8 6 4 2".split()
+ assert all(doc.content == expected_order[idx] for idx, doc in enumerate(result["documents"]))
+
+ def test_lost_in_the_middle_order_two_docs(self):
+ # tests that lost_in_the_middle order works with two documents
+ ranker = LostInTheMiddleRanker()
+ # two docs
+ docs = [Document(content="1"), Document(content="2")]
+ result = ranker.run(documents=docs)
+ assert result["documents"][0].content == "1"
+ assert result["documents"][1].content == "2"
+
+ def test_lost_in_the_middle_init(self):
+ # tests that LostInTheMiddleRanker initializes with default values
+ ranker = LostInTheMiddleRanker()
+ assert ranker.word_count_threshold is None
+
+ ranker = LostInTheMiddleRanker(word_count_threshold=10)
+ assert ranker.word_count_threshold == 10
+
+ def test_lost_in_the_middle_init_invalid_word_count_threshold(self):
+ # tests that LostInTheMiddleRanker raises an error when word_count_threshold is <= 0
+ with pytest.raises(ValueError, match="Invalid value for word_count_threshold"):
+ LostInTheMiddleRanker(word_count_threshold=0)
+
+ with pytest.raises(ValueError, match="Invalid value for word_count_threshold"):
+ LostInTheMiddleRanker(word_count_threshold=-5)
+
+ def test_lost_in_the_middle_with_word_count_threshold(self):
+ # tests that lost_in_the_middle with word_count_threshold works as expected
+ ranker = LostInTheMiddleRanker(word_count_threshold=6)
+ docs = [Document(content="word" + str(i)) for i in range(1, 10)]
+ # result, _ = ranker.run(query="", documents=docs)
+ result = ranker.run(documents=docs)
+ expected_order = "word1 word3 word5 word6 word4 word2".split()
+ assert all(doc.content == expected_order[idx] for idx, doc in enumerate(result["documents"]))
+
+ ranker = LostInTheMiddleRanker(word_count_threshold=9)
+ # result, _ = ranker.run(query="", documents=docs)
+ result = ranker.run(documents=docs)
+ expected_order = "word1 word3 word5 word7 word9 word8 word6 word4 word2".split()
+ assert all(doc.content == expected_order[idx] for idx, doc in enumerate(result["documents"]))
+
+ def test_word_count_threshold_greater_than_total_number_of_words_returns_all_documents(self):
+ ranker = LostInTheMiddleRanker(word_count_threshold=100)
+ docs = [Document(content="word" + str(i)) for i in range(1, 10)]
+ ordered_docs = ranker.run(documents=docs)
+ # assert len(ordered_docs) == len(docs)
+ expected_order = "word1 word3 word5 word7 word9 word8 word6 word4 word2".split()
+ assert all(doc.content == expected_order[idx] for idx, doc in enumerate(ordered_docs["documents"]))
+
+ def test_empty_documents_returns_empty_list(self):
+ ranker = LostInTheMiddleRanker()
+ result = ranker.run(documents=[])
+ assert result == {"documents": []}
+
+ def test_list_of_one_document_returns_same_document(self):
+ ranker = LostInTheMiddleRanker()
+ doc = Document(content="test")
+ assert ranker.run(documents=[doc]) == {"documents": [doc]}
+
+ @pytest.mark.parametrize("top_k", [1, 2, 3, 4, 5, 6, 7, 8, 12, 20])
+ def test_lost_in_the_middle_order_with_top_k(self, top_k: int):
+ # tests that lost_in_the_middle order works with an odd number of documents and a top_k parameter
+ docs = [Document(content=str(i)) for i in range(1, 10)]
+ ranker = LostInTheMiddleRanker()
+ result = ranker.run(documents=docs, top_k=top_k)
+ if top_k < len(docs):
+ # top_k is less than the number of documents, so only the top_k documents should be returned in LITM order
+ assert len(result["documents"]) == top_k
+ expected_order = ranker.run(documents=[Document(content=str(i)) for i in range(1, top_k + 1)])
+ assert result == expected_order
+ else:
+ # top_k is greater than the number of documents, so all documents should be returned in LITM order
+ assert len(result["documents"]) == len(docs)
+ assert result == ranker.run(documents=docs)
+
+ def test_to_dict(self):
+ component = LostInTheMiddleRanker()
+ data = component.to_dict()
+ assert data == {
+ "type": "haystack.components.rankers.lost_in_the_middle.LostInTheMiddleRanker",
+ "init_parameters": {"word_count_threshold": None, "top_k": None},
+ }
| diff --git a/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml b/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml
new file mode 100644
index 0000000000..83c9e57682
--- /dev/null
+++ b/releasenotes/notes/add-lost-in-the-middle-ranker-976f2e9bf83c3c68.yaml
@@ -0,0 +1,8 @@
+---
+
+features:
+ - |
+ Add LostInTheMiddleRanker.
+ It reorders documents based on the "Lost in the Middle" order, a strategy that
+ places the most relevant paragraphs at the beginning or end of the context,
+ while less relevant paragraphs are positioned in the middle.
| [
{
"components": [
{
"doc": "The LostInTheMiddleRanker implements a ranker that reorders documents based on the \"lost in the middle\" order.\n\"Lost in the Middle: How Language Models Use Long Contexts\" paper by Liu et al. aims to lay out paragraphs into LLM\ncontext so that the relevant paragrap... | [
"test/components/rankers/test_lost_in_the_middle.py::TestLostInTheMiddleRanker::test_lost_in_the_middle_order_odd",
"test/components/rankers/test_lost_in_the_middle.py::TestLostInTheMiddleRanker::test_lost_in_the_middle_order_even",
"test/components/rankers/test_lost_in_the_middle.py::TestLostInTheMiddleRanker:... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add Lost In The Middle Ranker
Lost In The Middle Ranker
This ranker ranks documents based on the "Lost in the Middle" order, designed to position "the best" documents (low index in the given list of documents) at the beginning and the end of the resulting list while placing "the worst" documents (high index in the given list of documents) in the middle.
The Lost in the Middle Ranker contains these methods:
```python
def __init__(self, word_count_threshold: Optional[int] = None, top_k: Optional[int] = None):
```
If 'word_count_threshold' is specified, this ranker includes all documents up until the point where adding another document would exceed the 'word_count_threshold'. The last document that causes the threshold to be breached will be included in the resulting list of documents, but all subsequent documents will be discarded.
```python
def reorder_documents(self, documents: List[Document]) -> List[Document]:
```
Ranks documents based on the "lost in the middle" order. Assumes that all documents are ordered by relevance.
```python
def run(self, query: str, documents: List[Document], top_k: Optional[int] = None) -> List[Document]:
```
Reranks documents based on the "lost in the middle" order.
Returns a list of Documents reordered based on the input query.
The following units tests were written for:
1) The Lost In The Middle Ranker works with an odd number of documents.
2) The Lost In The Middle Ranker works with an even number of documents.
3) The Lost In The Middle Ranker works with two documents.
4) The Lost In The Middle Ranker initializes with default values.
5) The Lost In The Middle Ranker raises an error when word count threshold is <= 0
6) The Lost In The Middle Ranker with word count threshold works as expected.
7) Empty Documents will return a empty List.
8) One Document will return the same document
9) Tests that merging a list of non-textual documents raises a ValueError
10) Tests the lost in the middle order works with a odd number of documents and a top_k parameter.
11) Tests that the lost in the middle order works with an odd number of documents and an invalid top_k parameter.
12) Ranker Retreival Pipeline where a sparse retreiver and lost in the middle ranker is connected on three documents and the top 2 documents are retrieved.
13) RAG Pipeline on three documents where the emebddings are created and then the retrieved documents are ranked.
14) RAG Pipeline on the wikipedia dataset where the top 3 documents are returned after ranking.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/rankers/lost_in_the_middle.py]
(definition of LostInTheMiddleRanker:)
class LostInTheMiddleRanker:
"""The LostInTheMiddleRanker implements a ranker that reorders documents based on the "lost in the middle" order.
"Lost in the Middle: How Language Models Use Long Contexts" paper by Liu et al. aims to lay out paragraphs into LLM
context so that the relevant paragraphs are at the beginning or end of the input context, while the least relevant
information is in the middle of the context.
See https://arxiv.org/abs/2307.03172 for more details."""
(definition of LostInTheMiddleRanker.__init__:)
def __init__(self, word_count_threshold: Optional[int] = None, top_k: Optional[int] = None):
"""If 'word_count_threshold' is specified, this ranker includes all documents up until the point where adding
another document would exceed the 'word_count_threshold'. The last document that causes the threshold to
be breached will be included in the resulting list of documents, but all subsequent documents will be
discarded.
:param word_count_threshold: The maximum total number of words across all documents selected by the ranker.
:param top_k: The maximum number of documents to return."""
(definition of LostInTheMiddleRanker.to_dict:)
def to_dict(self) -> Dict[str, Any]:
"""Serialize object to a dictionary."""
(definition of LostInTheMiddleRanker.run:)
def run( self, documents: List[Document], top_k: Optional[int] = None, word_count_threshold: Optional[int] = None ) -> Dict[str, List[Document]]:
"""Reranks documents based on the "lost in the middle" order.
Returns a list of Documents reordered based on the input query.
:param documents: List of Documents to reorder.
:param top_k: The number of documents to return.
:param word_count_threshold: The maximum total number of words across all documents selected by the ranker.
:return: The reordered documents."""
[end of new definitions in haystack/components/rankers/lost_in_the_middle.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
huggingface__huggingface_hub-2027 | 2,027 | huggingface/huggingface_hub | null | 0c272d506e390f2d7b9dac68159595845c7f8e3b | 2024-02-14T18:00:11Z | diff --git a/src/huggingface_hub/hf_file_system.py b/src/huggingface_hub/hf_file_system.py
index a78ab0fd80..630ff64ba8 100644
--- a/src/huggingface_hub/hf_file_system.py
+++ b/src/huggingface_hub/hf_file_system.py
@@ -577,6 +577,20 @@ def isfile(self, path):
except: # noqa: E722
return False
+ def url(self, path: str) -> str:
+ """Get the HTTP URL of the given path"""
+ resolved_path = self.resolve_path(path)
+ url = hf_hub_url(
+ resolved_path.repo_id,
+ resolved_path.path_in_repo,
+ repo_type=resolved_path.repo_type,
+ revision=resolved_path.revision,
+ endpoint=self.endpoint,
+ )
+ if self.isdir(path):
+ url = url.replace("/resolve/", "/tree/", 1)
+ return url
+
@property
def transaction(self):
"""A context within which files are committed together upon exit
@@ -653,6 +667,9 @@ def _upload_chunk(self, final: bool = False) -> None:
path=self.resolved_path.unresolve(),
)
+ def url(self) -> str:
+ return self.fs.url(self.path)
+
class HfFileSystemStreamFile(fsspec.spec.AbstractBufferedFile):
def __init__(
@@ -740,6 +757,9 @@ def read(self, length: int = -1):
self.loc += len(out)
return out
+ def url(self) -> str:
+ return self.fs.url(self.path)
+
def __del__(self):
if not hasattr(self, "resolved_path"):
# Means that the constructor failed. Nothing to do.
| diff --git a/tests/test_hf_file_system.py b/tests/test_hf_file_system.py
index 02cf913515..af9bf3b94f 100644
--- a/tests/test_hf_file_system.py
+++ b/tests/test_hf_file_system.py
@@ -131,6 +131,16 @@ def test_glob(self):
)
self.assertIsNotNone(files[keys[0]]["last_commit"])
+ def test_url(self):
+ self.assertEqual(
+ self.hffs.url(self.hf_path + "/data/text_data.txt"),
+ f"{ENDPOINT_STAGING}/datasets/{self.repo_id}/resolve/main/data/text_data.txt",
+ )
+ self.assertEqual(
+ self.hffs.url(self.hf_path + "/data"),
+ f"{ENDPOINT_STAGING}/datasets/{self.repo_id}/tree/main/data",
+ )
+
def test_file_type(self):
self.assertTrue(
self.hffs.isdir(self.hf_path + "/data") and not self.hffs.isdir(self.hf_path + "/.gitattributes")
| [
{
"components": [
{
"doc": "Get the HTTP URL of the given path",
"lines": [
580,
592
],
"name": "HfFileSystem.url",
"signature": "def url(self, path: str) -> str:",
"type": "function"
},
{
"doc": "",
"lines": [... | [
"tests/test_hf_file_system.py::HfFileSystemTests::test_url"
] | [
"tests/test_hf_file_system.py::HfFileSystemTests::test_copy_file",
"tests/test_hf_file_system.py::HfFileSystemTests::test_file_type",
"tests/test_hf_file_system.py::HfFileSystemTests::test_find_data_file_no_revision",
"tests/test_hf_file_system.py::HfFileSystemTests::test_find_root_directory_no_revision",
"... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `HfFileSystem.url` method
Adds a `url` method to the `HfFileSystem` to simplify converting HF paths to HTTP URLs, which should be useful when working with libs that support HTTP URLs but not `fsspec` paths as input/output (e.g., `webdataset`, `polars`, etc.).
PS: The `url` method is not part of the official `fsspec` specification, but popular filesystem implementations such as `gcsfs` and `s3fs` also have it
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/huggingface_hub/hf_file_system.py]
(definition of HfFileSystem.url:)
def url(self, path: str) -> str:
"""Get the HTTP URL of the given path"""
(definition of HfFileSystemFile.url:)
def url(self) -> str:
(definition of HfFileSystemStreamFile.url:)
def url(self) -> str:
[end of new definitions in src/huggingface_hub/hf_file_system.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4058e1f97ebe256b2f3006d4bc31be275c66df6b | ||
deepset-ai__haystack-6977 | 6,977 | deepset-ai/haystack | null | 549021d2fcf26feac772731ae1e5607365be242b | 2024-02-12T16:26:45Z | diff --git a/haystack/components/rankers/meta_field.py b/haystack/components/rankers/meta_field.py
index 57e48995e2..d1a2cc9765 100644
--- a/haystack/components/rankers/meta_field.py
+++ b/haystack/components/rankers/meta_field.py
@@ -1,6 +1,8 @@
import logging
from collections import defaultdict
-from typing import List, Dict, Any, Optional, Literal
+from typing import List, Dict, Any, Optional, Literal, Callable
+from dateutil.parser import parse as date_parse
+
from haystack import Document, component, default_to_dict
@@ -37,6 +39,7 @@ def __init__(
top_k: Optional[int] = None,
ranking_mode: Literal["reciprocal_rank_fusion", "linear_score"] = "reciprocal_rank_fusion",
sort_order: Literal["ascending", "descending"] = "descending",
+ meta_value_type: Optional[Literal["float", "int", "date"]] = None,
):
"""
Creates an instance of MetaFieldRanker.
@@ -53,6 +56,15 @@ def __init__(
Use the 'score' mode only with Retrievers or Rankers that return a score in range [0,1].
:param sort_order: Whether to sort the meta field by ascending or descending order.
Possible values are `descending` (default) and `ascending`.
+ :param meta_value_type: Parse the meta value into the data type specified before sorting.
+ This will only work if all meta values stored under `meta_field` in the provided documents are strings.
+ For example, if we specified `meta_value_type="date"` then for the meta value `"date": "2015-02-01"`
+ we would parse the string into a datetime object and then sort the documents by date.
+ The available options are:
+ -'float' will parse the meta values into floats.
+ -'int' will parse the meta values into integers.
+ -'date' will parse the meta values into datetime objects.
+ -'None' (default) will do no parsing.
"""
self.meta_field = meta_field
@@ -61,8 +73,13 @@ def __init__(
self.ranking_mode = ranking_mode
self.sort_order = sort_order
self._validate_params(
- weight=self.weight, top_k=self.top_k, ranking_mode=self.ranking_mode, sort_order=self.sort_order
+ weight=self.weight,
+ top_k=self.top_k,
+ ranking_mode=self.ranking_mode,
+ sort_order=self.sort_order,
+ meta_value_type=meta_value_type,
)
+ self.meta_value_type = meta_value_type
def _validate_params(
self,
@@ -70,6 +87,7 @@ def _validate_params(
top_k: Optional[int],
ranking_mode: Literal["reciprocal_rank_fusion", "linear_score"],
sort_order: Literal["ascending", "descending"],
+ meta_value_type: Optional[Literal["float", "int", "date"]],
):
if top_k is not None and top_k <= 0:
raise ValueError("top_k must be > 0, but got %s" % top_k)
@@ -96,6 +114,14 @@ def _validate_params(
"MetaFieldRanker." % sort_order
)
+ if meta_value_type not in ["float", "int", "date", None]:
+ raise ValueError(
+ "The value of parameter <meta_value_type> must be 'float', 'int', 'date' or None but is "
+ "currently set to '%s'.\n"
+ "Change the <meta_value_type> value to 'float', 'int', 'date' or None when initializing the "
+ "MetaFieldRanker." % meta_value_type
+ )
+
def to_dict(self) -> Dict[str, Any]:
"""
Serialize object to a dictionary.
@@ -107,6 +133,7 @@ def to_dict(self) -> Dict[str, Any]:
top_k=self.top_k,
ranking_mode=self.ranking_mode,
sort_order=self.sort_order,
+ meta_value_type=self.meta_value_type,
)
@component.output_types(documents=List[Document])
@@ -117,6 +144,7 @@ def run(
weight: Optional[float] = None,
ranking_mode: Optional[Literal["reciprocal_rank_fusion", "linear_score"]] = None,
sort_order: Optional[Literal["ascending", "descending"]] = None,
+ meta_value_type: Optional[Literal["float", "int", "date"]] = None,
):
"""
Use this method to rank a list of Documents based on the selected meta field by:
@@ -139,6 +167,15 @@ def run(
:param sort_order: Whether to sort the meta field by ascending or descending order.
Possible values are `descending` (default) and `ascending`.
If not provided, the sort_order provided at initialization time is used.
+ :param meta_value_type: Parse the meta value into the data type specified before sorting.
+ This will only work if all meta values stored under `meta_field` in the provided documents are strings.
+ For example, if we specified `meta_value_type="date"` then for the meta value `"date": "2015-02-01"`
+ we would parse the string into a datetime object and then sort the documents by date.
+ The available options are:
+ -'float' will parse the meta values into floats.
+ -'int' will parse the meta values into integers.
+ -'date' will parse the meta values into datetime objects.
+ -'None' (default) will do no parsing.
"""
if not documents:
return {"documents": []}
@@ -147,7 +184,14 @@ def run(
weight = weight or self.weight
ranking_mode = ranking_mode or self.ranking_mode
sort_order = sort_order or self.sort_order
- self._validate_params(weight=weight, top_k=top_k, ranking_mode=ranking_mode, sort_order=sort_order)
+ meta_value_type = meta_value_type or self.meta_value_type
+ self._validate_params(
+ weight=weight,
+ top_k=top_k,
+ ranking_mode=ranking_mode,
+ sort_order=sort_order,
+ meta_value_type=meta_value_type,
+ )
# If the weight is 0 then ranking by meta field is disabled and the original documents should be returned
if weight == 0:
@@ -175,10 +219,14 @@ def run(
",".join([doc.id for doc in docs_missing_meta_field]),
)
+ # If meta_value_type is provided try to parse the meta values
+ parsed_meta = self._parse_meta(docs_with_meta_field=docs_with_meta_field, meta_value_type=meta_value_type)
+ tuple_parsed_meta_and_docs = list(zip(parsed_meta, docs_with_meta_field))
+
# Sort the documents by self.meta_field
reverse = sort_order == "descending"
try:
- sorted_by_meta = sorted(docs_with_meta_field, key=lambda doc: doc.meta[self.meta_field], reverse=reverse)
+ tuple_sorted_by_meta = sorted(tuple_parsed_meta_and_docs, key=lambda x: x[0], reverse=reverse)
except TypeError as error:
# Return original documents if mixed types that are not comparable are returned (e.g. int and list)
logger.warning(
@@ -190,10 +238,53 @@ def run(
return {"documents": documents[:top_k]}
# Add the docs missing the meta_field back on the end
+ sorted_by_meta = [doc for meta, doc in tuple_sorted_by_meta]
sorted_documents = sorted_by_meta + docs_missing_meta_field
sorted_documents = self._merge_rankings(documents, sorted_documents)
return {"documents": sorted_documents[:top_k]}
+ def _parse_meta(
+ self, docs_with_meta_field: List[Document], meta_value_type: Optional[Literal["float", "int", "date"]]
+ ) -> List[Any]:
+ """
+ Parse the meta values stored under `self.meta_field` for the Documents provided in `docs_with_meta_field`.
+ """
+ if meta_value_type is None:
+ return [d.meta[self.meta_field] for d in docs_with_meta_field]
+
+ unique_meta_values = {doc.meta[self.meta_field] for doc in docs_with_meta_field}
+ if not all(isinstance(meta_value, str) for meta_value in unique_meta_values):
+ logger.warning(
+ "The parameter <meta_value_type> is currently set to '%s', but not all of meta values in the "
+ "provided Documents with IDs %s are strings.\n"
+ "Skipping parsing of the meta values.\n"
+ "Set all meta values found under the <meta_field> parameter to strings to use <meta_value_type>.",
+ meta_value_type,
+ ",".join([doc.id for doc in docs_with_meta_field]),
+ )
+ return [d.meta[self.meta_field] for d in docs_with_meta_field]
+
+ parse_fn: Callable
+ if meta_value_type == "float":
+ parse_fn = float
+ elif meta_value_type == "int":
+ parse_fn = int
+ else:
+ parse_fn = date_parse
+
+ try:
+ meta_values = [parse_fn(d.meta[self.meta_field]) for d in docs_with_meta_field]
+ except ValueError as error:
+ logger.warning(
+ "Tried to parse the meta values of Documents with IDs %s, but got ValueError with the message: %s\n"
+ "Skipping parsing of the meta values.",
+ ",".join([doc.id for doc in docs_with_meta_field]),
+ error,
+ )
+ meta_values = [d.meta[self.meta_field] for d in docs_with_meta_field]
+
+ return meta_values
+
def _merge_rankings(self, documents: List[Document], sorted_documents: List[Document]) -> List[Document]:
"""
Merge the two different rankings for Documents sorted both by their content and by their meta field.
diff --git a/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml b/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml
new file mode 100644
index 0000000000..f2be1fb87a
--- /dev/null
+++ b/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new variable called meta_value_type to the MetaFieldRanker that allows a user to parse the meta value into the data type specified as along as the meta value is a string.
+ The supported values for meta_value_type are '"float"', '"int"', '"date"', or 'None'. If None is passed then no parsing is done.
+ For example, if we specified meta_value_type="date" then for the meta value "date": "2015-02-01" we would parse the string into a datetime object.
| diff --git a/test/components/rankers/test_metafield.py b/test/components/rankers/test_metafield.py
index 1269d3ca83..d729c55b4a 100644
--- a/test/components/rankers/test_metafield.py
+++ b/test/components/rankers/test_metafield.py
@@ -17,12 +17,18 @@ def test_to_dict(self):
"top_k": None,
"ranking_mode": "reciprocal_rank_fusion",
"sort_order": "descending",
+ "meta_value_type": None,
},
}
def test_to_dict_with_custom_init_parameters(self):
component = MetaFieldRanker(
- meta_field="rating", weight=0.5, top_k=5, ranking_mode="linear_score", sort_order="ascending"
+ meta_field="rating",
+ weight=0.5,
+ top_k=5,
+ ranking_mode="linear_score",
+ sort_order="ascending",
+ meta_value_type="date",
)
data = component.to_dict()
assert data == {
@@ -33,6 +39,7 @@ def test_to_dict_with_custom_init_parameters(self):
"top_k": 5,
"ranking_mode": "linear_score",
"sort_order": "ascending",
+ "meta_value_type": "date",
},
}
@@ -82,6 +89,27 @@ def test_sort_order_ascending(self):
sorted_scores = sorted([doc.meta["rating"] for doc in docs_after])
assert [doc.meta["rating"] for doc in docs_after] == sorted_scores
+ def test_meta_value_type_float(self):
+ ranker = MetaFieldRanker(meta_field="rating", weight=1.0, meta_value_type="float")
+ docs_before = [Document(content="abc", meta={"rating": value}) for value in ["1.1", "10.5", "2.3"]]
+ docs_after = ranker.run(documents=docs_before)["documents"]
+ assert len(docs_after) == 3
+ assert [doc.meta["rating"] for doc in docs_after] == ["10.5", "2.3", "1.1"]
+
+ def test_meta_value_type_int(self):
+ ranker = MetaFieldRanker(meta_field="rating", weight=1.0, meta_value_type="int")
+ docs_before = [Document(content="abc", meta={"rating": value}) for value in ["1", "10", "2"]]
+ docs_after = ranker.run(documents=docs_before)["documents"]
+ assert len(docs_after) == 3
+ assert [doc.meta["rating"] for doc in docs_after] == ["10", "2", "1"]
+
+ def test_meta_value_type_date(self):
+ ranker = MetaFieldRanker(meta_field="rating", weight=1.0, meta_value_type="date")
+ docs_before = [Document(content="abc", meta={"rating": value}) for value in ["2022-10", "2023-01", "2022-11"]]
+ docs_after = ranker.run(documents=docs_before)["documents"]
+ assert len(docs_after) == 3
+ assert [doc.meta["rating"] for doc in docs_after] == ["2023-01", "2022-11", "2022-10"]
+
def test_returns_empty_list_if_no_documents_are_provided(self):
ranker = MetaFieldRanker(meta_field="rating")
output = ranker.run(documents=[])
@@ -123,6 +151,36 @@ def test_warning_if_unsortable_values(self, caplog):
assert len(output["documents"]) == 3
assert "Tried to sort Documents with IDs 1,2,3, but got TypeError with the message:" in caplog.text
+ def test_warning_if_meta_value_parsing_error(self, caplog):
+ ranker = MetaFieldRanker(meta_field="rating", meta_value_type="float")
+ docs_before = [
+ Document(id="1", content="abc", meta={"rating": "1.3"}),
+ Document(id="2", content="abc", meta={"rating": "1.2"}),
+ Document(id="3", content="abc", meta={"rating": "not a float"}),
+ ]
+ with caplog.at_level(logging.WARNING):
+ output = ranker.run(documents=docs_before)
+ assert len(output["documents"]) == 3
+ assert (
+ "Tried to parse the meta values of Documents with IDs 1,2,3, but got ValueError with the message:"
+ in caplog.text
+ )
+
+ def test_warning_meta_value_type_not_all_strings(self, caplog):
+ ranker = MetaFieldRanker(meta_field="rating", meta_value_type="float")
+ docs_before = [
+ Document(id="1", content="abc", meta={"rating": "1.3"}),
+ Document(id="2", content="abc", meta={"rating": "1.2"}),
+ Document(id="3", content="abc", meta={"rating": 2.1}),
+ ]
+ with caplog.at_level(logging.WARNING):
+ output = ranker.run(documents=docs_before)
+ assert len(output["documents"]) == 3
+ assert (
+ "The parameter <meta_value_type> is currently set to 'float', but not all of meta values in the provided Documents with IDs 1,2,3 are strings."
+ in caplog.text
+ )
+
def test_raises_value_error_if_wrong_ranking_mode(self):
with pytest.raises(ValueError):
MetaFieldRanker(meta_field="rating", ranking_mode="wrong_mode")
@@ -140,6 +198,10 @@ def test_raises_value_error_if_wrong_sort_order(self):
with pytest.raises(ValueError):
MetaFieldRanker(meta_field="rating", sort_order="wrong_order")
+ def test_raises_value_error_if_wrong_meta_value_type(self):
+ with pytest.raises(ValueError):
+ MetaFieldRanker(meta_field="rating", meta_value_type="wrong_type")
+
def test_linear_score(self):
ranker = MetaFieldRanker(meta_field="rating", ranking_mode="linear_score", weight=0.5)
docs_before = [
| diff --git a/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml b/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml
new file mode 100644
index 0000000000..f2be1fb87a
--- /dev/null
+++ b/releasenotes/notes/metafieldranker-meta-value-type-365ff1bdb412257b.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new variable called meta_value_type to the MetaFieldRanker that allows a user to parse the meta value into the data type specified as along as the meta value is a string.
+ The supported values for meta_value_type are '"float"', '"int"', '"date"', or 'None'. If None is passed then no parsing is done.
+ For example, if we specified meta_value_type="date" then for the meta value "date": "2015-02-01" we would parse the string into a datetime object.
| [
{
"components": [
{
"doc": "Parse the meta values stored under `self.meta_field` for the Documents provided in `docs_with_meta_field`.",
"lines": [
246,
286
],
"name": "MetaFieldRanker._parse_meta",
"signature": "def _parse_meta( self, docs_with_... | [
"test/components/rankers/test_metafield.py::TestMetaFieldRanker::test_to_dict",
"test/components/rankers/test_metafield.py::TestMetaFieldRanker::test_to_dict_with_custom_init_parameters",
"test/components/rankers/test_metafield.py::TestMetaFieldRanker::test_meta_value_type_float",
"test/components/rankers/tes... | [
"[",
"[100%]",
"test/components/rankers/test_metafield.py::TestMetaFieldRanker::test_run[meta_field_values0-2.1]",
"test/components/rankers/test_metafield.py::TestMetaFieldRanker::test_run[meta_field_values1-8]",
"test/components/rankers/test_metafield.py::TestMetaFieldRanker::test_run_with_weight_equal_to_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Meta field ranker add `meta_value_type`
### Related Issues
- resolves the RecentnessRanker in issue https://github.com/deepset-ai/haystack/issues/6673
### Proposed Changes:
<!--- In case of a bug: Describe what caused the issue and how you solved it -->
<!--- In case of a feature: Describe what did you add and how it works -->
Add a new variable called `meta_value_type` that allows a user to parse the meta value into the data type specified if the meta value is a string. For example, if we specified `meta_value_type="date"` then for the meta value `"date": "2015-02-01"` we would parse the string into a datetime object. However, when returning the documents we still return the documents with their original meta data so the parsing is only used for sorting.
The available options are:
- 'float' will parse the meta values into floats.
- 'int' will parse the meta values into integers.
- 'date' will parse the meta values into datetime objects.
- 'None' (default) will do no parsing.
### How did you test it?
<!-- unit tests, integration tests, manual verification, instructions for manual tests -->
- Added unit tests
### Notes for the reviewer
<!-- E.g. point out section where the reviewer -->
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/rankers/meta_field.py]
(definition of MetaFieldRanker._parse_meta:)
def _parse_meta( self, docs_with_meta_field: List[Document], meta_value_type: Optional[Literal["float", "int", "date"]] ) -> List[Any]:
"""Parse the meta values stored under `self.meta_field` for the Documents provided in `docs_with_meta_field`."""
[end of new definitions in haystack/components/rankers/meta_field.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
pvlib__pvlib-python-1969 | 1,969 | pvlib/pvlib-python | 0.9 | c4a2b4bdb99216dbf1bf1dd58c7156f25c0e7458 | 2024-02-10T13:37:29Z | diff --git a/ci/requirements-py3.10.yml b/ci/requirements-py3.10.yml
index cc11e4cb8a..617d89c755 100644
--- a/ci/requirements-py3.10.yml
+++ b/ci/requirements-py3.10.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.11.yml b/ci/requirements-py3.11.yml
index 5bd43c6df7..2ffdd932bd 100644
--- a/ci/requirements-py3.11.yml
+++ b/ci/requirements-py3.11.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.12.yml b/ci/requirements-py3.12.yml
index 156a408f48..250a9344c0 100644
--- a/ci/requirements-py3.12.yml
+++ b/ci/requirements-py3.12.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.7-min.yml b/ci/requirements-py3.7-min.yml
index 65dd6fa744..6371d5afb9 100644
--- a/ci/requirements-py3.7-min.yml
+++ b/ci/requirements-py3.7-min.yml
@@ -14,8 +14,8 @@ dependencies:
- pip:
- dataclasses
- h5py==3.1.0
- - numpy==1.16.0
- - pandas==0.25.0
+ - numpy==1.17.3
+ - pandas==1.3.0
- scipy==1.5.0
- pytest-rerunfailures # conda version is >3.6
- pytest-remotedata # conda package is 0.3.0, needs > 0.3.1
diff --git a/ci/requirements-py3.7.yml b/ci/requirements-py3.7.yml
index 49da67f3de..4b175ec532 100644
--- a/ci/requirements-py3.7.yml
+++ b/ci/requirements-py3.7.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.8.yml b/ci/requirements-py3.8.yml
index 0f5d63fd4a..814708a911 100644
--- a/ci/requirements-py3.8.yml
+++ b/ci/requirements-py3.8.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.9.yml b/ci/requirements-py3.9.yml
index 14151ce47a..24573894b7 100644
--- a/ci/requirements-py3.9.yml
+++ b/ci/requirements-py3.9.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/docs/sphinx/source/reference/iotools.rst b/docs/sphinx/source/reference/iotools.rst
index 39081220f3..5f405d7536 100644
--- a/docs/sphinx/source/reference/iotools.rst
+++ b/docs/sphinx/source/reference/iotools.rst
@@ -53,6 +53,7 @@ of sources and file formats relevant to solar energy modeling.
iotools.get_solcast_historic
iotools.get_solcast_forecast
iotools.get_solcast_live
+ iotools.get_solargis
A :py:class:`~pvlib.location.Location` object may be created from metadata
diff --git a/docs/sphinx/source/whatsnew.rst b/docs/sphinx/source/whatsnew.rst
index c614c0de98..7fad810f8e 100644
--- a/docs/sphinx/source/whatsnew.rst
+++ b/docs/sphinx/source/whatsnew.rst
@@ -6,6 +6,7 @@ What's New
These are new features and improvements of note in each release.
+.. include:: whatsnew/v0.10.4.rst
.. include:: whatsnew/v0.10.3.rst
.. include:: whatsnew/v0.10.2.rst
.. include:: whatsnew/v0.10.1.rst
diff --git a/docs/sphinx/source/whatsnew/v0.10.4.rst b/docs/sphinx/source/whatsnew/v0.10.4.rst
index 7dbf7d7d6d..216fed8b17 100644
--- a/docs/sphinx/source/whatsnew/v0.10.4.rst
+++ b/docs/sphinx/source/whatsnew/v0.10.4.rst
@@ -8,6 +8,8 @@ v0.10.4 (Anticipated March, 2024)
Enhancements
~~~~~~~~~~~~
* Added the Huld PV model used by PVGIS (:pull:`1940`)
+* Add :py:func:`pvlib.iotools.get_solargis` for retrieving Solargis
+ irradiance data. (:pull:`1969`)
* Added function :py:func:`pvlib.shading.projected_solar_zenith_angle`,
a common calculation in shading and tracking. (:issue:`1734`, :pull:`1904`)
* Added :py:func:`~pvlib.iotools.get_solrad` for fetching irradiance data from
@@ -15,7 +17,6 @@ Enhancements
* Added metadata parsing to :py:func:`~pvlib.iotools.read_solrad` to follow the standard iotools
convention of returning a tuple of (data, meta). Previously the function only returned a dataframe. (:pull:`1968`)
-
Bug fixes
~~~~~~~~~
* Fixed an error in solar position calculations when using
@@ -33,6 +34,7 @@ Bug fixes
``temperature_model_parameters`` are specified on the passed ``system`` instead of on its ``arrays``. (:issue:`1759`).
* :py:func:`pvlib.irradiance.ghi_from_poa_driesse_2023` now correctly makes use
of the ``xtol`` argument. Previously, it was ignored. (:issue:`1970`, :pull:`1971`)
+* Fixed incorrect unit conversion of precipitable water used for the Solcast iotools functions.
* :py:class:`~pvlib.modelchain.ModelChain.infer_temperature_model` now raises a more useful error when
the temperature model cannot be inferred (:issue:`1946`)
@@ -49,6 +51,8 @@ Documentation
Requirements
~~~~~~~~~~~~
+* Minimum version of pandas advanced from 0.25.0 to 1.3.0. (:pull:`1969`)
+* Minimum version of numpy advanced from 1.16.0 to 1.17.3. (:pull:`1969`)
Contributors
@@ -59,5 +63,4 @@ Contributors
* Cliff Hansen (:ghuser:`cwhanse`)
* Roma Koulikov (:ghuser:`matsuobasho`)
* Adam R. Jensen (:ghuser:`AdamRJensen`)
-* Kevin Anderson (:ghuser:`kandersolar`)
* Peter Dudfield (:ghuser:`peterdudfield`)
diff --git a/pvlib/iotools/__init__.py b/pvlib/iotools/__init__.py
index 0fbec16c1f..96259ecc24 100644
--- a/pvlib/iotools/__init__.py
+++ b/pvlib/iotools/__init__.py
@@ -34,3 +34,4 @@
from pvlib.iotools.solcast import get_solcast_live # noqa: F401
from pvlib.iotools.solcast import get_solcast_historic # noqa: F401
from pvlib.iotools.solcast import get_solcast_tmy # noqa: F401
+from pvlib.iotools.solargis import get_solargis # noqa: F401
diff --git a/pvlib/iotools/solargis.py b/pvlib/iotools/solargis.py
new file mode 100644
index 0000000000..375c7ed3e8
--- /dev/null
+++ b/pvlib/iotools/solargis.py
@@ -0,0 +1,214 @@
+"""Functions to retrieve and parse irradiance data from Solargis."""
+
+import pandas as pd
+import requests
+from dataclasses import dataclass
+import io
+
+URL = 'https://solargis.info/ws/rest/datadelivery/request'
+
+
+TIME_RESOLUTION_MAP = {
+ 5: 'MIN_5', 10: 'MIN_10', 15: 'MIN_15', 30: 'MIN_30', 60: 'HOURLY',
+ 'PT05M': 'MIN_5', 'PT5M': 'MIN_5', 'PT10M': 'MIN_10', 'PT15M': 'MIN_15',
+ 'PT30': 'MIN_30', 'PT60M': 'HOURLY', 'PT1H': 'HOURLY', 'P1D': 'DAILY',
+ 'P1M': 'MONTHLY', 'P1Y': 'YEARLY'}
+
+
+@dataclass
+class ParameterMap:
+ solargis_name: str
+ pvlib_name: str
+ conversion: callable = lambda x: x
+
+
+# define the conventions between Solargis and pvlib nomenclature and units
+VARIABLE_MAP = [
+ # Irradiance (unit varies based on time resolution)
+ ParameterMap('GHI', 'ghi'),
+ ParameterMap('GHI_C', 'ghi_clear'), # this is stated in documentation
+ ParameterMap('GHIc', 'ghi_clear'), # this is used in practice
+ ParameterMap('DNI', 'dni'),
+ ParameterMap('DNI_C', 'dni_clear'),
+ ParameterMap('DNIc', 'dni_clear'),
+ ParameterMap('DIF', 'dhi'),
+ ParameterMap('GTI', 'poa_global'),
+ ParameterMap('GTI_C', 'poa_global_clear'),
+ ParameterMap('GTIc', 'poa_global_clear'),
+ # Solar position
+ ParameterMap('SE', 'solar_elevation'),
+ # SA -> solar_azimuth (degrees) (different convention)
+ ParameterMap("SA", "solar_azimuth", lambda x: x + 180),
+ # Weather / atmospheric parameters
+ ParameterMap('TEMP', 'temp_air'),
+ ParameterMap('TD', 'temp_dew'),
+ # surface_pressure (hPa) -> pressure (Pa)
+ ParameterMap('AP', 'pressure', lambda x: x*100),
+ ParameterMap('RH', 'relative_humidity'),
+ ParameterMap('WS', 'wind_speed'),
+ ParameterMap('WD', 'wind_direction'),
+ ParameterMap('INC', 'aoi'), # angle of incidence of direct irradiance
+ # precipitable_water (kg/m2) -> precipitable_water (cm)
+ ParameterMap('PWAT', 'precipitable_water', lambda x: x/10),
+]
+
+METADATA_FIELDS = [
+ 'issued', 'site name', 'latitude', 'longitude', 'elevation',
+ 'summarization type', 'summarization period'
+]
+
+
+# Variables that use "-9" as nan values
+NA_9_COLUMNS = ['GHI', 'GHIc', 'DNI', 'DNIc', 'DIF', 'GTI', 'GIc', 'KT', 'PAR',
+ 'PREC', 'PWAT', 'SDWE', 'SFWE']
+
+
+def get_solargis(latitude, longitude, start, end, variables, api_key,
+ time_resolution, timestamp_type='center', tz='GMT+00',
+ terrain_shading=True, url=URL, map_variables=True,
+ timeout=30):
+ """
+ Retrieve irradiance time series data from Solargis.
+
+ The Solargis [1]_ API is described in [2]_.
+
+ Parameters
+ ----------
+ latitude: float
+ In decimal degrees, between -90 and 90, north is positive (ISO 19115)
+ longitude: float
+ In decimal degrees, between -180 and 180, east is positive (ISO 19115)
+ start : datetime-like
+ Start date of time series.
+ end : datetime-like
+ End date of time series.
+ variables : list
+ List of variables to request, see [2]_ for options.
+ api_key : str
+ API key.
+ time_resolution : str, {'PT05M', 'PT10M', 'PT15M', 'PT30', 'PT1H', 'P1D', 'P1M', 'P1Y'}
+ Time resolution as an integer number of minutes (e.g. 5, 60)
+ or an ISO 8601 duration string (e.g. "PT05M", "PT60M", "P1M").
+ timestamp_type : {'start', 'center', 'end'}, default: 'center'
+ Labeling of time stamps of the return data.
+ tz : str, default : 'GMT+00'
+ Timezone of `start` and `end` in the format "GMT+hh" or "GMT-hh".
+ terrain_shading : boolean, default: True
+ Whether to account for horizon shading.
+ url : str, default : :const:`pvlib.iotools.solargis.URL`
+ Base url of Solargis API.
+ map_variables : boolean, default: True
+ When true, renames columns of the Dataframe to pvlib variable names
+ where applicable. See variable :const:`VARIABLE_MAP`.
+ timeout : int or float, default: 30
+ Time in seconds to wait for server response before timeout
+
+ Returns
+ -------
+ data : DataFrame
+ DataFrame containing time series data.
+ meta : dict
+ Dictionary containing metadata.
+
+ Raises
+ ------
+ requests.HTTPError
+ A message from the Solargis server if the request is rejected
+
+ Notes
+ -----
+ Each XML request is limited to retrieving 31 days of data.
+
+ The variable units depends on the time frequency, e.g., the unit for
+ sub-hourly irradiance data is :math:`W/m^2`, for hourly data it is
+ :math:`Wh/m^2`, and for daily data it is :math:`kWh/m^2`.
+
+ References
+ ----------
+ .. [1] `Solargis <https://solargis.com>`_
+ .. [2] `Solargis API User Guide
+ <https://solargis.atlassian.net/wiki/spaces/public/pages/7602367/Solargis+API+User+Guide>`_
+
+ Examples
+ --------
+ >>> # Retrieve two days of irradiance data from Solargis
+ >>> data, meta = response = pvlib.iotools.get_solargis(
+ >>> latitude=48.61259, longitude=20.827079,
+ >>> start='2022-01-01', end='2022-01-02',
+ >>> variables=['GHI', 'DNI'], time_resolution='PT05M', api_key='demo')
+ """ # noqa: E501
+ # Use pd.to_datetime so that strings (e.g. '2021-01-01') are accepted
+ start = pd.to_datetime(start)
+ end = pd.to_datetime(end)
+
+ headers = {'Content-Type': 'application/xml'}
+
+ # Solargis recommends creating a unique site_id for each location request.
+ # The site_id does not impact the data retrieval and is used for debugging.
+ site_id = f"latitude_{latitude}_longitude_{longitude}"
+
+ request_xml = f'''<ws:dataDeliveryRequest
+ dateFrom="{start.strftime('%Y-%m-%d')}"
+ dateTo="{end.strftime('%Y-%m-%d')}"
+ xmlns="http://geomodel.eu/schema/data/request"
+ xmlns:ws="http://geomodel.eu/schema/ws/data"
+ xmlns:geo="http://geomodel.eu/schema/common/geo"
+ xmlns:pv="http://geomodel.eu/schema/common/pv"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <site id="{site_id}" name="" lat="{latitude}" lng="{longitude}">
+ </site>
+ <processing key="{' '.join(variables)}"
+ summarization="{TIME_RESOLUTION_MAP.get(time_resolution, time_resolution).upper()}"
+ terrainShading="{str(terrain_shading).lower()}">
+ <timestampType>{timestamp_type.upper()}</timestampType>
+ <timeZone>{tz}</timeZone>
+ </processing>
+ </ws:dataDeliveryRequest>''' # noqa: E501
+
+ response = requests.post(url + "?key=" + api_key, headers=headers,
+ data=request_xml.encode('utf8'), timeout=timeout)
+
+ if response.ok is False:
+ raise requests.HTTPError(response.json())
+
+ # Parse metadata
+ header = pd.read_xml(io.StringIO(response.text), parser='etree')
+ meta_lines = header['metadata'].iloc[0].split('#')
+ meta_lines = [line.strip() for line in meta_lines]
+ meta = {}
+ for line in meta_lines:
+ if ':' in line:
+ key = line.split(':')[0].lower()
+ if key in METADATA_FIELDS:
+ meta[key] = ':'.join(line.split(':')[1:])
+ meta['latitude'] = float(meta['latitude'])
+ meta['longitude'] = float(meta['longitude'])
+ meta['altitude'] = float(meta.pop('elevation').replace('m a.s.l.', ''))
+
+ # Parse data
+ data = pd.read_xml(io.StringIO(response.text), xpath='.//doc:row',
+ namespaces={'doc': 'http://geomodel.eu/schema/ws/data'},
+ parser='etree')
+ data.index = pd.to_datetime(data['dateTime'])
+ # when requesting one variable, it is necessary to convert dataframe to str
+ data = data['values'].astype(str).str.split(' ', expand=True)
+ data = data.astype(float)
+ data.columns = header['columns'].iloc[0].split()
+
+ # Replace "-9" with nan values for specific columns
+ for variable in data.columns:
+ if variable in NA_9_COLUMNS:
+ data[variable] = data[variable].replace(-9, pd.NA)
+
+ # rename and convert variables
+ if map_variables:
+ for variable in VARIABLE_MAP:
+ if variable.solargis_name in data.columns:
+ data.rename(
+ columns={variable.solargis_name: variable.pvlib_name},
+ inplace=True
+ )
+ data[variable.pvlib_name] = data[
+ variable.pvlib_name].apply(variable.conversion)
+
+ return data, meta
diff --git a/pvlib/iotools/solcast.py b/pvlib/iotools/solcast.py
index 4fcee40050..5abd9c724e 100644
--- a/pvlib/iotools/solcast.py
+++ b/pvlib/iotools/solcast.py
@@ -35,7 +35,7 @@ class ParameterMap:
"azimuth", "solar_azimuth", lambda x: -x % 360
),
# precipitable_water (kg/m2) -> precipitable_water (cm)
- ParameterMap("precipitable_water", "precipitable_water", lambda x: x*10),
+ ParameterMap("precipitable_water", "precipitable_water", lambda x: x/10),
# zenith -> solar_zenith
ParameterMap("zenith", "solar_zenith"),
# clearsky
diff --git a/pyproject.toml b/pyproject.toml
index 75970c9f92..0053f0e568 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,8 +11,8 @@ authors = [
]
requires-python = ">=3.7"
dependencies = [
- 'numpy >= 1.16.0',
- 'pandas >= 0.25.0',
+ 'numpy >= 1.17.3',
+ 'pandas >= 1.3.0',
'pytz',
'requests',
'scipy >= 1.5.0',
| diff --git a/pvlib/tests/iotools/test_solargis.py b/pvlib/tests/iotools/test_solargis.py
new file mode 100644
index 0000000000..55882e91c5
--- /dev/null
+++ b/pvlib/tests/iotools/test_solargis.py
@@ -0,0 +1,68 @@
+import pandas as pd
+import pytest
+import pvlib
+import requests
+from ..conftest import (RERUNS, RERUNS_DELAY, assert_frame_equal,
+ assert_index_equal)
+
+
+@pytest.fixture
+def hourly_index():
+ hourly_index = pd.date_range(start='2022-01-01 00:30+01:00', freq='60min',
+ periods=24, name='dateTime')
+ hourly_index.freq = None
+ return hourly_index
+
+
+@pytest.fixture
+def hourly_index_start_utc():
+ hourly_index_left_utc = pd.date_range(
+ start='2023-01-01 00:00+00:00', freq='30min', periods=24*2,
+ name='dateTime')
+ hourly_index_left_utc.freq = None
+ return hourly_index_left_utc
+
+
+@pytest.fixture
+def hourly_dataframe(hourly_index):
+ ghi = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 73.0, 152.0, 141.0, 105.0,
+ 62.0, 65.0, 62.0, 11.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+ dni = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 233.0, 301.0, 136.0, 32.0,
+ 0.0, 3.0, 77.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+ return pd.DataFrame(data={'ghi': ghi, 'dni': dni}, index=hourly_index)
+
+
+@pytest.mark.remote_data
+@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
+def test_get_solargis(hourly_dataframe):
+ data, meta = pvlib.iotools.get_solargis(
+ latitude=48.61259, longitude=20.827079,
+ start='2022-01-01', end='2022-01-01',
+ tz='GMT+01', variables=['GHI', 'DNI'],
+ time_resolution='HOURLY', api_key='demo')
+ assert_frame_equal(data, hourly_dataframe)
+
+
+@pytest.mark.remote_data
+@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
+def test_get_solargis_utc_start_timestamp(hourly_index_start_utc):
+ data, meta = pvlib.iotools.get_solargis(
+ latitude=48.61259, longitude=20.827079,
+ start='2023-01-01', end='2023-01-01',
+ variables=['GTI'],
+ timestamp_type='start',
+ time_resolution='MIN_30',
+ map_variables=False, api_key='demo')
+ assert 'GTI' in data.columns # assert that variables aren't mapped
+ assert_index_equal(data.index, hourly_index_start_utc)
+
+
+@pytest.mark.remote_data
+@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
+def test_get_solargis_http_error():
+ # Test if HTTPError is raised if date outside range is specified
+ with pytest.raises(requests.HTTPError, match="data coverage"):
+ _, _ = pvlib.iotools.get_solargis(
+ latitude=48.61259, longitude=20.827079,
+ start='1920-01-01', end='1920-01-01', # date outside range
+ variables=['GHI', 'DNI'], time_resolution='HOURLY', api_key='demo')
diff --git a/pvlib/tests/iotools/test_solcast.py b/pvlib/tests/iotools/test_solcast.py
index 19b00b8611..3879d88b20 100644
--- a/pvlib/tests/iotools/test_solcast.py
+++ b/pvlib/tests/iotools/test_solcast.py
@@ -174,9 +174,9 @@ def test_get_solcast_tmy(
),
pd.DataFrame(
[[9.4200e+02, 8.4300e+02, 1.0174e+05, 3.0000e+01, 7.8000e+00,
- 3.1600e+02, 1.0100e+03, 2.0000e+00, 4.6000e+00, 1.6400e+02, 90],
+ 3.1600e+02, 1.0100e+03, 2.0000e+00, 4.6000e+00, 1.6400e+00, 90],
[9.3600e+02, 8.3200e+02, 1.0179e+05, 3.0000e+01, 7.9000e+00,
- 3.1600e+02, 9.9600e+02, 1.4000e+01, 4.5000e+00, 1.6300e+02, 0]],
+ 3.1600e+02, 9.9600e+02, 1.4000e+01, 4.5000e+00, 1.6300e+00, 0]],
columns=[
'dni', 'ghi', 'pressure', 'temp_air', 'wind_speed',
'wind_direction', 'poa_global', 'solar_azimuth',
| diff --git a/ci/requirements-py3.10.yml b/ci/requirements-py3.10.yml
index cc11e4cb8a..617d89c755 100644
--- a/ci/requirements-py3.10.yml
+++ b/ci/requirements-py3.10.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.11.yml b/ci/requirements-py3.11.yml
index 5bd43c6df7..2ffdd932bd 100644
--- a/ci/requirements-py3.11.yml
+++ b/ci/requirements-py3.11.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.12.yml b/ci/requirements-py3.12.yml
index 156a408f48..250a9344c0 100644
--- a/ci/requirements-py3.12.yml
+++ b/ci/requirements-py3.12.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.7-min.yml b/ci/requirements-py3.7-min.yml
index 65dd6fa744..6371d5afb9 100644
--- a/ci/requirements-py3.7-min.yml
+++ b/ci/requirements-py3.7-min.yml
@@ -14,8 +14,8 @@ dependencies:
- pip:
- dataclasses
- h5py==3.1.0
- - numpy==1.16.0
- - pandas==0.25.0
+ - numpy==1.17.3
+ - pandas==1.3.0
- scipy==1.5.0
- pytest-rerunfailures # conda version is >3.6
- pytest-remotedata # conda package is 0.3.0, needs > 0.3.1
diff --git a/ci/requirements-py3.7.yml b/ci/requirements-py3.7.yml
index 49da67f3de..4b175ec532 100644
--- a/ci/requirements-py3.7.yml
+++ b/ci/requirements-py3.7.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.8.yml b/ci/requirements-py3.8.yml
index 0f5d63fd4a..814708a911 100644
--- a/ci/requirements-py3.8.yml
+++ b/ci/requirements-py3.8.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/ci/requirements-py3.9.yml b/ci/requirements-py3.9.yml
index 14151ce47a..24573894b7 100644
--- a/ci/requirements-py3.9.yml
+++ b/ci/requirements-py3.9.yml
@@ -8,8 +8,8 @@ dependencies:
- ephem
- h5py
- numba
- - numpy >= 1.16.0
- - pandas >= 0.25.0
+ - numpy >= 1.17.3
+ - pandas >= 1.3.0
- pip
- pytest
- pytest-cov
diff --git a/docs/sphinx/source/reference/iotools.rst b/docs/sphinx/source/reference/iotools.rst
index 39081220f3..5f405d7536 100644
--- a/docs/sphinx/source/reference/iotools.rst
+++ b/docs/sphinx/source/reference/iotools.rst
@@ -53,6 +53,7 @@ of sources and file formats relevant to solar energy modeling.
iotools.get_solcast_historic
iotools.get_solcast_forecast
iotools.get_solcast_live
+ iotools.get_solargis
A :py:class:`~pvlib.location.Location` object may be created from metadata
diff --git a/docs/sphinx/source/whatsnew.rst b/docs/sphinx/source/whatsnew.rst
index c614c0de98..7fad810f8e 100644
--- a/docs/sphinx/source/whatsnew.rst
+++ b/docs/sphinx/source/whatsnew.rst
@@ -6,6 +6,7 @@ What's New
These are new features and improvements of note in each release.
+.. include:: whatsnew/v0.10.4.rst
.. include:: whatsnew/v0.10.3.rst
.. include:: whatsnew/v0.10.2.rst
.. include:: whatsnew/v0.10.1.rst
diff --git a/docs/sphinx/source/whatsnew/v0.10.4.rst b/docs/sphinx/source/whatsnew/v0.10.4.rst
index 7dbf7d7d6d..216fed8b17 100644
--- a/docs/sphinx/source/whatsnew/v0.10.4.rst
+++ b/docs/sphinx/source/whatsnew/v0.10.4.rst
@@ -8,6 +8,8 @@ v0.10.4 (Anticipated March, 2024)
Enhancements
~~~~~~~~~~~~
* Added the Huld PV model used by PVGIS (:pull:`1940`)
+* Add :py:func:`pvlib.iotools.get_solargis` for retrieving Solargis
+ irradiance data. (:pull:`1969`)
* Added function :py:func:`pvlib.shading.projected_solar_zenith_angle`,
a common calculation in shading and tracking. (:issue:`1734`, :pull:`1904`)
* Added :py:func:`~pvlib.iotools.get_solrad` for fetching irradiance data from
@@ -15,7 +17,6 @@ Enhancements
* Added metadata parsing to :py:func:`~pvlib.iotools.read_solrad` to follow the standard iotools
convention of returning a tuple of (data, meta). Previously the function only returned a dataframe. (:pull:`1968`)
-
Bug fixes
~~~~~~~~~
* Fixed an error in solar position calculations when using
@@ -33,6 +34,7 @@ Bug fixes
``temperature_model_parameters`` are specified on the passed ``system`` instead of on its ``arrays``. (:issue:`1759`).
* :py:func:`pvlib.irradiance.ghi_from_poa_driesse_2023` now correctly makes use
of the ``xtol`` argument. Previously, it was ignored. (:issue:`1970`, :pull:`1971`)
+* Fixed incorrect unit conversion of precipitable water used for the Solcast iotools functions.
* :py:class:`~pvlib.modelchain.ModelChain.infer_temperature_model` now raises a more useful error when
the temperature model cannot be inferred (:issue:`1946`)
@@ -49,6 +51,8 @@ Documentation
Requirements
~~~~~~~~~~~~
+* Minimum version of pandas advanced from 0.25.0 to 1.3.0. (:pull:`1969`)
+* Minimum version of numpy advanced from 1.16.0 to 1.17.3. (:pull:`1969`)
Contributors
@@ -59,5 +63,4 @@ Contributors
* Cliff Hansen (:ghuser:`cwhanse`)
* Roma Koulikov (:ghuser:`matsuobasho`)
* Adam R. Jensen (:ghuser:`AdamRJensen`)
-* Kevin Anderson (:ghuser:`kandersolar`)
* Peter Dudfield (:ghuser:`peterdudfield`)
diff --git a/pyproject.toml b/pyproject.toml
index 75970c9f92..0053f0e568 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,8 +11,8 @@ authors = [
]
requires-python = ">=3.7"
dependencies = [
- 'numpy >= 1.16.0',
- 'pandas >= 0.25.0',
+ 'numpy >= 1.17.3',
+ 'pandas >= 1.3.0',
'pytz',
'requests',
'scipy >= 1.5.0',
| [
{
"components": [
{
"doc": "",
"lines": [
19,
22
],
"name": "ParameterMap",
"signature": "class ParameterMap:",
"type": "class"
},
{
"doc": "Retrieve irradiance time series data from Solargis.\n\nThe Solargis [1]_ API ... | [
"pvlib/tests/iotools/test_solcast.py::test_solcast2pvlib[in_df0-out_df0]"
] | [
"pvlib/tests/iotools/test_solcast.py::test__get_solcast[live/radiation_and_weather-params0-1234-json_response0]",
"pvlib/tests/iotools/test_solcast.py::test_get_solcast_live[live/radiation_and_weather-get_solcast_live-params0-json_response0-True]",
"pvlib/tests/iotools/test_solcast.py::test_get_solcast_live[liv... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add get_solargis iotools function
<!-- Thank you for your contribution! The following items must be addressed before the code can be merged. Please don't hesitate to ask for help if you're unsure of how to accomplish any of the items. Feel free to remove checklist items that are not relevant to your change. -->
- ~~[ ] Closes #xxxx~~
- [x] I am familiar with the [contributing guidelines](https://pvlib-python.readthedocs.io/en/latest/contributing.html)
- [x] Tests added
- [x] Updates entries in [`docs/sphinx/source/reference`](https://github.com/pvlib/pvlib-python/blob/main/docs/sphinx/source/reference) for API changes.
- [x] Adds description and name entries in the appropriate "what's new" file in [`docs/sphinx/source/whatsnew`](https://github.com/pvlib/pvlib-python/tree/main/docs/sphinx/source/whatsnew) for all changes. Includes link to the GitHub Issue with `` :issue:`num` `` or this Pull Request with `` :pull:`num` ``. Includes contributor name and/or GitHub username (link with `` :ghuser:`user` ``).
- [x] New code is fully documented. Includes [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) compliant docstrings, examples, and comments where necessary.
- [x] Pull request is nearly complete and ready for detailed review.
- [x] Maintainer: Appropriate GitHub Labels (including `remote-data`) and Milestone are assigned to the Pull Request and linked Issue.
<!-- Brief description of the problem and proposed solution (if not already fully described in the issue linked to above): -->
This PR adds a fetching function for Solargis irradiance data. Given that such functions for SolarAnywhere and Solcast has been added in the previous release, it seems fitting to also have it for Solargis.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pvlib/iotools/solargis.py]
(definition of ParameterMap:)
class ParameterMap:
(definition of get_solargis:)
def get_solargis(latitude, longitude, start, end, variables, api_key, time_resolution, timestamp_type='center', tz='GMT+00', terrain_shading=True, url=URL, map_variables=True, timeout=30):
"""Retrieve irradiance time series data from Solargis.
The Solargis [1]_ API is described in [2]_.
Parameters
----------
latitude: float
In decimal degrees, between -90 and 90, north is positive (ISO 19115)
longitude: float
In decimal degrees, between -180 and 180, east is positive (ISO 19115)
start : datetime-like
Start date of time series.
end : datetime-like
End date of time series.
variables : list
List of variables to request, see [2]_ for options.
api_key : str
API key.
time_resolution : str, {'PT05M', 'PT10M', 'PT15M', 'PT30', 'PT1H', 'P1D', 'P1M', 'P1Y'}
Time resolution as an integer number of minutes (e.g. 5, 60)
or an ISO 8601 duration string (e.g. "PT05M", "PT60M", "P1M").
timestamp_type : {'start', 'center', 'end'}, default: 'center'
Labeling of time stamps of the return data.
tz : str, default : 'GMT+00'
Timezone of `start` and `end` in the format "GMT+hh" or "GMT-hh".
terrain_shading : boolean, default: True
Whether to account for horizon shading.
url : str, default : :const:`pvlib.iotools.solargis.URL`
Base url of Solargis API.
map_variables : boolean, default: True
When true, renames columns of the Dataframe to pvlib variable names
where applicable. See variable :const:`VARIABLE_MAP`.
timeout : int or float, default: 30
Time in seconds to wait for server response before timeout
Returns
-------
data : DataFrame
DataFrame containing time series data.
meta : dict
Dictionary containing metadata.
Raises
------
requests.HTTPError
A message from the Solargis server if the request is rejected
Notes
-----
Each XML request is limited to retrieving 31 days of data.
The variable units depends on the time frequency, e.g., the unit for
sub-hourly irradiance data is :math:`W/m^2`, for hourly data it is
:math:`Wh/m^2`, and for daily data it is :math:`kWh/m^2`.
References
----------
.. [1] `Solargis <https://solargis.com>`_
.. [2] `Solargis API User Guide
<https://solargis.atlassian.net/wiki/spaces/public/pages/7602367/Solargis+API+User+Guide>`_
Examples
--------
>>> # Retrieve two days of irradiance data from Solargis
>>> data, meta = response = pvlib.iotools.get_solargis(
>>> latitude=48.61259, longitude=20.827079,
>>> start='2022-01-01', end='2022-01-02',
>>> variables=['GHI', 'DNI'], time_resolution='PT05M', api_key='demo')"""
[end of new definitions in pvlib/iotools/solargis.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | d53f97e984bfdd268aa92f8bf482ced0edda0110 | |
deepset-ai__haystack-6936 | 6,936 | deepset-ai/haystack | null | 74683fe74d400820a442cca03bb69473824e841a | 2024-02-08T07:50:04Z | diff --git a/haystack/components/converters/__init__.py b/haystack/components/converters/__init__.py
index 24bbdf9c31..61697488ae 100644
--- a/haystack/components/converters/__init__.py
+++ b/haystack/components/converters/__init__.py
@@ -5,6 +5,7 @@
from haystack.components.converters.html import HTMLToDocument
from haystack.components.converters.markdown import MarkdownToDocument
from haystack.components.converters.openapi_functions import OpenAPIServiceToFunctions
+from haystack.components.converters.output_adapter import OutputAdapter
__all__ = [
"TextFileToDocument",
@@ -14,4 +15,5 @@
"HTMLToDocument",
"MarkdownToDocument",
"OpenAPIServiceToFunctions",
+ "OutputAdapter",
]
diff --git a/haystack/components/converters/output_adapter.py b/haystack/components/converters/output_adapter.py
new file mode 100644
index 0000000000..f6ed4c9ca7
--- /dev/null
+++ b/haystack/components/converters/output_adapter.py
@@ -0,0 +1,146 @@
+from typing import Optional, Dict, Any, Set, Callable
+
+import jinja2.runtime
+from jinja2 import TemplateSyntaxError, meta
+from jinja2.nativetypes import NativeEnvironment
+from typing_extensions import TypeAlias
+
+from haystack import component, default_to_dict, default_from_dict
+from haystack.utils.type_serialization import serialize_type, deserialize_type
+
+
+class OutputAdaptationException(Exception):
+ """Exception raised when there is an error during output adaptation."""
+
+
+@component
+class OutputAdapter:
+ """
+ OutputAdapter in Haystack 2.x pipelines is designed to adapt the output of one component
+ to be compatible with the input of another component using Jinja2 template expressions.
+
+ The component configuration requires specifying the adaptation rules. Each rule comprises:
+ - 'template': A Jinja2 template string that defines how to adapt the input data.
+ - 'output_type': The type of the output data (e.g., str, List[int]).
+ - 'custom_filters': A dictionary of custom Jinja2 filters to be used in the template.
+
+ Example configuration:
+
+ ```python
+ from haystack.components.converters import OutputAdapter
+ adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
+
+ input_data = {"documents": [{"content": "Test content"}]}
+ expected_output = {"output": "Test content"}
+
+ assert adapter.run(**input_data) == expected_output
+ ```
+
+ In the pipeline setup, the adapter is placed between components that require output/input adaptation.
+ The name under which the adapted value is published is `output`. Use this name to connect the OutputAdapter
+ to downstream components in the pipeline.
+
+ Example pipeline setup:
+
+ ```python
+ from haystack import Pipeline, component
+ from haystack.components.converters import OutputAdapter
+
+ @component
+ class DocumentProducer:
+ @component.output_types(documents=dict)
+ def run(self):
+ return {"documents": [{"content": '{"framework": "Haystack"}'}]}
+
+ pipe = Pipeline()
+ pipe.add_component(
+ name="output_adapter",
+ instance=OutputAdapter(template="{{ documents[0].content | json_loads}}", output_type=str),
+ )
+ pipe.add_component(name="document_producer", instance=DocumentProducer())
+ pipe.connect("document_producer", "output_adapter")
+ result = pipe.run(data={})
+ assert result["output_adapter"]["output"] == {"framework": "Haystack"}
+ ```
+ """
+
+ def __init__(self, template: str, output_type: TypeAlias, custom_filters: Optional[Dict[str, Callable]] = None):
+ """
+ Initializes the OutputAdapter with a set of adaptation rules.
+ :param template: A Jinja2 template string that defines how to adapt the output data to the input of the
+ downstream component.
+ :param output_type: The type of the output data (e.g., str, List[int]).
+ :param custom_filters: A dictionary of custom Jinja2 filters to be used in the template.
+ """
+ self.custom_filters = {**(custom_filters or {})}
+ input_types: Set[str] = set()
+
+ # Create a Jinja native environment, we need it to:
+ # a) add custom filters to the environment for filter compilation stage
+ env = NativeEnvironment()
+ try:
+ env.parse(template) # Validate template syntax
+ self.template = template
+ except TemplateSyntaxError as e:
+ raise ValueError(f"Invalid Jinja template '{template}': {e}") from e
+
+ for name, filter_func in self.custom_filters.items():
+ env.filters[name] = filter_func
+
+ # b) extract variables in the template
+ route_input_names = self._extract_variables(env)
+ input_types.update(route_input_names)
+
+ # the env is not needed, discarded automatically
+ component.set_input_types(self, **{var: Any for var in input_types})
+ component.set_output_types(self, **{"output": output_type})
+ self.output_type = output_type
+
+ def run(self, **kwargs):
+ """
+ Executes the output adaptation logic by applying the specified Jinja template expressions
+ to adapt the incoming data to a format suitable for downstream components.
+
+ :param kwargs: A dictionary containing the pipeline variables, which are inputs to the adaptation templates.
+ :return: A dictionary containing the adapted outputs, based on the adaptation rules.
+ :raises OutputAdaptationException: If there's an error during the adaptation process.
+ """
+ # check if kwargs are empty
+ if not kwargs:
+ raise ValueError("No input data provided for output adaptation")
+ env = NativeEnvironment()
+ for name, filter_func in self.custom_filters.items():
+ env.filters[name] = filter_func
+ adapted_outputs = {}
+ try:
+ adapted_output_template = env.from_string(self.template)
+ output_result = adapted_output_template.render(**kwargs)
+ if isinstance(output_result, jinja2.runtime.Undefined):
+ raise OutputAdaptationException(f"Undefined variable in the template {self.template}; kwargs: {kwargs}")
+
+ adapted_outputs["output"] = output_result
+ except Exception as e:
+ raise OutputAdaptationException(f"Error adapting {self.template} with {kwargs}: {e}") from e
+ return adapted_outputs
+
+ def to_dict(self) -> Dict[str, Any]:
+ # todo should we serialize the custom filters? And if so, can we do the same as for callback handlers?
+ return default_to_dict(self, template=self.template, output_type=serialize_type(self.output_type))
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "OutputAdapter":
+ init_params = data.get("init_parameters", {})
+ init_params["output_type"] = deserialize_type(init_params["output_type"])
+ return default_from_dict(cls, data)
+
+ def _extract_variables(self, env: NativeEnvironment) -> Set[str]:
+ """
+ Extracts all variables from a list of Jinja template strings.
+
+ :param env: A Jinja native environment.
+ :return: A set of variable names extracted from the template strings.
+ """
+ variables = set()
+ ast = env.parse(self.template)
+ variables.update(meta.find_undeclared_variables(ast))
+ return variables
diff --git a/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml b/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml
new file mode 100644
index 0000000000..fba2fdcce3
--- /dev/null
+++ b/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Introducing the OutputAdapter component which enables seamless data flow between pipeline components by adapting the output of one component to match the expected input of another using Jinja2 template expressions. This addition opens the door to greater flexibility in pipeline configurations, facilitating custom adaptation rules and exemplifying a structured approach to inter-component communication.
| diff --git a/test/components/converters/test_output_adapter.py b/test/components/converters/test_output_adapter.py
new file mode 100644
index 0000000000..795cd30d97
--- /dev/null
+++ b/test/components/converters/test_output_adapter.py
@@ -0,0 +1,107 @@
+import json
+
+import pytest
+
+from haystack import Pipeline, component
+from haystack.components.converters import OutputAdapter
+from haystack.components.converters.output_adapter import OutputAdaptationException
+
+
+class TestOutputAdapter:
+ # OutputAdapter can be initialized with a valid Jinja2 template string and output type.
+ def test_initialized_with_valid_template_and_output_type(self):
+ template = "{{ documents[0].content }}"
+ output_type = str
+ adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
+
+ assert adapter.template == template
+ assert adapter.__haystack_output__.output.name == "output"
+ assert adapter.__haystack_output__.output.type == output_type
+
+ # OutputAdapter can adapt the output of one component to be compatible with the input of another
+ # component using Jinja2 template expressions.
+ def test_output_adaptation(self):
+ adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
+
+ input_data = {"documents": [{"content": "Test content"}]}
+ expected_output = {"output": "Test content"}
+
+ assert adapter.run(**input_data) == expected_output
+
+ # OutputAdapter can add filter 'json_loads' and use it
+ def test_predefined_filters(self):
+ adapter = OutputAdapter(
+ template="{{ documents[0].content|json_loads }}",
+ output_type=dict,
+ custom_filters={"json_loads": lambda s: json.loads(str(s))},
+ )
+
+ input_data = {"documents": [{"content": '{"key": "value"}'}]}
+ expected_output = {"output": {"key": "value"}}
+
+ assert adapter.run(**input_data) == expected_output
+
+ # OutputAdapter can handle custom filters provided in the component configuration.
+ def test_custom_filters(self):
+ def custom_filter(value):
+ return value.upper()
+
+ custom_filters = {"custom_filter": custom_filter}
+ adapter = OutputAdapter(
+ template="{{ documents[0].content|custom_filter }}", output_type=str, custom_filters=custom_filters
+ )
+
+ input_data = {"documents": [{"content": "test content"}]}
+ expected_output = {"output": "TEST CONTENT"}
+
+ assert adapter.run(**input_data) == expected_output
+
+ # OutputAdapter raises an exception on init if the Jinja2 template string is invalid.
+ def test_invalid_template_string(self):
+ with pytest.raises(ValueError):
+ OutputAdapter(template="{{ documents[0].content }", output_type=str)
+
+ # OutputAdapter raises an exception if no input data is provided for output adaptation.
+ def test_no_input_data_provided(self):
+ adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
+ with pytest.raises(ValueError):
+ adapter.run()
+
+ # OutputAdapter raises an exception if there's an error during the adaptation process.
+ def test_error_during_adaptation(self):
+ adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
+ input_data = {"documents": [{"title": "Test title"}]}
+
+ with pytest.raises(OutputAdaptationException):
+ adapter.run(**input_data)
+
+ # OutputAdapter can be serialized to a dictionary and deserialized back to an OutputAdapter instance.
+ def test_sede(self):
+ adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
+ adapter_dict = adapter.to_dict()
+ deserialized_adapter = OutputAdapter.from_dict(adapter_dict)
+
+ assert adapter.template == deserialized_adapter.template
+ assert adapter.output_type == deserialized_adapter.output_type
+
+ def test_output_adapter_in_pipeline(self):
+ @component
+ class DocumentProducer:
+ @component.output_types(documents=dict)
+ def run(self):
+ return {"documents": [{"content": '{"framework": "Haystack"}'}]}
+
+ pipe = Pipeline()
+ pipe.add_component(
+ name="output_adapter",
+ instance=OutputAdapter(
+ template="{{ documents[0].content | json_loads}}",
+ output_type=str,
+ custom_filters={"json_loads": lambda s: json.loads(str(s))},
+ ),
+ )
+ pipe.add_component(name="document_producer", instance=DocumentProducer())
+ pipe.connect("document_producer", "output_adapter")
+ result = pipe.run(data={})
+ assert result
+ assert result["output_adapter"]["output"] == {"framework": "Haystack"}
| diff --git a/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml b/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml
new file mode 100644
index 0000000000..fba2fdcce3
--- /dev/null
+++ b/releasenotes/notes/add-output-adapter-5fab4cfcb0218925.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Introducing the OutputAdapter component which enables seamless data flow between pipeline components by adapting the output of one component to match the expected input of another using Jinja2 template expressions. This addition opens the door to greater flexibility in pipeline configurations, facilitating custom adaptation rules and exemplifying a structured approach to inter-component communication.
| [
{
"components": [
{
"doc": "Exception raised when there is an error during output adaptation.",
"lines": [
12,
13
],
"name": "OutputAdaptationException",
"signature": "class OutputAdaptationException(Exception):",
"type": "class"
},... | [
"test/components/converters/test_output_adapter.py::TestOutputAdapter::test_initialized_with_valid_template_and_output_type",
"test/components/converters/test_output_adapter.py::TestOutputAdapter::test_output_adaptation",
"test/components/converters/test_output_adapter.py::TestOutputAdapter::test_predefined_fil... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add OutputAdapter
Part of https://github.com/deepset-ai/haystack/issues/6938
### Why:
Introduces a new `OutputAdapter` component to facilitate ease of the data flow between components within a pipeline. Its purpose is to transform the output from one component to align with the expected input of a subsequent component. This capability is particularly valuable when integrating components with mismatched interfaces, enhancing the pipeline design's modularity and flexibility.
- partially fixes https://github.com/deepset-ai/haystack/issues/6923
### What:
- Added `OutputAdapter` to `haystack/components/converters/__init__.py` to make it available as a component.
- Created a new file `output_adapter.py` implementing the `OutputAdapter` class, which includes adaptation logic utilizing Jinja2 template expressions.
- Introduced tests in `test_output_adapter.py` to verify functionality, error handling, and serialization/deserialization of the `OutputAdapter`.
### How can it be used:
- To adapt data between pipeline components without manual transformation, use the `OutputAdapter` where necessary. For example:
```python
from haystack.components.converters import OutputAdapter
adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
```
- This class can use predefined and custom Jinja2 filters to transform the data:
```python
# Custom filter usage example
def uppercase(value):
return value.upper()
adapter = OutputAdapter(template="{{ documents[0].content|uppercase }}", output_type=str, custom_filters={"uppercase": uppercase})
```
- In Haystack pipelines:
```python
@component
class DocumentProducer:
@component.output_types(documents=dict)
def run(self):
return {"documents": [{"content": '{"framework": "Haystack"}'}]}
pipe = Pipeline()
pipe.add_component(
name="output_adapter",
instance=OutputAdapter(template="{{ documents[0].content | json_loads}}", output_type=str),
)
pipe.add_component(name="document_producer", instance=DocumentProducer())
pipe.connect("document_producer", "output_adapter")
result = pipe.run(data={})
assert result
assert result["output_adapter"]["output"] == {"framework": "Haystack"}
```
### How did you test it:
- The PR includes unit tests validating template initialization, output adaptation, filter application, and exception handling.
- The tests cover scenarios like using valid and invalid templates, handling empty input data, leveraging predefined and custom filters, and serialization/deserialization of the component.
- The tests ensure that the `OutputAdapter` behaves as expected in isolation and when placed within a pipeline.
### Notes for the reviewer:
- Is the namespace for `OutputAdapter` in converters? Do you have any other suggestions?
- Should we allow custom filters addition?
- Review the templates and filters implemented for adaptability and evaluate if any additional edge cases need to be covered.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/converters/output_adapter.py]
(definition of OutputAdaptationException:)
class OutputAdaptationException(Exception):
"""Exception raised when there is an error during output adaptation."""
(definition of OutputAdapter:)
class OutputAdapter:
"""OutputAdapter in Haystack 2.x pipelines is designed to adapt the output of one component
to be compatible with the input of another component using Jinja2 template expressions.
The component configuration requires specifying the adaptation rules. Each rule comprises:
- 'template': A Jinja2 template string that defines how to adapt the input data.
- 'output_type': The type of the output data (e.g., str, List[int]).
- 'custom_filters': A dictionary of custom Jinja2 filters to be used in the template.
Example configuration:
```python
from haystack.components.converters import OutputAdapter
adapter = OutputAdapter(template="{{ documents[0].content }}", output_type=str)
input_data = {"documents": [{"content": "Test content"}]}
expected_output = {"output": "Test content"}
assert adapter.run(**input_data) == expected_output
```
In the pipeline setup, the adapter is placed between components that require output/input adaptation.
The name under which the adapted value is published is `output`. Use this name to connect the OutputAdapter
to downstream components in the pipeline.
Example pipeline setup:
```python
from haystack import Pipeline, component
from haystack.components.converters import OutputAdapter
@component
class DocumentProducer:
@component.output_types(documents=dict)
def run(self):
return {"documents": [{"content": '{"framework": "Haystack"}'}]}
pipe = Pipeline()
pipe.add_component(
name="output_adapter",
instance=OutputAdapter(template="{{ documents[0].content | json_loads}}", output_type=str),
)
pipe.add_component(name="document_producer", instance=DocumentProducer())
pipe.connect("document_producer", "output_adapter")
result = pipe.run(data={})
assert result["output_adapter"]["output"] == {"framework": "Haystack"}
```"""
(definition of OutputAdapter.__init__:)
def __init__(self, template: str, output_type: TypeAlias, custom_filters: Optional[Dict[str, Callable]] = None):
"""Initializes the OutputAdapter with a set of adaptation rules.
:param template: A Jinja2 template string that defines how to adapt the output data to the input of the
downstream component.
:param output_type: The type of the output data (e.g., str, List[int]).
:param custom_filters: A dictionary of custom Jinja2 filters to be used in the template."""
(definition of OutputAdapter.run:)
def run(self, **kwargs):
"""Executes the output adaptation logic by applying the specified Jinja template expressions
to adapt the incoming data to a format suitable for downstream components.
:param kwargs: A dictionary containing the pipeline variables, which are inputs to the adaptation templates.
:return: A dictionary containing the adapted outputs, based on the adaptation rules.
:raises OutputAdaptationException: If there's an error during the adaptation process."""
(definition of OutputAdapter.to_dict:)
def to_dict(self) -> Dict[str, Any]:
(definition of OutputAdapter.from_dict:)
def from_dict(cls, data: Dict[str, Any]) -> "OutputAdapter":
(definition of OutputAdapter._extract_variables:)
def _extract_variables(self, env: NativeEnvironment) -> Set[str]:
"""Extracts all variables from a list of Jinja template strings.
:param env: A Jinja native environment.
:return: A set of variable names extracted from the template strings."""
[end of new definitions in haystack/components/converters/output_adapter.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
joke2k__faker-1989 | 1,989 | joke2k/faker | null | c5e37d54a4eee1fa9db3f4dcc705da27b03968d6 | 2024-02-06T21:09:31Z | diff --git a/faker/providers/lorem/uk_UA/__init__.py b/faker/providers/lorem/uk_UA/__init__.py
new file mode 100644
index 0000000000..a9ad816de8
--- /dev/null
+++ b/faker/providers/lorem/uk_UA/__init__.py
@@ -0,0 +1,505 @@
+from typing import Dict
+
+from .. import Provider as LoremProvider
+
+
+class Provider(LoremProvider):
+ """Implement lorem provider for ``uk_UA`` locale."""
+ word_list = (
+ "увійти",
+ "монета",
+ "підкинути",
+ "бажання",
+ "іспит",
+ "податковий",
+ "витягувати",
+ "приятель",
+ "здригатися",
+ "купа",
+ "порт",
+ "точно",
+ "заплакати",
+ "хата",
+ "правління",
+ "художній",
+ "болісно",
+ "зображати",
+ "ліхтарик",
+ "міф",
+ "сумний",
+ "небезпека",
+ "міра",
+ "пастух",
+ "факультет",
+ "мигнути",
+ "польовий",
+ "інший",
+ "виражений",
+ "забирати",
+ "рот",
+ "народ",
+ "відповідність",
+ "тута",
+ "комунізм",
+ "рішення",
+ "плід",
+ "співрозмовник",
+ "обуритися",
+ "гідність",
+ "господь",
+ "болото",
+ "інфекція",
+ "голубчик",
+ "синок",
+ "простір",
+ "прощення",
+ "раніше",
+ "хотіти",
+ "ленінград",
+ "даль",
+ "розвинений",
+ "близько",
+ "більше",
+ "спорт",
+ "епоха",
+ "відповісти",
+ "звільнити",
+ "порада",
+ "прохід",
+ "палець",
+ "вчора",
+ "пристойний",
+ "яскраво",
+ "білизна",
+ "коваль",
+ "несподівано",
+ "вперед",
+ "зате",
+ "кільце",
+ "перед",
+ "мить",
+ "плавно",
+ "тютюн",
+ "число",
+ "вивчити",
+ "важкий",
+ "міркування",
+ "салон",
+ "ідея",
+ "що",
+ "світило",
+ "порода",
+ "сумнівний",
+ "бок",
+ "очко",
+ "незручно",
+ "радити",
+ "відділ",
+ "помовчати",
+ "вітати",
+ "пробувати",
+ "дошлий",
+ "сміятися",
+ "наполегливо",
+ "здригнутися",
+ "затягнутися",
+ "танцювати",
+ "пісенька",
+ "вибирати",
+ "правильний",
+ "намір",
+ "здалеку",
+ "запустити",
+ "насолода",
+ "щур",
+ "летіти",
+ "космос",
+ "радість",
+ "поїзд",
+ "знаходити",
+ "гуляти",
+ "гіркий",
+ "бочок",
+ "ніч",
+ "щастя",
+ "знищення",
+ "диявол",
+ "коробка",
+ "спасти",
+ "шкіра",
+ "провінція",
+ "прелесть",
+ "в'язниця",
+ "вечір",
+ "низький",
+ "виблискувати",
+ "темніти",
+ "сонце",
+ "гараж",
+ "червʼяк",
+ "дружно",
+ "настати",
+ "блін",
+ "степ",
+ "самостійно",
+ "крутий",
+ "картинка",
+ "навіщо",
+ "робочий",
+ "незвичайний",
+ "армійський",
+ "труп",
+ "ягода",
+ "близько",
+ "монета",
+ "природний",
+ "юний",
+ "район",
+ "прихований",
+ "зловити",
+ "будівництво",
+ "палата",
+ "мить",
+ "триста",
+ "штаб",
+ "ламати",
+ "можливо",
+ "полюбити",
+ "чоловічок",
+ "легко",
+ "почуття",
+ "струмок",
+ "кишеня",
+ "гроші",
+ "неправда",
+ "порівняння",
+ "груди",
+ "від'їзд",
+ "виникнення",
+ "степ",
+ "збудження",
+ "діловий",
+ "отже",
+ "рідкий",
+ "синок",
+ "художній",
+ "покоління",
+ "розстебнути",
+ "їжа",
+ "вчений",
+ "секунда",
+ "заспокоїтися",
+ "навряд",
+ "аж",
+ "вскакивать",
+ "мимо",
+ "падати",
+ "потягнутися",
+ "загроза",
+ "розгубитися",
+ "бігати",
+ "склянка",
+ "о",
+ "кпсс",
+ "нині",
+ "підлога",
+ "реклама",
+ "при",
+ "шкільний",
+ "прем'єра",
+ "дальній",
+ "потрясти",
+ "звільнення",
+ "покидати",
+ "наступати",
+ "жити",
+ "який",
+ "образа",
+ "командування",
+ "дівка",
+ "висловлюватися",
+ "головний",
+ "другий",
+ "князь",
+ "соціалістичний",
+ "головка",
+ "залучати",
+ "через",
+ "господь",
+ "результат",
+ "відзначити",
+ "адже",
+ "падаль",
+ "покидати",
+ "художній",
+ "правий",
+ "висіти",
+ "лапа",
+ "каюта",
+ "занадто",
+ "нервово",
+ "серйозний",
+ "зима",
+ "заробити",
+ "ефект",
+ "прірва",
+ "плід",
+ "щось",
+ "що-небудь",
+ "казна-хто",
+ "висіти",
+ "холодно",
+ "єдиний",
+ "викинути",
+ "похмуро",
+ "вигнати",
+ "вмирати",
+ "інший",
+ "космос",
+ "природа",
+ "функція",
+ "поставити",
+ "оборот",
+ "услати",
+ "черговий",
+ "медицина",
+ "функція",
+ "зарплата",
+ "витримати",
+ "розлад",
+ "адвокат",
+ "затримати",
+ "поява",
+ "інвалід",
+ "інтелектуальний",
+ "досліджено",
+ "мати"
+ "ліворуч",
+ "хлопець",
+ "мільярд",
+ "гіркий",
+ "трубка",
+ "подробиця",
+ "паща",
+ "незвичний",
+ "угодний",
+ "засунути",
+ "мета",
+ "заборонити",
+ "дрімати",
+ "розуміти",
+ "приходити",
+ "нарада",
+ "постійний",
+ "аналіз",
+ "терапія",
+ "приятель",
+ "процес",
+ "академік",
+ "метал",
+ "розвернутися",
+ "жорстокий",
+ "інтернет",
+ "яблуко",
+ "банда",
+ "зміна",
+ "колектив",
+ "похорон",
+ "пристрій",
+ "квапливий",
+ "розводити",
+ "промовчати",
+ "підземний",
+ "полум'я",
+ "редактор",
+ "теорія",
+ "олівець",
+ "упор",
+ "означати",
+ "метелик",
+ "чотири",
+ "століття",
+ "різноманітний",
+ "вітрина",
+ "ніж",
+ "команда",
+ "шолом",
+ "недолік",
+ "протягувати",
+ "за",
+ "метал",
+ "домогтися",
+ "доба",
+ "чітко",
+ "надати",
+ "тисяча",
+ "заспівати",
+ "бригада",
+ "дрібниця",
+ "виражений",
+ "перетнути",
+ "сходити",
+ "взагалі",
+ "рис",
+ "банк",
+ "бак",
+ "передо",
+ "призначити",
+ "важливий",
+ "правління",
+ "палиця",
+ "трясти",
+ "упустити",
+ "вітрина",
+ "основа",
+ "так",
+ "мʼята",
+ "пірʼя",
+ "перебивати",
+ "дихання",
+ "застосовуватися",
+ "червень",
+ "бетонний",
+ "уникати",
+ "благати",
+ "м'який",
+ "заява",
+ "конференція",
+ "встати",
+ "свіжий",
+ "супроводжуватися",
+ "ланцюжок",
+ "вираз",
+ "кут",
+ "черевик",
+ "лягати",
+ "інструкція",
+ "присісти",
+ "решітка",
+ "єврейський",
+ "поріг",
+ "зелений",
+ "кордон",
+ "ставити",
+ "сміливий",
+ "суглоб",
+ "роса",
+ "демократія",
+ "вивести",
+ "конструкція",
+ "задерти",
+ "багряний",
+ "військовий",
+ "направо",
+ "житель",
+ "товар",
+ "солома",
+ "ґазда",
+ "ґаздиня",
+ "ґудзик",
+ "неправда",
+ "матерія",
+ "командувач",
+ "кидати",
+ "закласти",
+ "ліловий",
+ "слати",
+ "гіркий",
+ "простір",
+ "провал",
+ "сміття",
+ "наштовхнутися",
+ "торгівля",
+ "монета",
+ "місце",
+ "спалити",
+ "брову",
+ "лівий",
+ "хліб",
+ "коричневий",
+ "подвірʼя"
+ "потім",
+ "червонй",
+ "пристрасть",
+ "виднітися",
+ "розкішний",
+ "спосіб",
+ "багаття",
+ "заклад",
+ "пропадати",
+ "занадто",
+ "п'ятеро",
+ "програміст",
+ "кора",
+ "хлопчисько",
+ "тьмяний",
+ "несподіваний",
+ "танцювати",
+ "безглуздий",
+ "здригнутися",
+ "скинути",
+ "прошепотіти",
+ "безпорадний",
+ "рота",
+ "пісня",
+ "тривога",
+ "деякий",
+ "термін",
+ "пити",
+ "колишній",
+ "натиснути",
+ "видимо",
+ "валюта",
+ "набір",
+ "боєць",
+ "райком",
+ "новий",
+ "ковзати",
+ "керівник",
+ "вовк",
+ "зрідка",
+ "зрозумілий",
+ "пропаганда",
+ "зупинити",
+ "виконувати",
+ "хід",
+ "пані",
+ "друкувати",
+ "командир",
+ "знімати",
+ "страта",
+ "ручка",
+ "камінчик",
+ "нога",
+ "нестерпний",
+ "спорт",
+ "тривога",
+ "уточнити",
+ "актриса",
+ "повністю",
+ "покинути",
+ "блискучий",
+ "мотоцикл",
+ "дорогий",
+ "вказаний",
+ "ремінь",
+ "присвятити",
+ "один",
+ "а",
+ "їсти"
+ "діставати",
+ "господиня",
+ "шкарпетка",
+ "написати",
+ "єврейський",
+ "заклик",
+ "збільшуватися",
+ "байдужий",
+ "грати",
+ "співати",
+ "й",
+ "фахівець",
+ "купа-невеличка",
+ )
+
+ parts_of_speech: Dict[str, tuple] = {}
| diff --git a/tests/providers/test_lorem.py b/tests/providers/test_lorem.py
index 40761a27f0..84bc53eb96 100644
--- a/tests/providers/test_lorem.py
+++ b/tests/providers/test_lorem.py
@@ -10,6 +10,7 @@
from faker.providers.lorem.en_US import Provider as EnUsLoremProvider
from faker.providers.lorem.fa_IR import Provider as FaIrLoremProvider
from faker.providers.lorem.nl_BE import Provider as NlBeLoremProvider
+from faker.providers.lorem.uk_UA import Provider as UkUaLoremProvider
class TestLoremProvider:
@@ -645,3 +646,71 @@ def test_words(self, faker, num_samples):
for _ in range(num_samples):
words = faker.words(num_words)
assert all(isinstance(word, str) and word in NlBeLoremProvider.word_list for word in words)
+
+
+class TestUkUa:
+ """Test uk_UA lorem provider"""
+ word_list = [word.lower() for word in UkUaLoremProvider.word_list]
+
+ def test_paragraph(self, faker, num_samples):
+ num_sentences = 10
+ for _ in range(num_samples):
+ paragraph = faker.paragraph(nb_sentences=num_sentences)
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_paragraphs(self, faker, num_samples):
+ num_paragraphs = 5
+ for _ in range(num_samples):
+ paragraphs = faker.paragraphs(nb=num_paragraphs)
+ for paragraph in paragraphs:
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentence(self, faker, num_samples):
+ num_words = 10
+ for _ in range(num_samples):
+ sentence = faker.sentence(nb_words=num_words)
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentences(self, faker, num_samples):
+ num_sentences = 5
+ for _ in range(num_samples):
+ sentences = faker.sentences(nb=num_sentences)
+ for sentence in sentences:
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_text(self, faker, num_samples):
+ num_chars = 25
+ for _ in range(num_samples):
+ text = faker.text(max_nb_chars=num_chars)
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_texts(self, faker, num_samples):
+ num_texts = 5
+ num_chars = 25
+ for _ in range(num_samples):
+ texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
+ for text in texts:
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_word(self, faker, num_samples):
+ for _ in range(num_samples):
+ word = faker.word()
+ assert isinstance(word, str) and word in UkUaLoremProvider.word_list
+
+ def test_words(self, faker, num_samples):
+ num_words = 5
+ for _ in range(num_samples):
+ words = faker.words(num_words)
+ assert all(isinstance(word, str) and word in UkUaLoremProvider.word_list for word in words)
| [
{
"components": [
{
"doc": "Implement lorem provider for ``uk_UA`` locale.",
"lines": [
6,
505
],
"name": "Provider",
"signature": "class Provider(LoremProvider):",
"type": "class"
}
],
"file": "faker/providers/lorem/uk_UA/_... | [
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_defaults",
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_custom_list",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_zero_nb",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_defaults"... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(uk_UA-lorem) Add uk_UA lorem provider
### Add uk_UA lorem provider and test
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/lorem/uk_UA/__init__.py]
(definition of Provider:)
class Provider(LoremProvider):
"""Implement lorem provider for ``uk_UA`` locale."""
[end of new definitions in faker/providers/lorem/uk_UA/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
tobymao__sqlglot-2924 | 2,924 | tobymao/sqlglot | null | 838e7800c32ad16074efef6a188ebd89083a9717 | 2024-02-06T21:09:10Z | diff --git a/sqlglot/dialects/hive.py b/sqlglot/dialects/hive.py
index 9daa5acb5d..6337ffd940 100644
--- a/sqlglot/dialects/hive.py
+++ b/sqlglot/dialects/hive.py
@@ -574,6 +574,13 @@ class Generator(generator.Generator):
exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED,
}
+ def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
+ if isinstance(expression.this, exp.JSONPathWildcard):
+ self.unsupported("Unsupported wildcard in JSONPathKey expression")
+ return ""
+
+ return super()._jsonpathkey_sql(expression)
+
def temporary_storage_provider(self, expression: exp.Create) -> exp.Create:
# Hive has no temporary storage provider (there are hive settings though)
return expression
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 57602ad303..1841862f81 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -440,7 +440,6 @@ class Generator(generator.Generator):
exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
exp.JSONBContains: lambda self, e: self.binary(e, "?"),
- exp.JSONPathKey: lambda _, e: e.name,
exp.JSONPathRoot: lambda *_: "",
exp.JSONPathSubscript: lambda self, e: self.json_path_part(e.this),
exp.LastDay: no_last_day_sql,
@@ -496,6 +495,12 @@ class Generator(generator.Generator):
exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
+ def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
+ if not isinstance(expression.this, str):
+ self.unsupported("Unsupported wildcard in JSONPathKey expression")
+
+ return expression.name
+
def bracket_sql(self, expression: exp.Bracket) -> str:
"""Forms like ARRAY[1, 2, 3][3] aren't allowed; we need to wrap the ARRAY."""
if isinstance(expression.this, exp.Array):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 4cbd05d941..568dcb4c11 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -3371,12 +3371,20 @@ def lastday_sql(self, expression: exp.LastDay) -> str:
def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
this = expression.this
- if this == "*" or exp.SAFE_IDENTIFIER_RE.match(this):
+ if isinstance(this, exp.JSONPathWildcard):
+ this = self.json_path_part(this)
+ return f".{this}" if this else ""
+
+ if exp.SAFE_IDENTIFIER_RE.match(this):
return f".{this}"
this = self.json_path_part(this)
return f"[{this}]" if self.JSON_PATH_BRACKETED_KEY_SUPPORTED else f".{this}"
+ def _jsonpathsubscript_sql(self, expression: exp.JSONPathSubscript) -> str:
+ this = self.json_path_part(expression.this)
+ return f"[{this}]" if this else ""
+
def _simplify_unless_literal(self, expression: E) -> E:
if not isinstance(expression, exp.Literal):
from sqlglot.optimizer.simplify import simplify
diff --git a/sqlglot/jsonpath.py b/sqlglot/jsonpath.py
index 988b5affa8..129a4e6fed 100644
--- a/sqlglot/jsonpath.py
+++ b/sqlglot/jsonpath.py
@@ -150,14 +150,20 @@ def _parse_bracket() -> exp.JSONPathPart:
while _curr():
if _match(TokenType.DOT) or _match(TokenType.COLON):
recursive = _prev().text == ".."
- value = _match(TokenType.VAR) or _match(TokenType.IDENTIFIER) or _match(TokenType.STAR)
+
+ if _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
+ value: t.Optional[str | exp.JSONPathWildcard] = _prev().text
+ elif _match(TokenType.STAR):
+ value = exp.JSONPathWildcard()
+ else:
+ value = None
if recursive:
- expressions.append(exp.JSONPathRecursive(this=value.text if value else None))
+ expressions.append(exp.JSONPathRecursive(this=value))
elif value:
- expressions.append(exp.JSONPathKey(this=value.text))
+ expressions.append(exp.JSONPathKey(this=value))
else:
- raise ParseError(_error("Expected key name after DOT"))
+ raise ParseError(_error("Expected key name or * after DOT"))
elif _match(TokenType.L_BRACKET):
expressions.append(_parse_bracket())
elif _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
@@ -182,7 +188,7 @@ def _parse_bracket() -> exp.JSONPathPart:
for p in [e.args.get("start"), e.args.get("end"), e.args.get("step")]
if p is not None
),
- exp.JSONPathSubscript: lambda self, e: f"[{self.json_path_part(e.this)}]",
+ exp.JSONPathSubscript: lambda self, e: self._jsonpathsubscript_sql(e),
exp.JSONPathUnion: lambda self,
e: f"[{','.join(self.json_path_part(p) for p in e.expressions)}]",
exp.JSONPathWildcard: lambda *_: "*",
| diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index f4852aea67..e0aaf17962 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -1245,6 +1245,7 @@ def test_json(self):
write={
"bigquery": UnsupportedError,
"duckdb": "x -> '$.y[*]'",
+ "mysql": "JSON_EXTRACT(x, '$.y[*]')",
"postgres": UnsupportedError,
"presto": "JSON_EXTRACT(x, '$.y[*]')",
"redshift": UnsupportedError,
@@ -1254,6 +1255,32 @@ def test_json(self):
"tsql": UnsupportedError,
},
)
+ self.validate_all(
+ "JSON_EXTRACT(x, '$.y[*]')",
+ write={
+ "bigquery": "JSON_EXTRACT(x, '$.y')",
+ "postgres": "JSON_EXTRACT_PATH(x, 'y')",
+ "redshift": "JSON_EXTRACT_PATH_TEXT(x, 'y')",
+ "snowflake": "GET_PATH(x, 'y')",
+ "sqlite": "x -> '$.y'",
+ "tsql": "ISNULL(JSON_QUERY(x, '$.y'), JSON_VALUE(x, '$.y'))",
+ },
+ )
+ self.validate_all(
+ "JSON_EXTRACT(x, '$.y.*')",
+ write={
+ "bigquery": UnsupportedError,
+ "duckdb": "x -> '$.y.*'",
+ "mysql": "JSON_EXTRACT(x, '$.y.*')",
+ "postgres": UnsupportedError,
+ "presto": "JSON_EXTRACT(x, '$.y.*')",
+ "redshift": UnsupportedError,
+ "snowflake": UnsupportedError,
+ "spark": UnsupportedError,
+ "sqlite": UnsupportedError,
+ "tsql": UnsupportedError,
+ },
+ )
def test_cross_join(self):
self.validate_all(
diff --git a/tests/test_jsonpath.py b/tests/test_jsonpath.py
index 9fffa44a30..4daf3c1ce0 100644
--- a/tests/test_jsonpath.py
+++ b/tests/test_jsonpath.py
@@ -13,7 +13,7 @@ class TestJsonpath(unittest.TestCase):
def test_jsonpath(self):
expected_expressions = [
exp.JSONPathRoot(),
- exp.JSONPathKey(this="*"),
+ exp.JSONPathKey(this=exp.JSONPathWildcard()),
exp.JSONPathKey(this="a"),
exp.JSONPathSubscript(this=0),
exp.JSONPathKey(this="x"),
| [] | [
"tests/dialects/test_dialect.py::TestDialect::test_json",
"tests/test_jsonpath.py::TestJsonpath::test_jsonpath"
] | [
"tests/dialects/test_dialect.py::TestDialect::test_alias",
"tests/dialects/test_dialect.py::TestDialect::test_array",
"tests/dialects/test_dialect.py::TestDialect::test_cast",
"tests/dialects/test_dialect.py::TestDialect::test_cast_to_user_defined_type",
"tests/dialects/test_dialect.py::TestDialect::test_co... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat: improve transpilation of JSON path wildcards
I think this is the final item
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | ceb42fabad60312699e4b15936aeebac00e22e4d | ||
deepset-ai__haystack-6927 | 6,927 | deepset-ai/haystack | null | b875eda4af7b7d45ed02fa578bf23fda1f923673 | 2024-02-06T14:43:45Z | diff --git a/haystack/core/component/component.py b/haystack/core/component/component.py
index 95ae87d731..c231e41dfa 100644
--- a/haystack/core/component/component.py
+++ b/haystack/core/component/component.py
@@ -160,6 +160,21 @@ def __call__(cls, *args, **kwargs):
return instance
+def _component_repr(component: Component) -> str:
+ """
+ All Components override their __repr__ method with this one.
+ It prints the component name and the input/output sockets.
+ """
+ result = object.__repr__(component)
+ if pipeline := getattr(component, "__haystack_added_to_pipeline__"):
+ # This Component has been added in a Pipeline, let's get the name from there.
+ result += f"\n{pipeline.get_component_name(component)}"
+
+ # We're explicitly ignoring the type here because we're sure that the component
+ # has the __haystack_input__ and __haystack_output__ attributes at this point
+ return f"{result}\n{component.__haystack_input__}\n{component.__haystack_output__}" # type: ignore[attr-defined]
+
+
class _Component:
"""
See module's docstring.
@@ -332,6 +347,9 @@ def copy_class_namespace(namespace):
self.registry[class_path] = class_
logger.debug("Registered Component %s", class_)
+ # Override the __repr__ method with a default one
+ class_.__repr__ = _component_repr
+
return class_
def __call__(self, class_):
diff --git a/haystack/core/component/sockets.py b/haystack/core/component/sockets.py
index 25bf4fdc88..374ae63032 100644
--- a/haystack/core/component/sockets.py
+++ b/haystack/core/component/sockets.py
@@ -82,8 +82,9 @@ def _component_name(self) -> str:
return pipeline.get_component_name(self._component)
# This Component has not been added to a Pipeline yet, so we can't know its name.
- # Let's use the class name instead.
- return str(self._component)
+ # Let's use default __repr__. We don't call repr() directly as Components have a custom
+ # __repr__ method and that would lead to infinite recursion since we call Sockets.__repr__ in it.
+ return object.__repr__(self._component)
def __getattribute__(self, name):
try:
@@ -96,12 +97,10 @@ def __getattribute__(self, name):
return object.__getattribute__(self, name)
def __repr__(self) -> str:
- result = self._component_name()
+ result = ""
if self._sockets_io_type == InputSocket:
- result += " inputs:\n"
+ result = "Inputs:\n"
elif self._sockets_io_type == OutputSocket:
- result += " outputs:\n"
+ result = "Outputs:\n"
- result += "\n".join([f" - {n}: {_type_name(s.type)}" for n, s in self._sockets_dict.items()])
-
- return result
+ return result + "\n".join([f" - {n}: {_type_name(s.type)}" for n, s in self._sockets_dict.items()])
diff --git a/releasenotes/notes/component-repr-a6486af81530bc3b.yaml b/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
new file mode 100644
index 0000000000..3a7439e926
--- /dev/null
+++ b/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Add `__repr__` to all Components to print their I/O.
+ This can also be useful in Jupyter notebooks as this will be shown as a cell output
+ if the it's the last expression in a cell.
| diff --git a/test/core/component/test_component.py b/test/core/component/test_component.py
index bbe2605f03..b093c32b82 100644
--- a/test/core/component/test_component.py
+++ b/test/core/component/test_component.py
@@ -4,6 +4,7 @@
from haystack.core.component import Component, InputSocket, OutputSocket, component
from haystack.core.errors import ComponentError
+from haystack.core.pipeline import Pipeline
def test_correct_declaration():
@@ -189,3 +190,31 @@ def run(self, *, arg: int):
comp = MockComponent()
component_inputs = {name: {"type": socket.type} for name, socket in comp.__haystack_input__._sockets_dict.items()}
assert component_inputs == {"arg": {"type": int}}
+
+
+def test_repr():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, value: int):
+ return {"value": value}
+
+ comp = MockComponent()
+ assert repr(comp) == f"{object.__repr__(comp)}\nInputs:\n - value: int\nOutputs:\n - value: int"
+
+
+def test_repr_added_to_pipeline():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, value: int):
+ return {"value": value}
+
+ pipe = Pipeline()
+ comp = MockComponent()
+ pipe.add_component("my_component", comp)
+ assert repr(comp) == f"{object.__repr__(comp)}\nmy_component\nInputs:\n - value: int\nOutputs:\n - value: int"
diff --git a/test/core/component/test_sockets.py b/test/core/component/test_sockets.py
index ac3b01bda8..6e942b84f9 100644
--- a/test/core/component/test_sockets.py
+++ b/test/core/component/test_sockets.py
@@ -23,19 +23,6 @@ def test_init_with_empty_sockets(self):
assert io._component == comp
assert io._sockets_dict == {}
- def test_component_name(self):
- comp = component_class("SomeComponent")()
- io = Sockets(component=comp, sockets_dict={}, sockets_io_type=InputSocket)
- assert io._component_name() == str(comp)
-
- def test_component_name_added_to_pipeline(self):
- comp = component_class("SomeComponent")()
- pipeline = Pipeline()
- pipeline.add_component("my_component", comp)
-
- io = Sockets(component=comp, sockets_dict={}, sockets_io_type=InputSocket)
- assert io._component_name() == "my_component"
-
def test_getattribute(self):
comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
io = Sockets(component=comp, sockets_dict=comp.__haystack_input__._sockets_dict, sockets_io_type=InputSocket)
@@ -54,4 +41,4 @@ def test_repr(self):
comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
io = Sockets(component=comp, sockets_dict=comp.__haystack_input__._sockets_dict, sockets_io_type=InputSocket)
res = repr(io)
- assert res == f"{comp} inputs:\n - input_1: int\n - input_2: int"
+ assert res == "Inputs:\n - input_1: int\n - input_2: int"
| diff --git a/releasenotes/notes/component-repr-a6486af81530bc3b.yaml b/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
new file mode 100644
index 0000000000..3a7439e926
--- /dev/null
+++ b/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Add `__repr__` to all Components to print their I/O.
+ This can also be useful in Jupyter notebooks as this will be shown as a cell output
+ if the it's the last expression in a cell.
| [
{
"components": [
{
"doc": "All Components override their __repr__ method with this one.\nIt prints the component name and the input/output sockets.",
"lines": [
163,
175
],
"name": "_component_repr",
"signature": "def _component_repr(component: ... | [
"test/core/component/test_component.py::test_repr",
"test/core/component/test_component.py::test_repr_added_to_pipeline",
"test/core/component/test_sockets.py::TestSockets::test_repr"
] | [
"test/core/component/test_component.py::test_correct_declaration",
"test/core/component/test_component.py::test_correct_declaration_with_additional_readonly_property",
"test/core/component/test_component.py::test_correct_declaration_with_additional_writable_property",
"test/core/component/test_component.py::t... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add `__repr__` method to all Components
### Related Issues
- fixes #6779
### Proposed Changes:
This PR changes the `@component` decorator so all Components have a `__repr__` method that returns a clear representation of their I/O sockets.
I also changed a bit `Sockets.__repr__` to avoid infinite recursion.
### How did you test it?
I added new tests.
### Notes for the reviewer
N/A
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/core/component/component.py]
(definition of _component_repr:)
def _component_repr(component: Component) -> str:
"""All Components override their __repr__ method with this one.
It prints the component name and the input/output sockets."""
[end of new definitions in haystack/core/component/component.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Enhance Component I/O representation
After #6768 is done we should have a nice way of representing a Component's I/O.
Example:
```py
from haystack.components.converters.txt import TextFileToDocument
converter = TextFileToDocument()
print(converter.inputs)
>>> Inputs:
>>> * sources: List[Document]
>>> * meta: Dict[str, Any], default: {}
```
----------
Instead of adding an `inputs` field, we could leverage `__repr__` to get:
```py
print(converter)
>>> Inputs:
>>> * sources: List[Document]
>>> * meta: Dict[str, Any], default: {}
```
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 |
prometheus__client_python-1005 | 1,005 | prometheus/client_python | null | b9edc43221101cad593c64d3fe9853760bef135e | 2024-02-06T10:42:38Z | diff --git a/prometheus_client/metrics.py b/prometheus_client/metrics.py
index 34305a17..91cd9ecf 100644
--- a/prometheus_client/metrics.py
+++ b/prometheus_client/metrics.py
@@ -292,6 +292,12 @@ def f():
# Count only one type of exception
with c.count_exceptions(ValueError):
pass
+
+ You can also reset the counter to zero in case your logical "process" restarts
+ without restarting the actual python process.
+
+ c.reset()
+
"""
_type = 'counter'
@@ -310,6 +316,11 @@ def inc(self, amount: float = 1, exemplar: Optional[Dict[str, str]] = None) -> N
_validate_exemplar(exemplar)
self._value.set_exemplar(Exemplar(exemplar, amount, time.time()))
+ def reset(self) -> None:
+ """Reset the counter to zero. Use this when a logical process restarts without restarting the actual python process."""
+ self._value.set(0)
+ self._created = time.time()
+
def count_exceptions(self, exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = Exception) -> ExceptionCounter:
"""Count exceptions in a block of code or function.
| diff --git a/tests/test_core.py b/tests/test_core.py
index 6f7c9d1c..30f9e0ad 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -43,6 +43,16 @@ def test_increment(self):
self.counter.inc(7)
self.assertEqual(8, self.registry.get_sample_value('c_total'))
+ def test_reset(self):
+ self.counter.inc()
+ self.assertNotEqual(0, self.registry.get_sample_value('c_total'))
+ created = self.registry.get_sample_value('c_created')
+ time.sleep(0.05)
+ self.counter.reset()
+ self.assertEqual(0, self.registry.get_sample_value('c_total'))
+ created_after_reset = self.registry.get_sample_value('c_created')
+ self.assertLess(created, created_after_reset)
+
def test_repr(self):
self.assertEqual(repr(self.counter), "prometheus_client.metrics.Counter(c)")
| [
{
"components": [
{
"doc": "Reset the counter to zero. Use this when a logical process restarts without restarting the actual python process.",
"lines": [
319,
322
],
"name": "Counter.reset",
"signature": "def reset(self) -> None:",
"type... | [
"tests/test_core.py::TestCounter::test_reset"
] | [
"tests/test_core.py::TestCounter::test_block_decorator",
"tests/test_core.py::TestCounter::test_count_exceptions_not_observable",
"tests/test_core.py::TestCounter::test_exemplar_invalid_label_name",
"tests/test_core.py::TestCounter::test_exemplar_too_long",
"tests/test_core.py::TestCounter::test_exemplar_un... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Reset counter
Adds `.reset()` method to Counter metric.
Closes #995.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in prometheus_client/metrics.py]
(definition of Counter.reset:)
def reset(self) -> None:
"""Reset the counter to zero. Use this when a logical process restarts without restarting the actual python process."""
[end of new definitions in prometheus_client/metrics.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Provide a way to reset a Counter metric
Hi. My program performs some lengthy process of many iterations, and I use a Counter to track current iteration. However, it is normal for the process to finish at some point, and then the user can start it again, without restarting the app.
> A counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase or be reset to zero on restart.
(https://prometheus.io/docs/concepts/metric_types/)
In my case it's not the whole app that restarts, but only the calculation procedure, but I think in spirit it's still a Counter's job, not a Gauge (a value that only increments and resets to zero), but there is currently no way in the library to set the Counter to zero.
Would it be possible to add "reset" method to a Counter class?
----------
This is an interesting one. The docs do specifically say that the reset to zero is on restart, but I can also understand the use case. Since https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter also specifies that a counter's total may reset I am ok accepting this feature. Are you interested in opening a PR?
For when this is implemented: it will require updating `_created` at the same time.
hi! Yes, I can provide a PR soon, but can you elaborate on what needs to change in behavior of `_created` metric? Should it be set to time of reset?
is it possible to add `_updated` instead of updating `_created` every time we do reset.
Yep, `_created` just needs to be set to the time of reset.
From https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter, which this library needs to implement:
> A MetricPoint in a Metric's Counter's Total MAY reset to 0. If present, the corresponding Created time MUST also be set to the timestamp of the reset.
So the correct thing to do is update `_created`, not create a new metric or anything like that. Prometheus will soon start using the `_created` data as well, and having a non-standard field would not be supported.
--------------------
</issues> | 09a5ae30602a7a81f6174dae4ba08b93ee7feed2 | |
cocotb__cocotb-3702 | 3,702 | cocotb/cocotb | null | fdf7286a3a38b703dc4a3bb7c707bfcd2263773a | 2024-02-06T06:42:22Z | diff --git a/docs/source/newsfragments/3659.feature.rst b/docs/source/newsfragments/3659.feature.rst
new file mode 100644
index 0000000000..772ec771ef
--- /dev/null
+++ b/docs/source/newsfragments/3659.feature.rst
@@ -0,0 +1,1 @@
+:class:`~cocotb.types.Array` now supports equality with :class:`list` and :class:`tuple`.
diff --git a/docs/source/newsfragments/3696.feature.rst b/docs/source/newsfragments/3696.feature.rst
new file mode 100644
index 0000000000..dfee1c02f8
--- /dev/null
+++ b/docs/source/newsfragments/3696.feature.rst
@@ -0,0 +1,1 @@
+Support comparing :class:`~cocotb.types.LogicArray` with :class:`str`, :class:`list`, and :class:`tuple`.
diff --git a/docs/source/newsfragments/3705.removal.rst b/docs/source/newsfragments/3705.removal.rst
new file mode 100644
index 0000000000..30d0ad4ad4
--- /dev/null
+++ b/docs/source/newsfragments/3705.removal.rst
@@ -0,0 +1,1 @@
+``cocotb.types.concat`` was removed. Use ``Array(itertools.chain(a, b))`` instead.
diff --git a/src/cocotb/py.typed b/src/cocotb/py.typed
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/cocotb/types/__init__.py b/src/cocotb/types/__init__.py
index 9491a012d3..a618a4e776 100644
--- a/src/cocotb/types/__init__.py
+++ b/src/cocotb/types/__init__.py
@@ -2,74 +2,15 @@
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABC, abstractmethod
-from typing import Any, Generic, Iterable, Iterator, Optional, TypeVar, Union, overload
-
-try:
- from typing import Protocol
-except ImportError:
- try:
- from typing_extensions import Protocol
- except ImportError:
- Protocol = ABC
+from typing import Generic, Iterable, Iterator, Optional, TypeVar, Union, overload
T = TypeVar("T")
-Self = TypeVar("Self")
-
-
-class Concatable(Protocol, Generic[T]):
- def __concat__(self: Self, other: Self) -> Self:
- return NotImplemented
-
- def __rconcat__(self: Self, other: Self) -> Self:
- return NotImplemented
-
-
-ConcatableT = TypeVar("ConcatableT", bound=Concatable[Any])
-
-
-def concat(a: ConcatableT, b: ConcatableT) -> ConcatableT:
- """
- Create a new array that is the concatenation of one array with another.
-
- Uses the :meth:`__concat__` or :meth:`__rconcat__` special methods to dispatch to a particular implementation,
- exactly like other binary operations in Python.
-
- Raises:
- TypeError: when the arguments do not support concatenation in the given order.
- """
- MISSING = object()
- type_a = type(a)
- type_b = type(b)
- a_concat = getattr(type_a, "__concat__", MISSING)
- a_rconcat = getattr(type_a, "__rconcat__", MISSING)
- b_rconcat = getattr(type_b, "__rconcat__", MISSING)
-
- if type_a is not type_b and issubclass(type_b, type_a) and a_rconcat != b_rconcat:
- # 'b' is a subclass of 'a' with a more specific implementation of 'concat(a, b)'
- call_order = [(b, b_rconcat, a), (a, a_concat, b)]
- elif type_a is not type_b:
- # normal call order
- call_order = [(a, a_concat, b), (b, b_rconcat, a)]
- else:
- # types are the same, we expect implementation of 'concat(a, b)' to be in 'a.__concat__'
- call_order = [(a, a_concat, b)]
-
- for lhs, method, rhs in call_order:
- if method is MISSING:
- continue
- res = method(lhs, rhs)
- if res is not NotImplemented:
- return res
-
- raise TypeError(
- f"cannot concatenate {type_a.__qualname__!r} with {type_b.__qualname__!r}"
- )
from .range import Range # noqa: E402 F401
-class ArrayLike(Concatable[T], Protocol, Generic[T]):
+class ArrayLike(ABC, Generic[T]):
@property
def left(self) -> int:
"""Leftmost index of the array."""
@@ -98,13 +39,13 @@ def range(self, new_range: Range) -> None:
def __len__(self) -> int:
return len(self.range)
- @abstractmethod
def __iter__(self) -> Iterator[T]:
- ...
+ for i in self.range:
+ yield self[i]
- @abstractmethod
def __reversed__(self) -> Iterator[T]:
- ...
+ for i in reversed(self.range):
+ yield self[i]
def __contains__(self, item: object) -> bool:
for v in self:
@@ -112,23 +53,16 @@ def __contains__(self, item: object) -> bool:
return True
return False
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, type(self)):
- return NotImplemented
- if len(self) != len(other):
- return False
- return all(a == b for a, b in zip(self, other))
-
@overload
def __getitem__(self, item: int) -> T:
...
@overload
- def __getitem__(self: Self, item: slice) -> Self:
+ def __getitem__(self, item: slice) -> "ArrayLike[T]":
...
@abstractmethod
- def __getitem__(self: Self, item: Union[int, slice]) -> Union[T, Self]:
+ def __getitem__(self, item: Union[int, slice]) -> Union[T, "ArrayLike[T]"]:
...
@overload
diff --git a/src/cocotb/types/array.py b/src/cocotb/types/array.py
index b417a0ff43..49715a524c 100644
--- a/src/cocotb/types/array.py
+++ b/src/cocotb/types/array.py
@@ -2,13 +2,11 @@
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import typing
-from itertools import chain
from cocotb.types import ArrayLike
from cocotb.types.range import Range
T = typing.TypeVar("T")
-Self = typing.TypeVar("Self", bound="Array[typing.Any]")
class Array(ArrayLike[T]):
@@ -168,9 +166,12 @@ def __contains__(self, item: object) -> bool:
return item in self._value
def __eq__(self, other: object) -> bool:
- if isinstance(other, type(self)):
+ if isinstance(other, Array):
return self._value == other._value
- return NotImplemented
+ elif isinstance(other, (list, tuple)):
+ return self == Array(other)
+ else:
+ return NotImplemented
@typing.overload
def __getitem__(self, item: int) -> T:
@@ -201,7 +202,7 @@ def __getitem__(
)
value = self._value[start_i : stop_i + 1]
range = Range(start, self.direction, stop)
- return type(self)(value=value, range=range)
+ return Array(value=value, range=range)
raise TypeError(f"indexes must be ints or slices, not {type(item).__name__}")
@typing.overload
@@ -247,16 +248,6 @@ def __setitem__(
def __repr__(self) -> str:
return f"{type(self).__name__}({self._value!r}, {self._range!r})"
- def __concat__(self: Self, other: Self) -> Self:
- if isinstance(other, type(self)):
- return type(self)(chain(self, other))
- return NotImplemented
-
- def __rconcat__(self: Self, other: Self) -> Self:
- if isinstance(other, type(self)):
- return type(self)(chain(other, self))
- return NotImplemented
-
def count(self, value: T) -> int:
"""Return number of occurrences of *value*."""
return self._value.count(value)
diff --git a/src/cocotb/types/logic.py b/src/cocotb/types/logic.py
index d6f00b76d6..1378986114 100644
--- a/src/cocotb/types/logic.py
+++ b/src/cocotb/types/logic.py
@@ -109,22 +109,24 @@ class Logic:
__slots__ = ("_repr",)
+ _repr: int
+
@classmethod
@lru_cache(maxsize=None)
- def _make(cls: typing.Type["Logic"], _repr: int) -> "Logic":
- """enforce singleton"""
+ def _get_object(cls: typing.Type["Logic"], _repr: int) -> "Logic":
+ """Return the Logic object associated with the repr, enforcing singleton."""
self = object.__new__(cls)
self._repr = _repr
- return typing.cast("Logic", self)
+ return self
- def __new__(
+ @classmethod
+ @lru_cache(maxsize=None)
+ def _map_literal(
cls: typing.Type["Logic"],
- value: typing.Optional[LogicConstructibleT] = None,
+ value: typing.Optional[LogicLiteralT] = None,
) -> "Logic":
- if isinstance(value, Logic):
- # convert Logic
- _repr = value._repr
- elif value is None:
+ """Convert and cache all literals."""
+ if value is None:
_repr = _X
else:
# convert literal
@@ -134,13 +136,21 @@ def __new__(
raise ValueError(
f"{value!r} is not convertible to a {cls.__qualname__}"
) from None
- obj = cls._make(_repr)
+ obj = cls._get_object(_repr)
return obj
+ def __new__(
+ cls: typing.Type["Logic"],
+ value: typing.Optional[LogicConstructibleT] = None,
+ ) -> "Logic":
+ if isinstance(value, Logic):
+ return value
+ return cls._map_literal(value)
+
def __and__(self, other: "Logic") -> "Logic":
- if not isinstance(other, type(self)):
+ if not isinstance(other, Logic):
return NotImplemented
- return type(self)(
+ return Logic(
(
# -----------------------------------------------------
# U X 0 1 Z W L H - | |
@@ -157,13 +167,10 @@ def __and__(self, other: "Logic") -> "Logic":
)[self._repr][other._repr]
)
- def __rand__(self: "Logic", other: "Logic") -> "Logic":
- return self & other
-
def __or__(self: "Logic", other: "Logic") -> "Logic":
- if not isinstance(other, type(self)):
+ if not isinstance(other, Logic):
return NotImplemented
- return type(self)(
+ return Logic(
(
# -----------------------------------------------------
# U X 0 1 Z W L H - | |
@@ -180,13 +187,10 @@ def __or__(self: "Logic", other: "Logic") -> "Logic":
)[self._repr][other._repr]
)
- def __ror__(self: "Logic", other: "Logic") -> "Logic":
- return self | other
-
def __xor__(self: "Logic", other: "Logic") -> "Logic":
- if not isinstance(other, type(self)):
+ if not isinstance(other, Logic):
return NotImplemented
- return type(self)(
+ return Logic(
(
# -----------------------------------------------------
# U X 0 1 Z W L H - | |
@@ -203,19 +207,20 @@ def __xor__(self: "Logic", other: "Logic") -> "Logic":
)[self._repr][other._repr]
)
- def __rxor__(self: "Logic", other: "Logic") -> "Logic":
- return self ^ other
-
def __invert__(self: "Logic") -> "Logic":
- return type(self)(("U", "X", "1", "0", "X", "X", "1", "0", "X")[self._repr])
+ return Logic(("U", "X", "1", "0", "X", "X", "1", "0", "X")[self._repr])
def __eq__(self, other: object) -> bool:
- if not isinstance(other, type(self)):
+ if isinstance(other, Logic):
+ return self is other
+ elif isinstance(other, (int, str, bool)):
try:
- other = type(self)(other)
- except Exception:
- return NotImplemented
- return self._repr == other._repr
+ other = Logic(other)
+ except ValueError:
+ return False
+ return self == other
+ else:
+ return NotImplemented
def __repr__(self) -> str:
return f"{type(self).__qualname__}({str(self)!r})"
diff --git a/src/cocotb/types/logic_array.py b/src/cocotb/types/logic_array.py
index 4ef7cddf22..422c35dbd0 100644
--- a/src/cocotb/types/logic_array.py
+++ b/src/cocotb/types/logic_array.py
@@ -2,16 +2,11 @@
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import typing
-from itertools import chain
from cocotb.types import ArrayLike
from cocotb.types.logic import Logic, LogicConstructibleT
from cocotb.types.range import Range
-LogicT = typing.TypeVar("LogicT", bound=Logic)
-S = typing.TypeVar("S")
-Self = typing.TypeVar("Self", bound="LogicArray")
-
class LogicArray(ArrayLike[Logic]):
r"""
@@ -124,14 +119,15 @@ class LogicArray(ArrayLike[Logic]):
def __init__(
self,
value: typing.Union[int, typing.Iterable[LogicConstructibleT]],
- range: typing.Optional[Range],
+ range: typing.Optional[Range] = None,
):
...
@typing.overload
def __init__(
self,
- value: typing.Union[int, typing.Iterable[LogicConstructibleT], None],
+ value: typing.Union[int, typing.Iterable[LogicConstructibleT], None] = None,
+ *,
range: Range,
):
...
@@ -141,11 +137,11 @@ def __init__(
value: typing.Union[int, typing.Iterable[LogicConstructibleT], None] = None,
range: typing.Optional[Range] = None,
) -> None:
- if value is None and range is None:
- raise ValueError(
- "at least one of the value and range input parameters must be given"
- )
if value is None:
+ if range is None:
+ raise ValueError(
+ "at least one of the value and range input parameters must be given"
+ )
self._value = [Logic() for _ in range]
elif isinstance(value, int):
if value < 0:
@@ -198,16 +194,23 @@ def __reversed__(self) -> typing.Iterator[Logic]:
def __contains__(self, item: object) -> bool:
return item in self._value
- def __eq__(self, other: object) -> bool:
- if isinstance(other, int):
+ def __eq__(
+ self,
+ other: object,
+ ) -> bool:
+ if isinstance(other, LogicArray):
+ return self._value == other._value
+ elif isinstance(other, int):
try:
return self.integer == other
except ValueError:
return False
- elif isinstance(other, type(self)):
- if len(self) != len(other):
+ elif isinstance(other, (str, list, tuple)):
+ try:
+ other = LogicArray(other)
+ except ValueError:
return False
- return all(a == b for a, b in zip(self, other))
+ return self == other
else:
return NotImplemented
@@ -230,9 +233,6 @@ def integer(self) -> int:
value = value << 1 | int(bit)
return value
- def __int__(self) -> int:
- return self.integer
-
@property
def signed_integer(self) -> int:
value = self.integer
@@ -269,7 +269,7 @@ def __getitem__(
)
value = self._value[start_i : stop_i + 1]
range = Range(start, self.direction, stop)
- return type(self)(value=value, range=range)
+ return LogicArray(value=value, range=range)
raise TypeError(f"indexes must be ints or slices, not {type(item).__name__}")
@typing.overload
@@ -303,17 +303,17 @@ def __setitem__(
start, stop, self.left, self.right
)
)
- value = [
+ value_as_logics = [
Logic(v)
for v in typing.cast(typing.Iterable[LogicConstructibleT], value)
]
- if len(value) != (stop_i - start_i + 1):
+ if len(value_as_logics) != (stop_i - start_i + 1):
raise ValueError(
"value of length {!r} will not fit in slice [{}:{}]".format(
- len(value), start, stop
+ len(value_as_logics), start, stop
)
)
- self._value[start_i : stop_i + 1] = value
+ self._value[start_i : stop_i + 1] = value_as_logics
else:
raise TypeError(
f"indexes must be ints or slices, not {type(item).__name__}"
@@ -328,60 +328,47 @@ def _translate_index(self, item: int) -> int:
def __repr__(self) -> str:
return f"{type(self).__qualname__}({self.binstr!r}, {self.range!r})"
- def __concat__(self: Self, other: Self) -> Self:
- if isinstance(other, type(self)):
- return type(self)(chain(self, other))
- return NotImplemented
+ def __str__(self) -> str:
+ return self.binstr
- def __rconcat__(self: Self, other: Self) -> Self:
- if isinstance(other, type(self)):
- return type(self)(chain(other, self))
- return NotImplemented
+ def __int__(self) -> int:
+ return self.integer
- def __and__(self: Self, other: Self) -> Self:
- if isinstance(other, type(self)):
+ def __and__(self, other: "LogicArray") -> "LogicArray":
+ if isinstance(other, LogicArray):
if len(self) != len(other):
raise ValueError(
f"cannot perform bitwise & "
f"between {type(self).__qualname__} of length {len(self)} "
f"and {type(other).__qualname__} of length {len(other)}"
)
- return type(self)(a & b for a, b in zip(self, other)) # type: ignore
+ return LogicArray(a & b for a, b in zip(self, other))
return NotImplemented
- def __rand__(self: Self, other: Self) -> Self:
- return self & other
-
- def __or__(self: Self, other: Self) -> Self:
- if isinstance(other, type(self)):
+ def __or__(self, other: "LogicArray") -> "LogicArray":
+ if isinstance(other, LogicArray):
if len(self) != len(other):
raise ValueError(
f"cannot perform bitwise | "
f"between {type(self).__qualname__} of length {len(self)} "
f"and {type(other).__qualname__} of length {len(other)}"
)
- return type(self)(a | b for a, b in zip(self, other)) # type: ignore
+ return LogicArray(a | b for a, b in zip(self, other))
return NotImplemented
- def __ror__(self: Self, other: Self) -> Self:
- return self | other
-
- def __xor__(self: Self, other: Self) -> Self:
- if isinstance(other, type(self)):
+ def __xor__(self, other: "LogicArray") -> "LogicArray":
+ if isinstance(other, LogicArray):
if len(self) != len(other):
raise ValueError(
f"cannot perform bitwise ^ "
f"between {type(self).__qualname__} of length {len(self)} "
f"and {type(other).__qualname__} of length {len(other)}"
)
- return type(self)(a ^ b for a, b in zip(self, other)) # type: ignore
+ return LogicArray(a ^ b for a, b in zip(self, other))
return NotImplemented
- def __rxor__(self: Self, other: Self) -> Self:
- return self ^ other
-
- def __invert__(self: Self) -> Self:
- return type(self)(~v for v in self)
+ def __invert__(self) -> "LogicArray":
+ return LogicArray(~v for v in self)
def _int_to_bitstr(value: int, n_bits: int) -> str:
diff --git a/src/cocotb/types/range.py b/src/cocotb/types/range.py
index 6ef97cff42..f52f25f009 100644
--- a/src/cocotb/types/range.py
+++ b/src/cocotb/types/range.py
@@ -3,8 +3,6 @@
# SPDX-License-Identifier: BSD-3-Clause
import typing
-T = typing.TypeVar("T")
-
class Range(typing.Sequence[int]):
r"""
diff --git a/src/pygpi/py.typed b/src/pygpi/py.typed
new file mode 100644
index 0000000000..e69de29bb2
| diff --git a/tests/pytest/test_array.py b/tests/pytest/test_array.py
index 43c3c0f9c1..123cbacb0c 100644
--- a/tests/pytest/test_array.py
+++ b/tests/pytest/test_array.py
@@ -2,7 +2,7 @@
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import pytest
-from cocotb.types import Array, Range, concat
+from cocotb.types import Array, Range
def test_value_only_construction():
@@ -51,8 +51,8 @@ def test_equality():
assert Array("1234", Range(1, 4)) == Array("1234", Range(0, -3))
assert Array("1234", Range(1, 4)) != Array("4321", Range(1, 4))
assert Array("1234") != Array("12")
- assert Array("1234") != "1234"
assert Array("1234") != 8
+ assert Array([1, 2, 3, 4]) == [1, 2, 3, 4]
def test_repr_eval():
@@ -157,28 +157,6 @@ def test_slice_correct_infered():
assert b.right == 0
-def test_array_concat():
- l = Array("01ZX", Range(0, "to", 3))
- p = Array("1101")
- r = concat(l, p)
- assert r == Array("01ZX1101")
-
- with pytest.raises(TypeError):
- concat(l, "nope")
- with pytest.raises(TypeError):
- concat("nope", l)
-
-
-def test_array_concat_promotion():
- class MyArray(Array[int]):
- ...
-
- assert type(concat(Array([]), Array([]))) is Array
- assert type(concat(MyArray([]), Array([]))) is Array
- assert type(concat(Array([]), MyArray([]))) is Array
- assert type(concat(MyArray([]), MyArray([]))) is MyArray
-
-
def test_changing_range():
a = Array("1234")
a.range = Range(3, "downto", 0)
diff --git a/tests/pytest/test_logic.py b/tests/pytest/test_logic.py
index 43c46b27f2..baf74b1d8d 100644
--- a/tests/pytest/test_logic.py
+++ b/tests/pytest/test_logic.py
@@ -52,6 +52,10 @@ def test_logic_equality():
assert Logic(0) == Logic("0")
assert Logic(0) != Logic("X")
assert Logic(0) != object()
+ assert Logic(0) == 0
+ assert Logic("X") == "X"
+ assert Logic("X") != "j"
+ assert Logic("1") != 5
def test_logic_default_value():
diff --git a/tests/pytest/test_logic_array.py b/tests/pytest/test_logic_array.py
index 8070c01a90..50d8735012 100644
--- a/tests/pytest/test_logic_array.py
+++ b/tests/pytest/test_logic_array.py
@@ -2,7 +2,7 @@
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
import pytest
-from cocotb.types import Logic, LogicArray, Range, concat
+from cocotb.types import Logic, LogicArray, Range
def test_logic_array_constructor():
@@ -60,14 +60,6 @@ def test_logic_array_repr():
assert eval(repr(l)) == l
-def test_logic_array_concat():
- l = LogicArray("01ZX", Range(0, "to", 3))
- p = LogicArray("1101")
- assert concat(l, p) == LogicArray("01ZX1101")
- with pytest.raises(TypeError):
- concat(l, "nope")
-
-
def test_logic_array_and():
l = LogicArray("0011XZ")
p = LogicArray("011010")
@@ -106,3 +98,135 @@ def test_logic_array_xor():
def test_logic_array_invert():
assert ~LogicArray("01XZ") == LogicArray("10XX")
+
+
+def test_logic_array_literal_casts():
+ assert str(LogicArray("UX01ZWLH-")) == "UX01ZWLH-"
+ assert int(LogicArray("0101010")) == 0b0101010
+
+
+def test_equality():
+ # fmt: off
+ assert LogicArray("0101", Range(0, 'to', 3)) == LogicArray("0101", Range(0, 'to', 3))
+ assert LogicArray("0101", Range(0, 'to', 3)) == LogicArray("0101", Range(7, 'downto', 4))
+ assert LogicArray("0101", Range(0, 'to', 3)) != LogicArray("1010", Range(0, 'to', 3))
+ # fmt: on
+ assert LogicArray("0101") == "0101"
+ assert LogicArray("0101") == [0, 1, 0, 1]
+ assert LogicArray("0101") == 0b0101
+ assert LogicArray("XXXX") != 1
+ assert LogicArray("0101") != object()
+ assert LogicArray("0101") != "lol"
+ assert LogicArray("0101") != 123
+
+
+def test_repr_eval():
+ r = LogicArray("X01Z")
+ assert eval(repr(r)) == r
+
+
+def test_iter():
+ val = [Logic(0), Logic(1), Logic("X"), Logic("Z")]
+ a = LogicArray(val)
+ assert list(a) == val
+
+
+def test_reversed():
+ val = [Logic(0), Logic(1), Logic("X"), Logic("Z")]
+ a = LogicArray(val)
+ assert list(reversed(a)) == list(reversed(val))
+
+
+def test_contains():
+ a = LogicArray("01XZ")
+ assert Logic("X") in a
+ assert Logic("U") not in a
+
+
+def test_index():
+ r = LogicArray("0001101", Range(7, "downto", 1))
+ assert r.index(Logic("1")) == 4
+ assert r.index(Logic("1"), 2, 0) == 1
+ with pytest.raises(IndexError):
+ r.index(object())
+
+
+def test_count():
+ assert LogicArray("011X1Z").count(Logic("1")) == 3
+
+
+def test_indexing():
+ a = LogicArray("0101", Range(8, "to", 11))
+ assert a[8] == "0"
+ with pytest.raises(IndexError):
+ a[0]
+ a[11] = "X"
+ assert a[11] == "X"
+
+ b = LogicArray("Z01X", Range(10, "downto", 7))
+ assert b[8] == 1
+ with pytest.raises(IndexError):
+ b[-2]
+ b[8] = 0
+ assert b[8] == 0
+
+
+def test_bad_indexing():
+ with pytest.raises(TypeError):
+ LogicArray("01XZ")[list()]
+ with pytest.raises(TypeError):
+ LogicArray("1010")[object()] = 9
+
+
+def test_slicing():
+ a = LogicArray("0110XXUU")
+ b = a[5:1]
+ assert b.left == 5
+ assert b.right == 1
+ assert b == LogicArray("10XXU")
+ a[3:0] = "ZZZZ"
+ assert a == LogicArray("0110ZZZZ")
+
+
+def test_slicing_infered_start_stop():
+ a = LogicArray("XXXX")
+ assert a[:] == a
+ a[:] = "1010"
+ assert a == 0b1010
+
+
+def test_dont_specify_step():
+ with pytest.raises(IndexError):
+ LogicArray("1010")[::1]
+ with pytest.raises(IndexError):
+ LogicArray("1010")[1:2:1] = [1, 2]
+
+
+def test_slice_direction_mismatch():
+ a = LogicArray("1010", Range(10, "downto", 7))
+ with pytest.raises(IndexError):
+ a[7:9]
+ with pytest.raises(IndexError):
+ a[9:10] = "01"
+
+
+def test_set_slice_wrong_length():
+ a = LogicArray("XXXXXX")
+ with pytest.raises(ValueError):
+ a[4:2] = "0000000000000"
+
+
+def test_slice_correct_infered():
+ a = LogicArray("1111")
+ b = a[:3]
+ assert b.right == 3
+
+
+def test_changing_range():
+ a = LogicArray("X01Z")
+ a.range = Range(3, "downto", 0)
+ assert a.range == Range(3, "downto", 0)
+ with pytest.raises(TypeError):
+ a.range = range(10)
+ with pytest.raises(ValueError):
+ a.range = Range(7, "downto", 0)
| diff --git a/docs/source/newsfragments/3659.feature.rst b/docs/source/newsfragments/3659.feature.rst
new file mode 100644
index 0000000000..772ec771ef
--- /dev/null
+++ b/docs/source/newsfragments/3659.feature.rst
@@ -0,0 +1,1 @@
+:class:`~cocotb.types.Array` now supports equality with :class:`list` and :class:`tuple`.
diff --git a/docs/source/newsfragments/3696.feature.rst b/docs/source/newsfragments/3696.feature.rst
new file mode 100644
index 0000000000..dfee1c02f8
--- /dev/null
+++ b/docs/source/newsfragments/3696.feature.rst
@@ -0,0 +1,1 @@
+Support comparing :class:`~cocotb.types.LogicArray` with :class:`str`, :class:`list`, and :class:`tuple`.
diff --git a/docs/source/newsfragments/3705.removal.rst b/docs/source/newsfragments/3705.removal.rst
new file mode 100644
index 0000000000..30d0ad4ad4
--- /dev/null
+++ b/docs/source/newsfragments/3705.removal.rst
@@ -0,0 +1,1 @@
+``cocotb.types.concat`` was removed. Use ``Array(itertools.chain(a, b))`` instead.
diff --git a/src/cocotb/py.typed b/src/cocotb/py.typed
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/pygpi/py.typed b/src/pygpi/py.typed
new file mode 100644
index 0000000000..e69de29bb2
| [
{
"components": [
{
"doc": "Return the Logic object associated with the repr, enforcing singleton.",
"lines": [
116,
120
],
"name": "Logic._get_object",
"signature": "def _get_object(cls: typing.Type[\"Logic\"], _repr: int) -> \"Logic\":",
... | [
"tests/pytest/test_array.py::test_equality",
"tests/pytest/test_logic_array.py::test_logic_array_literal_casts",
"tests/pytest/test_logic_array.py::test_equality"
] | [
"tests/pytest/test_array.py::test_value_only_construction",
"tests/pytest/test_array.py::test_both_construction",
"tests/pytest/test_array.py::test_bad_construction",
"tests/pytest/test_array.py::test_length",
"tests/pytest/test_array.py::test_range",
"tests/pytest/test_array.py::test_repr_eval",
"tests... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Improvements to `cocotb.types`
Closes #3659. Closes #3696.
Additionally,
* `concat()` was removed. I doubt this was used by anyone and was difficult to type correctly.
* `ArrayLike` trying to be a Protocol made mypy *very* unhappy. So we don't try anymore.
* `py.typed` files were added so type checking can now be done.
* The `cocotb.types` package now passes strict type checking with mypy.
* Some small cleanups and performance enhancements, nothing radical.
* Improve code coverage to ~100% (generic impls in `ArrayLike` aren't tested).
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/cocotb/types/logic.py]
(definition of Logic._get_object:)
def _get_object(cls: typing.Type["Logic"], _repr: int) -> "Logic":
"""Return the Logic object associated with the repr, enforcing singleton."""
(definition of Logic._map_literal:)
def _map_literal( cls: typing.Type["Logic"], value: typing.Optional[LogicLiteralT] = None, ) -> "Logic":
"""Convert and cache all literals."""
[end of new definitions in src/cocotb/types/logic.py]
[start of new definitions in src/cocotb/types/logic_array.py]
(definition of LogicArray.__str__:)
def __str__(self) -> str:
[end of new definitions in src/cocotb/types/logic_array.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Support comparison between `LogicArray` and literal syntaxes
The LogicArray literals include unsigned integers and strings.
```python
assert dut.handle.value == "X10Z"
assert dut.handle.value == 0b1001
```
----------
--------------------
</issues> | d211018226edcc344b86ea3fc5839097498731c6 |
deepset-ai__haystack-6916 | 6,916 | deepset-ai/haystack | null | c3a9dac1969f6af731892ab7194ee242c140fddb | 2024-02-05T13:46:05Z | diff --git a/haystack/core/component/__init__.py b/haystack/core/component/__init__.py
index 3a292edaf0..dea7614129 100644
--- a/haystack/core/component/__init__.py
+++ b/haystack/core/component/__init__.py
@@ -1,7 +1,7 @@
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0
-from haystack.core.component.component import component, Component
-from haystack.core.component.sockets import InputSocket, OutputSocket
+from haystack.core.component.component import Component, component
+from haystack.core.component.types import InputSocket, OutputSocket
__all__ = ["component", "Component", "InputSocket", "OutputSocket"]
diff --git a/haystack/core/component/component.py b/haystack/core/component/component.py
index 9db5a7aabc..d211808825 100644
--- a/haystack/core/component/component.py
+++ b/haystack/core/component/component.py
@@ -74,9 +74,11 @@
from types import new_class
from typing import Any, Protocol, runtime_checkable
-from haystack.core.component.sockets import InputSocket, OutputSocket, _empty
from haystack.core.errors import ComponentError
+from .sockets import Sockets
+from .types import InputSocket, OutputSocket, _empty
+
logger = logging.getLogger(__name__)
@@ -131,12 +133,14 @@ def __call__(cls, *args, **kwargs):
# that stores the output specification.
# We deepcopy the content of the cache to transfer ownership from the class method
# to the actual instance, so that different instances of the same class won't share this data.
- instance.__haystack_output__ = deepcopy(getattr(instance.run, "_output_types_cache", {}))
+ instance.__haystack_output__ = Sockets(
+ instance, deepcopy(getattr(instance.run, "_output_types_cache", {})), OutputSocket
+ )
# Create the sockets if set_input_types() wasn't called in the constructor.
# If it was called and there are some parameters also in the `run()` method, these take precedence.
if not hasattr(instance, "__haystack_input__"):
- instance.__haystack_input__ = {}
+ instance.__haystack_input__ = Sockets(instance, {}, InputSocket)
run_signature = inspect.signature(getattr(cls, "run"))
for param in list(run_signature.parameters)[1:]: # First is 'self' and it doesn't matter.
if run_signature.parameters[param].kind not in (
@@ -185,7 +189,7 @@ def set_input_type(self, instance, name: str, type: Any, default: Any = _empty):
:param default: default value of the input socket, defaults to _empty
"""
if not hasattr(instance, "__haystack_input__"):
- instance.__haystack_input__ = {}
+ instance.__haystack_input__ = Sockets(instance, {}, InputSocket)
instance.__haystack_input__[name] = InputSocket(name=name, type=type, default_value=default)
def set_input_types(self, instance, **types):
@@ -229,7 +233,9 @@ def run(self, value_0: str, value_1: Optional[str] = None, **kwargs):
parameter mandatory as specified in `set_input_types`.
"""
- instance.__haystack_input__ = {name: InputSocket(name=name, type=type_) for name, type_ in types.items()}
+ instance.__haystack_input__ = Sockets(
+ instance, {name: InputSocket(name=name, type=type_) for name, type_ in types.items()}, InputSocket
+ )
def set_output_types(self, instance, **types):
"""
@@ -251,7 +257,9 @@ def run(self, value: int):
return {"output_1": 1, "output_2": "2"}
```
"""
- instance.__haystack_output__ = {name: OutputSocket(name=name, type=type_) for name, type_ in types.items()}
+ instance.__haystack_output__ = Sockets(
+ instance, {name: OutputSocket(name=name, type=type_) for name, type_ in types.items()}, OutputSocket
+ )
def output_types(self, **types):
"""
diff --git a/haystack/core/component/sockets.py b/haystack/core/component/sockets.py
index ff3080dcf3..25bf4fdc88 100644
--- a/haystack/core/component/sockets.py
+++ b/haystack/core/component/sockets.py
@@ -1,57 +1,107 @@
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0
+
import logging
-from dataclasses import dataclass, field
-from typing import Any, List, Type, get_args
+from typing import Dict, Type, Union
+
+from haystack.core.type_utils import _type_name
-from haystack.core.component.types import HAYSTACK_VARIADIC_ANNOTATION
+from .types import InputSocket, OutputSocket
logger = logging.getLogger(__name__)
+SocketsDict = Dict[str, Union[InputSocket, OutputSocket]]
+SocketsIOType = Union[Type[InputSocket], Type[OutputSocket]]
+
+
+class Sockets:
+ """
+ This class is used to represent the inputs or outputs of a `Component`.
+ Depending on the type passed to the constructor, it will represent either the inputs or the outputs of
+ the `Component`.
+
+ Usage:
+ ```python
+ from haystack.components.builders.prompt_builder import PromptBuilder
+
+ prompt_template = \"""
+ Given these documents, answer the question.\nDocuments:
+ {% for doc in documents %}
+ {{ doc.content }}
+ {% endfor %}
+
+ \nQuestion: {{question}}
+ \nAnswer:
+ \"""
-class _empty:
- """Custom object for marking InputSocket.default_value as not set."""
+ prompt_builder = PromptBuilder(template=prompt_template)
+ sockets = {"question": InputSocket("question", Any), "documents": InputSocket("documents", Any)}
+ inputs = Sockets(component=prompt_builder, sockets=sockets, sockets_type=InputSocket)
+ inputs
+ >>> PromptBuilder inputs:
+ >>> - question: Any
+ >>> - documents: Any
+ inputs.question
+ >>> InputSocket(name='question', type=typing.Any, default_value=<class 'haystack.core.component.types._empty'>, is_variadic=False, senders=[])
+ ```
+ """
-@dataclass
-class InputSocket:
- name: str
- type: Type
- default_value: Any = _empty
- is_variadic: bool = field(init=False)
- senders: List[str] = field(default_factory=list)
+ # We're using a forward declaration here to avoid a circular import.
+ def __init__(
+ self,
+ component: "Component", # type: ignore[name-defined] # noqa: F821
+ sockets_dict: SocketsDict,
+ sockets_io_type: SocketsIOType,
+ ):
+ """
+ Create a new Sockets object.
+ We don't do any enforcement on the types of the sockets here, the `sockets_type` is only used for
+ the `__repr__` method.
+ We could do without it and use the type of a random value in the `sockets` dict, but that wouldn't
+ work for components that have no sockets at all. Either input or output.
+ """
+ self._sockets_io_type = sockets_io_type
+ self._component = component
+ self._sockets_dict = sockets_dict
+ self.__dict__.update(sockets_dict)
- @property
- def is_mandatory(self):
- return self.default_value == _empty
+ def __setitem__(self, key: str, socket: Union[InputSocket, OutputSocket]):
+ """
+ Adds a new socket to this Sockets object.
+ This eases a bit updating the list of sockets after Sockets has been created.
+ That should happen only in the `component` decorator.
+ """
+ self._sockets_dict[key] = socket
+ self.__dict__[key] = socket
- def __post_init__(self):
+ def _component_name(self) -> str:
+ if pipeline := getattr(self._component, "__haystack_added_to_pipeline__"):
+ # This Component has been added in a Pipeline, let's get the name from there.
+ return pipeline.get_component_name(self._component)
+
+ # This Component has not been added to a Pipeline yet, so we can't know its name.
+ # Let's use the class name instead.
+ return str(self._component)
+
+ def __getattribute__(self, name):
try:
- # __metadata__ is a tuple
- self.is_variadic = self.type.__metadata__[0] == HAYSTACK_VARIADIC_ANNOTATION
+ sockets = object.__getattribute__(self, "_sockets")
+ if name in sockets:
+ return sockets[name]
except AttributeError:
- self.is_variadic = False
- if self.is_variadic:
- # We need to "unpack" the type inside the Variadic annotation,
- # otherwise the pipeline connection api will try to match
- # `Annotated[type, CANALS_VARIADIC_ANNOTATION]`.
- #
- # Note1: Variadic is expressed as an annotation of one single type,
- # so the return value of get_args will always be a one-item tuple.
- #
- # Note2: a pipeline always passes a list of items when a component
- # input is declared as Variadic, so the type itself always wraps
- # an iterable of the declared type. For example, Variadic[int]
- # is eventually an alias for Iterable[int]. Since we're interested
- # in getting the inner type `int`, we call `get_args` twice: the
- # first time to get `List[int]` out of `Variadic`, the second time
- # to get `int` out of `List[int]`.
- self.type = get_args(get_args(self.type)[0])[0]
-
-
-@dataclass
-class OutputSocket:
- name: str
- type: type
- receivers: List[str] = field(default_factory=list)
+ pass
+
+ return object.__getattribute__(self, name)
+
+ def __repr__(self) -> str:
+ result = self._component_name()
+ if self._sockets_io_type == InputSocket:
+ result += " inputs:\n"
+ elif self._sockets_io_type == OutputSocket:
+ result += " outputs:\n"
+
+ result += "\n".join([f" - {n}: {_type_name(s.type)}" for n, s in self._sockets_dict.items()])
+
+ return result
diff --git a/haystack/core/component/types.py b/haystack/core/component/types.py
index 2527879022..663bccf4fd 100644
--- a/haystack/core/component/types.py
+++ b/haystack/core/component/types.py
@@ -1,4 +1,5 @@
-from typing import Iterable, TypeVar
+from dataclasses import dataclass, field
+from typing import Any, Iterable, List, Type, TypeVar, get_args
from typing_extensions import Annotated, TypeAlias # Python 3.8 compatibility
@@ -13,3 +14,50 @@
# type so it can be used in the `InputSocket` creation where we
# check that its annotation equals to CANALS_VARIADIC_ANNOTATION
Variadic: TypeAlias = Annotated[Iterable[T], HAYSTACK_VARIADIC_ANNOTATION]
+
+
+class _empty:
+ """Custom object for marking InputSocket.default_value as not set."""
+
+
+@dataclass
+class InputSocket:
+ name: str
+ type: Type
+ default_value: Any = _empty
+ is_variadic: bool = field(init=False)
+ senders: List[str] = field(default_factory=list)
+
+ @property
+ def is_mandatory(self):
+ return self.default_value == _empty
+
+ def __post_init__(self):
+ try:
+ # __metadata__ is a tuple
+ self.is_variadic = self.type.__metadata__[0] == HAYSTACK_VARIADIC_ANNOTATION
+ except AttributeError:
+ self.is_variadic = False
+ if self.is_variadic:
+ # We need to "unpack" the type inside the Variadic annotation,
+ # otherwise the pipeline connection api will try to match
+ # `Annotated[type, HAYSTACK_VARIADIC_ANNOTATION]`.
+ #
+ # Note1: Variadic is expressed as an annotation of one single type,
+ # so the return value of get_args will always be a one-item tuple.
+ #
+ # Note2: a pipeline always passes a list of items when a component
+ # input is declared as Variadic, so the type itself always wraps
+ # an iterable of the declared type. For example, Variadic[int]
+ # is eventually an alias for Iterable[int]. Since we're interested
+ # in getting the inner type `int`, we call `get_args` twice: the
+ # first time to get `List[int]` out of `Variadic`, the second time
+ # to get `int` out of `List[int]`.
+ self.type = get_args(get_args(self.type)[0])[0]
+
+
+@dataclass
+class OutputSocket:
+ name: str
+ type: type
+ receivers: List[str] = field(default_factory=list)
diff --git a/haystack/core/pipeline/descriptions.py b/haystack/core/pipeline/descriptions.py
index 406bf19677..0e7c042091 100644
--- a/haystack/core/pipeline/descriptions.py
+++ b/haystack/core/pipeline/descriptions.py
@@ -1,14 +1,13 @@
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
#
# SPDX-License-Identifier: Apache-2.0
-from typing import List, Dict
import logging
+from typing import Dict, List
import networkx # type:ignore
+from haystack.core.component.types import InputSocket, OutputSocket
from haystack.core.type_utils import _type_name
-from haystack.core.component.sockets import InputSocket, OutputSocket
-
logger = logging.getLogger(__name__)
diff --git a/haystack/core/pipeline/pipeline.py b/haystack/core/pipeline/pipeline.py
index eeecdcea49..a435f52029 100644
--- a/haystack/core/pipeline/pipeline.py
+++ b/haystack/core/pipeline/pipeline.py
@@ -197,16 +197,17 @@ def add_component(self, name: str, instance: Component) -> None:
)
raise PipelineError(msg)
- # Create the component's input and output sockets
- input_sockets = getattr(instance, "__haystack_input__", {})
- output_sockets = getattr(instance, "__haystack_output__", {})
-
setattr(instance, "__haystack_added_to_pipeline__", self)
# Add component to the graph, disconnected
logger.debug("Adding component '%s' (%s)", name, instance)
+ # We're completely sure the fields exist so we ignore the type error
self.graph.add_node(
- name, instance=instance, input_sockets=input_sockets, output_sockets=output_sockets, visits=0
+ name,
+ instance=instance,
+ input_sockets=instance.__haystack_input__._sockets_dict, # type: ignore[attr-defined]
+ output_sockets=instance.__haystack_output__._sockets_dict, # type: ignore[attr-defined]
+ visits=0,
)
def connect(self, connect_from: str, connect_to: str) -> None:
@@ -381,6 +382,16 @@ def get_component(self, name: str) -> Component:
except KeyError as exc:
raise ValueError(f"Component named {name} not found in the pipeline.") from exc
+ def get_component_name(self, instance: Component) -> str:
+ """
+ Returns the name of a Component instance. If the Component has not been added to this Pipeline,
+ returns an empty string.
+ """
+ for name, inst in self.graph.nodes(data="instance"):
+ if inst == instance:
+ return name
+ return ""
+
def inputs(self) -> Dict[str, Dict[str, Any]]:
"""
Returns a dictionary containing the inputs of a pipeline. Each key in the dictionary
@@ -465,16 +476,16 @@ def _validate_input(self, data: Dict[str, Any]):
if component_name not in self.graph.nodes:
raise ValueError(f"Component named {component_name} not found in the pipeline.")
instance = self.graph.nodes[component_name]["instance"]
- for socket_name, socket in instance.__haystack_input__.items():
+ for socket_name, socket in instance.__haystack_input__._sockets_dict.items():
if socket.senders == [] and socket.is_mandatory and socket_name not in component_inputs:
raise ValueError(f"Missing input for component {component_name}: {socket_name}")
for input_name in component_inputs.keys():
- if input_name not in instance.__haystack_input__:
+ if input_name not in instance.__haystack_input__._sockets_dict:
raise ValueError(f"Input {input_name} not found in component {component_name}.")
for component_name in self.graph.nodes:
instance = self.graph.nodes[component_name]["instance"]
- for socket_name, socket in instance.__haystack_input__.items():
+ for socket_name, socket in instance.__haystack_input__._sockets_dict.items():
component_inputs = data.get(component_name, {})
if socket.senders == [] and socket.is_mandatory and socket_name not in component_inputs:
raise ValueError(f"Missing input for component {component_name}: {socket_name}")
@@ -518,7 +529,7 @@ def run( # noqa: C901, PLR0912 pylint: disable=too-many-branches
for component_input, input_value in component_inputs.items():
# Handle mutable input data
data[component_name][component_input] = copy(input_value)
- if instance.__haystack_input__[component_input].is_variadic:
+ if instance.__haystack_input__._sockets_dict[component_input].is_variadic:
# Components that have variadic inputs need to receive lists as input.
# We don't want to force the user to always pass lists, so we convert single values to lists here.
# If it's already a list we assume the component takes a variadic input of lists, so we
@@ -533,12 +544,12 @@ def run( # noqa: C901, PLR0912 pylint: disable=too-many-branches
for node_name in self.graph.nodes:
component = self.graph.nodes[node_name]["instance"]
- if len(component.__haystack_input__) == 0:
+ if len(component.__haystack_input__._sockets_dict) == 0:
# Component has no input, can run right away
to_run.append((node_name, component))
continue
- for socket in component.__haystack_input__.values():
+ for socket in component.__haystack_input__._sockets_dict.values():
if not socket.senders or socket.is_variadic:
# Component has at least one input not connected or is variadic, can run right away.
to_run.append((node_name, component))
@@ -561,12 +572,12 @@ def run( # noqa: C901, PLR0912 pylint: disable=too-many-branches
while len(to_run) > 0:
name, comp = to_run.pop(0)
- if any(socket.is_variadic for socket in comp.__haystack_input__.values()) and not getattr( # type: ignore
+ if any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) and not getattr( # type: ignore
comp, "is_greedy", False
):
there_are_non_variadics = False
for _, other_comp in to_run:
- if not any(socket.is_variadic for socket in other_comp.__haystack_input__.values()): # type: ignore
+ if not any(socket.is_variadic for socket in other_comp.__haystack_input__._sockets_dict.values()): # type: ignore
there_are_non_variadics = True
break
@@ -575,7 +586,7 @@ def run( # noqa: C901, PLR0912 pylint: disable=too-many-branches
waiting_for_input.append((name, comp))
continue
- if name in last_inputs and len(comp.__haystack_input__) == len(last_inputs[name]): # type: ignore
+ if name in last_inputs and len(comp.__haystack_input__._sockets_dict) == len(last_inputs[name]): # type: ignore
# This component has all the inputs it needs to run
res = comp.run(**last_inputs[name])
@@ -649,7 +660,7 @@ def run( # noqa: C901, PLR0912 pylint: disable=too-many-branches
# This is our last resort, if there's no lazy variadic waiting for input
# we're stuck for real and we can't make any progress.
for name, comp in waiting_for_input:
- is_variadic = any(socket.is_variadic for socket in comp.__haystack_input__.values()) # type: ignore
+ is_variadic = any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) # type: ignore
if is_variadic and not getattr(comp, "is_greedy", False):
break
else:
@@ -680,14 +691,14 @@ def run( # noqa: C901, PLR0912 pylint: disable=too-many-branches
last_inputs[name] = {}
# Lazy variadics must be removed only if there's nothing else to run at this stage
- is_variadic = any(socket.is_variadic for socket in comp.__haystack_input__.values()) # type: ignore
+ is_variadic = any(socket.is_variadic for socket in comp.__haystack_input__._sockets_dict.values()) # type: ignore
if is_variadic and not getattr(comp, "is_greedy", False):
there_are_only_lazy_variadics = True
for other_name, other_comp in waiting_for_input:
if name == other_name:
continue
there_are_only_lazy_variadics &= any(
- socket.is_variadic for socket in other_comp.__haystack_input__.values() # type: ignore
+ socket.is_variadic for socket in other_comp.__haystack_input__._sockets_dict.values() # type: ignore
) and not getattr(other_comp, "is_greedy", False)
if not there_are_only_lazy_variadics:
@@ -695,7 +706,7 @@ def run( # noqa: C901, PLR0912 pylint: disable=too-many-branches
# Find the first component that has all the inputs it needs to run
has_enough_inputs = True
- for input_socket in comp.__haystack_input__.values(): # type: ignore
+ for input_socket in comp.__haystack_input__._sockets_dict.values(): # type: ignore
if input_socket.is_mandatory and input_socket.name not in last_inputs[name]:
has_enough_inputs = False
break
| diff --git a/haystack/testing/sample_components/repeat.py b/haystack/testing/sample_components/repeat.py
index 73e25097a0..a1f6287982 100644
--- a/haystack/testing/sample_components/repeat.py
+++ b/haystack/testing/sample_components/repeat.py
@@ -9,11 +9,11 @@
@component
class Repeat:
def __init__(self, outputs: List[str]):
- self.outputs = outputs
+ self._outputs = outputs
component.set_output_types(self, **{k: int for k in outputs})
def run(self, value: int):
"""
:param value: the value to repeat.
"""
- return {val: value for val in self.outputs}
+ return {val: value for val in self._outputs}
diff --git a/test/components/builders/test_dynamic_chat_prompt_builder.py b/test/components/builders/test_dynamic_chat_prompt_builder.py
index 67183d0280..d75e0051cf 100644
--- a/test/components/builders/test_dynamic_chat_prompt_builder.py
+++ b/test/components/builders/test_dynamic_chat_prompt_builder.py
@@ -15,16 +15,16 @@ def test_initialization(self):
# we have inputs that contain: prompt_source, template_variables + runtime_variables
expected_keys = set(runtime_variables + ["prompt_source", "template_variables"])
- assert set(builder.__haystack_input__.keys()) == expected_keys
+ assert set(builder.__haystack_input__._sockets_dict.keys()) == expected_keys
# response is always prompt regardless of chat mode
- assert set(builder.__haystack_output__.keys()) == {"prompt"}
+ assert set(builder.__haystack_output__._sockets_dict.keys()) == {"prompt"}
# prompt_source is a list of ChatMessage
- assert builder.__haystack_input__["prompt_source"].type == List[ChatMessage]
+ assert builder.__haystack_input__._sockets_dict["prompt_source"].type == List[ChatMessage]
# output is always prompt, but the type is different depending on the chat mode
- assert builder.__haystack_output__["prompt"].type == List[ChatMessage]
+ assert builder.__haystack_output__._sockets_dict["prompt"].type == List[ChatMessage]
def test_non_empty_chat_messages(self):
prompt_builder = DynamicChatPromptBuilder(runtime_variables=["documents"])
diff --git a/test/components/builders/test_dynamic_prompt_builder.py b/test/components/builders/test_dynamic_prompt_builder.py
index c3508b255e..7afacd2ca1 100644
--- a/test/components/builders/test_dynamic_prompt_builder.py
+++ b/test/components/builders/test_dynamic_prompt_builder.py
@@ -16,16 +16,16 @@ def test_initialization(self):
# regardless of the chat mode
# we have inputs that contain: prompt_source, template_variables + runtime_variables
expected_keys = set(runtime_variables + ["prompt_source", "template_variables"])
- assert set(builder.__haystack_input__.keys()) == expected_keys
+ assert set(builder.__haystack_input__._sockets_dict.keys()) == expected_keys
# response is always prompt regardless of chat mode
- assert set(builder.__haystack_output__.keys()) == {"prompt"}
+ assert set(builder.__haystack_output__._sockets_dict.keys()) == {"prompt"}
# prompt_source is a list of ChatMessage or a string
- assert builder.__haystack_input__["prompt_source"].type == str
+ assert builder.__haystack_input__._sockets_dict["prompt_source"].type == str
# output is always prompt, but the type is different depending on the chat mode
- assert builder.__haystack_output__["prompt"].type == str
+ assert builder.__haystack_output__._sockets_dict["prompt"].type == str
def test_processing_a_simple_template_with_provided_variables(self):
runtime_variables = ["var1", "var2", "var3"]
diff --git a/test/components/routers/test_conditional_router.py b/test/components/routers/test_conditional_router.py
index 726d7cd4cf..dcb52cff14 100644
--- a/test/components/routers/test_conditional_router.py
+++ b/test/components/routers/test_conditional_router.py
@@ -86,8 +86,8 @@ def test_router_initialized(self, routes):
router = ConditionalRouter(routes)
assert router.routes == routes
- assert set(router.__haystack_input__.keys()) == {"query", "streams"}
- assert set(router.__haystack_output__.keys()) == {"query", "streams"}
+ assert set(router.__haystack_input__._sockets_dict.keys()) == {"query", "streams"}
+ assert set(router.__haystack_output__._sockets_dict.keys()) == {"query", "streams"}
def test_router_evaluate_condition_expressions(self, router):
# first route should be selected
diff --git a/test/core/component/test_component.py b/test/core/component/test_component.py
index a60ea6baea..bbe2605f03 100644
--- a/test/core/component/test_component.py
+++ b/test/core/component/test_component.py
@@ -1,5 +1,4 @@
-import typing
-from typing import Any, Optional
+from typing import Any
import pytest
@@ -89,6 +88,7 @@ def another_method(self, input_value: int):
def test_set_input_types():
+ @component
class MockComponent:
def __init__(self):
component.set_input_types(self, value=Any)
@@ -105,7 +105,7 @@ def run(self, **kwargs):
return {"value": 1}
comp = MockComponent()
- assert comp.__haystack_input__ == {"value": InputSocket("value", Any)}
+ assert comp.__haystack_input__._sockets_dict == {"value": InputSocket("value", Any)}
assert comp.run() == {"value": 1}
@@ -126,7 +126,7 @@ def run(self, value: int):
return {"value": 1}
comp = MockComponent()
- assert comp.__haystack_output__ == {"value": OutputSocket("value", int)}
+ assert comp.__haystack_output__._sockets_dict == {"value": OutputSocket("value", int)}
def test_output_types_decorator_with_compatible_type():
@@ -144,7 +144,7 @@ def from_dict(cls, data):
return cls()
comp = MockComponent()
- assert comp.__haystack_output__ == {"value": OutputSocket("value", int)}
+ assert comp.__haystack_output__._sockets_dict == {"value": OutputSocket("value", int)}
def test_component_decorator_set_it_as_component():
@@ -173,8 +173,8 @@ def run(self, value: int = 42):
return {"value": value}
comp = MockComponent()
- assert comp.__haystack_input__["value"].default_value == 42
- assert not comp.__haystack_input__["value"].is_mandatory
+ assert comp.__haystack_input__._sockets_dict["value"].default_value == 42
+ assert not comp.__haystack_input__._sockets_dict["value"].is_mandatory
def test_keyword_only_args():
@@ -187,5 +187,5 @@ def run(self, *, arg: int):
return {"value": arg}
comp = MockComponent()
- component_inputs = {name: {"type": socket.type} for name, socket in comp.__haystack_input__.items()}
+ component_inputs = {name: {"type": socket.type} for name, socket in comp.__haystack_input__._sockets_dict.items()}
assert component_inputs == {"arg": {"type": int}}
diff --git a/test/core/component/test_sockets.py b/test/core/component/test_sockets.py
new file mode 100644
index 0000000000..ac3b01bda8
--- /dev/null
+++ b/test/core/component/test_sockets.py
@@ -0,0 +1,57 @@
+import pytest
+
+from haystack.core.component.sockets import InputSocket, Sockets
+from haystack.core.pipeline import Pipeline
+from haystack.testing.factory import component_class
+
+
+class TestSockets:
+ def test_init(self):
+ comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
+ sockets = {"input_1": InputSocket("input_1", int), "input_2": InputSocket("input_2", int)}
+ io = Sockets(component=comp, sockets_dict=sockets, sockets_io_type=InputSocket)
+ assert io._component == comp
+ assert "input_1" in io.__dict__
+ assert io.__dict__["input_1"] == comp.__haystack_input__._sockets_dict["input_1"]
+ assert "input_2" in io.__dict__
+ assert io.__dict__["input_2"] == comp.__haystack_input__._sockets_dict["input_2"]
+
+ def test_init_with_empty_sockets(self):
+ comp = component_class("SomeComponent")()
+ io = Sockets(component=comp, sockets_dict={}, sockets_io_type=InputSocket)
+
+ assert io._component == comp
+ assert io._sockets_dict == {}
+
+ def test_component_name(self):
+ comp = component_class("SomeComponent")()
+ io = Sockets(component=comp, sockets_dict={}, sockets_io_type=InputSocket)
+ assert io._component_name() == str(comp)
+
+ def test_component_name_added_to_pipeline(self):
+ comp = component_class("SomeComponent")()
+ pipeline = Pipeline()
+ pipeline.add_component("my_component", comp)
+
+ io = Sockets(component=comp, sockets_dict={}, sockets_io_type=InputSocket)
+ assert io._component_name() == "my_component"
+
+ def test_getattribute(self):
+ comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
+ io = Sockets(component=comp, sockets_dict=comp.__haystack_input__._sockets_dict, sockets_io_type=InputSocket)
+
+ assert io.input_1 == comp.__haystack_input__._sockets_dict["input_1"]
+ assert io.input_2 == comp.__haystack_input__._sockets_dict["input_2"]
+
+ def test_getattribute_non_existing_socket(self):
+ comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
+ io = Sockets(component=comp, sockets_dict=comp.__haystack_input__._sockets_dict, sockets_io_type=InputSocket)
+
+ with pytest.raises(AttributeError):
+ io.input_3
+
+ def test_repr(self):
+ comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
+ io = Sockets(component=comp, sockets_dict=comp.__haystack_input__._sockets_dict, sockets_io_type=InputSocket)
+ res = repr(io)
+ assert res == f"{comp} inputs:\n - input_1: int\n - input_2: int"
diff --git a/test/core/pipeline/test_pipeline.py b/test/core/pipeline/test_pipeline.py
index 5930f5e570..45f1c883da 100644
--- a/test/core/pipeline/test_pipeline.py
+++ b/test/core/pipeline/test_pipeline.py
@@ -6,7 +6,7 @@
import pytest
-from haystack.core.component.sockets import InputSocket, OutputSocket
+from haystack.core.component.types import InputSocket, OutputSocket
from haystack.core.errors import PipelineError, PipelineRuntimeError
from haystack.core.pipeline import Pipeline
from haystack.testing.factory import component_class
@@ -28,6 +28,21 @@ def test_add_component_to_different_pipelines():
second_pipe.add_component("some", some_component)
+def test_get_component_name():
+ pipe = Pipeline()
+ some_component = component_class("Some")()
+ pipe.add_component("some", some_component)
+
+ assert pipe.get_component_name(some_component) == "some"
+
+
+def test_get_component_name_not_added_to_pipeline():
+ pipe = Pipeline()
+ some_component = component_class("Some")()
+
+ assert pipe.get_component_name(some_component) == ""
+
+
def test_run_with_component_that_does_not_return_dict():
BrokenComponent = component_class(
"BrokenComponent", input_types={"a": int}, output_types={"b": int}, output=1 # type:ignore
diff --git a/test/core/pipeline/test_validation_pipeline_io.py b/test/core/pipeline/test_validation_pipeline_io.py
index 47fb4c592d..f9160799fe 100644
--- a/test/core/pipeline/test_validation_pipeline_io.py
+++ b/test/core/pipeline/test_validation_pipeline_io.py
@@ -5,8 +5,7 @@
import pytest
-from haystack.core.component.sockets import InputSocket, OutputSocket
-from haystack.core.component.types import Variadic
+from haystack.core.component.types import InputSocket, OutputSocket, Variadic
from haystack.core.errors import PipelineValidationError
from haystack.core.pipeline import Pipeline
from haystack.core.pipeline.descriptions import find_pipeline_inputs, find_pipeline_outputs
| [
{
"components": [
{
"doc": " This class is used to represent the inputs or outputs of a `Component`.\n Depending on the type passed to the constructor, it will represent either the inputs or the outputs of\n the `Component`.\n\n Usage:\n ```python\n from haystack.components.build... | [
"test/components/builders/test_dynamic_chat_prompt_builder.py::TestDynamicChatPromptBuilder::test_initialization",
"test/components/builders/test_dynamic_chat_prompt_builder.py::TestDynamicChatPromptBuilder::test_non_empty_chat_messages",
"test/components/builders/test_dynamic_chat_prompt_builder.py::TestDynami... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Change Component's I/O dunder type
### Proposed Changes:
Change `__haystack_input__` and `__haystack_output__` type from `Dict` to dedicated `Sockets` class.
### How did you test it?
Added new tests.
### Notes for the reviewer
N/A
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/core/component/sockets.py]
(definition of Sockets:)
class Sockets:
""" This class is used to represent the inputs or outputs of a `Component`.
Depending on the type passed to the constructor, it will represent either the inputs or the outputs of
the `Component`.
Usage:
```python
from haystack.components.builders.prompt_builder import PromptBuilder
prompt_template = """
Given these documents, answer the question.
Documents:
{% for doc in documents %}
{{ doc.content }}
{% endfor %}
Question: {{question}}
Answer:
"""
prompt_builder = PromptBuilder(template=prompt_template)
sockets = {"question": InputSocket("question", Any), "documents": InputSocket("documents", Any)}
inputs = Sockets(component=prompt_builder, sockets=sockets, sockets_type=InputSocket)
inputs
>>> PromptBuilder inputs:
>>> - question: Any
>>> - documents: Any
inputs.question
>>> InputSocket(name='question', type=typing.Any, default_value=<class 'haystack.core.component.types._empty'>, is_variadic=False, senders=[])
```
"""
(definition of Sockets.__init__:)
def __init__( self, component: "Component",
"""Create a new Sockets object.
We don't do any enforcement on the types of the sockets here, the `sockets_type` is only used for
the `__repr__` method.
We could do without it and use the type of a random value in the `sockets` dict, but that wouldn't
work for components that have no sockets at all. Either input or output."""
(definition of Sockets.__setitem__:)
def __setitem__(self, key: str, socket: Union[InputSocket, OutputSocket]):
"""Adds a new socket to this Sockets object.
This eases a bit updating the list of sockets after Sockets has been created.
That should happen only in the `component` decorator."""
(definition of Sockets._component_name:)
def _component_name(self) -> str:
(definition of Sockets.__getattribute__:)
def __getattribute__(self, name):
(definition of Sockets.__repr__:)
def __repr__(self) -> str:
[end of new definitions in haystack/core/component/sockets.py]
[start of new definitions in haystack/core/component/types.py]
(definition of _empty:)
class _empty:
"""Custom object for marking InputSocket.default_value as not set."""
(definition of InputSocket:)
class InputSocket:
(definition of InputSocket.is_mandatory:)
def is_mandatory(self):
(definition of InputSocket.__post_init__:)
def __post_init__(self):
(definition of OutputSocket:)
class OutputSocket:
[end of new definitions in haystack/core/component/types.py]
[start of new definitions in haystack/core/pipeline/pipeline.py]
(definition of Pipeline.get_component_name:)
def get_component_name(self, instance: Component) -> str:
"""Returns the name of a Component instance. If the Component has not been added to this Pipeline,
returns an empty string."""
[end of new definitions in haystack/core/pipeline/pipeline.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | ||
joke2k__faker-1986 | 1,986 | joke2k/faker | null | 24134d754fac1469b25c0fdbc399e9fd39fe0dbd | 2024-02-04T18:33:05Z | diff --git a/faker/providers/automotive/uk_UA/__init__.py b/faker/providers/automotive/uk_UA/__init__.py
new file mode 100644
index 0000000000..55544bd078
--- /dev/null
+++ b/faker/providers/automotive/uk_UA/__init__.py
@@ -0,0 +1,153 @@
+import random
+from typing import Optional
+
+from .. import Provider as AutomotiveProvider
+
+
+class Provider(AutomotiveProvider):
+ plate_number_formats = ("####",)
+
+ license_region_data = {
+ "Crimea": (("AK", "KK", "TK", "MK"), "01"),
+ "Kyiv": (("AA", "KA", "TT", "TA"), "11"),
+ "Vinnytsia": (("AB", "KB", "MM", "OK"), "02"),
+ "Volyn": (("AC", "KC", "SM", "TS"), "03"),
+ "Dnipro": (("AE", "KE", "RR", "MI"), "04"),
+ "Donetsk": (("AN", "KH", "TM", "MH"), "05"),
+ "Kyiv_reg": (("AI", "KI", "TI", "ME"), "10"),
+ "Zhytomyr": (("AM", "KM", "TM", "MV"), "06"),
+ "Zakarpattia": (("AO", "KO", "MT", "MO"), "07"),
+ "Zaporizhia": (("AR", "KR", "TR", "MR"), "08"),
+ "IvanoFrankivsk": (("AT", "KT", "TO", "XS"), "09"),
+ "Kirovohrad": (("BA", "NA", "XA", "EA"), "12"),
+ "Luhansk": (("BB", "NV", "EE", "EV"), "13"),
+ "Lviv": (("BS", "NS", "SS", "ES"), "14"),
+ "Mykolaiv": (("BE", "NE", "XE", "XN"), "15"),
+ "Odessa": (("BN", "NN", "OO", "EN"), "16"),
+ "Poltava": (("BI", "NI", "XI", "EI"), "17"),
+ "Rivne": (("BK", "NK", "XK", "EK"), "18"),
+ "Sumy": (("BM", "NM", "XM", "EM"), "19"),
+ "Ternopil": (("BO", "NO", "XO", "EO"), "20"),
+ "Kharkiv": (("AX", "KX", "XX", "EX"), "21"),
+ "Kherson": (("BT", "NT", "XT", "ET"), "22"),
+ "Khmelnytsky": (("BX", "NX", "OX", "RX"), "23"),
+ "Cherkasy": (("SA", "IA", "OA", "RA"), "24"),
+ "Chernihiv": (("SV", "IV", "OV", "RV"), "25"),
+ "Chernivtsi": (("SE", "IE", "OE", "RE"), "26"),
+ "Sevastopol": (("SN", "IN", "ON", "RN"), "27"),
+ "Nationwide": (("II", "ED", "DC", "DI", "PD"), "00")
+ }
+
+ license_plate_suffix = (
+ "AA", "BA", "CA", "EA", "HA", "IA", "KA", "MA", "OA", "PA", "TA", "XA",
+ "AB", "BB", "CB", "EB", "HB", "IB", "KB", "MB", "OB", "PB", "TB", "XB",
+ "AC", "BC", "BR", "EC", "HC", "IC", "KC", "MC", "OC", "PC", "TC", "XC",
+ "AE", "BE", "CE", "EE", "HE", "IE", "KE", "ME", "OE", "PE", "TE", "XE",
+ "AN", "BN", "CN", "EN", "HN", "IN", "KN", "MK", "ON", "PN", "TN", "XN",
+ "AI", "BI", "CI", "EI", "HI", "II", "KI", "MI", "OI", "PI", "TI", "XI",
+ "AK", "BK", "CK", "EK", "HK", "IK", "KK", "MK", "OK", "PK", "TK", "XK",
+ "AM", "BM", "CM", "EM", "HM", "IM", "KM", "MM", "OM", "PM", "TM", "XM",
+ "AO", "BO", "CO", "EO", "HO", "IO", "KO", "MO", "OO", "PO", "TO", "XO",
+ "AP", "BP", "CP", "EP", "HP", "IP", "KP", "MP", "OP", "PP", "TP", "XP",
+ "AT", "BT", "CT", "ET", "HT", "IT", "KT", "MT", "OT", "PT", "TT", "XT",
+ "AX", "BX", "CX", "EX", "HX", "IX", "KX", "MX", "OX", "PX", "TX", "XX",
+ "AY", "AZ", "BH", "BL", "BN", "BQ", "BR", "TU", "TV", "TY", "TZ"
+ )
+
+ vehicle_categories = (
+ "A1", "A", "B1", "B",
+ "C1", "C", "D1", "D",
+ "BE", "C1E", "CE", "D1E",
+ "DE", "T"
+ )
+
+ def __get_random_region_code(self, region_name: Optional[str] = None) -> (str, str):
+ try:
+ if region_name is None:
+ region_name, data = random.choice(list(self.license_region_data.items()))
+
+ prefix, region_number = self.license_region_data[region_name]
+ return random.choice(prefix), region_number
+ except KeyError:
+ region_names = ", ".join(self.license_region_data.keys())
+ raise KeyError(f'Keys name must be only {region_names}')
+
+ def license_plate(self, region_name: Optional[str] = None, temporary_plate: bool = False) -> str:
+ """Generate a license plate.
+
+ - If ``region_name`` is ``None`` (default), its value will be set to a random.
+ - If ``region_name`` is ``Kyiv``, will use this region in build of license plates.
+ - If ``temporary_plate`` is ``False`` (default), generate license plate AA0000AA format
+ - If ``temporary_plate`` is ``True``, generate temporary plate format 01 AA0000
+ 01 - 27 it's region number
+
+ :sample:
+ :sample: region_name=None, temporary_plate=False
+ :sample: region_name=None, temporary_plate=True
+ :sample: region_name="Kyiv", temporary_plate=False
+ :sample: region_name="Kyiv", temporary_plate=True
+ """
+ region, region_number = self.__get_random_region_code(region_name)
+ if temporary_plate:
+ return f"{region_number} {region}{self.plate_number()}"
+
+ number = self.plate_number()
+ series = self.plate_letter_suffix()
+ return f"{region}{number}{series}"
+
+ def plate_region_code(self, region_name: Optional[str] = None) -> str:
+ """
+ Generate plate region number
+
+ :sample:
+ :sample: region_name="Kyiv"
+ """
+ _, region_number = self.__get_random_region_code(region_name)
+ return region_number
+
+ def plate_letter_prefix(self, region_name: Optional[str] = None) -> str:
+ """
+ Generate a letter for license plates.
+
+ :sample:
+ :sample: region_name="Kyiv"
+ """
+ letters, _ = self.__get_random_region_code(region_name)
+ return letters
+
+ def plate_letter_suffix(self) -> str:
+ """
+ Generate a end letter for license plates.
+
+ :sample:
+ """
+ return self.random_element(self.license_plate_suffix)
+
+ def plate_number(self) -> str:
+ """
+ Generate a number for license plates.
+
+ :sample:
+ """
+ return self.numerify(self.random_element(self.plate_number_formats))
+
+ def diplomatic_license_plate(self) -> str:
+ """
+ Example: 'CDP 000' or 'DP 000 000' or 'S 000 000' format
+
+ :sample:
+ """
+ level = random.choice(("CDP", "DP", "S"))
+ country_code = self.random_number(3, fix_len=True)
+ car_number = self.random_number(3, fix_len=True)
+ if level == 'CDP':
+ return f"{level} {country_code}"
+ return f"{level} {country_code} {car_number}"
+
+ def vehicle_category(self) -> str:
+ """
+ Generate a vehicle category code for license plates.
+
+ :sample:
+ """
+ return self.random_element(self.vehicle_categories)
| diff --git a/tests/providers/test_automotive.py b/tests/providers/test_automotive.py
index ad193bc1f1..d74a3ecc60 100644
--- a/tests/providers/test_automotive.py
+++ b/tests/providers/test_automotive.py
@@ -9,6 +9,7 @@
from faker.providers.automotive.ru_RU import Provider as RuRuAutomotiveProvider
from faker.providers.automotive.sk_SK import Provider as SkSkAutomotiveProvider
from faker.providers.automotive.tr_TR import Provider as TrTrAutomotiveProvider
+from faker.providers.automotive.uk_UA import Provider as UkUaAutomotiveProvider
class _SimpleAutomotiveTestMixin:
@@ -347,3 +348,35 @@ class TestZhTw(_SimpleAutomotiveTestMixin):
r"([A-Z]{3}-\d{4})|" # new format since 2014
r"([A-Z]{3}-\d{3})", # commercial cars since 2012
)
+
+
+class TestUkUa(_SimpleAutomotiveTestMixin):
+ license_plate_pattern: Pattern = re.compile(r"[A-Z]{2}\d{4}[A-Z]{2}")
+
+ def perform_extra_checks(self, license_plate, match):
+ assert license_plate[-2:] in UkUaAutomotiveProvider.license_plate_suffix
+
+ def test_temporary_plate(self, faker, num_samples):
+ pattern = r"\d{2} [A-Z]{2}\d{4}"
+
+ for _ in range(num_samples):
+ temporary = faker.license_plate(temporary_plate=True)
+ match = re.search(pattern, temporary)
+ assert match is not None
+
+ def test_diplomatic_plate(self, faker, num_samples):
+ pattern = r"(CDP \d{3})|(DP|S) \d{3} \d{3}"
+
+ for _ in range(num_samples):
+ temporary = faker.diplomatic_license_plate()
+ match = re.search(pattern, temporary)
+ assert match is not None
+
+ def test_prefix(self, faker):
+ for _ in range(10):
+ temporary = faker.plate_letter_prefix(region_name='Lviv')
+ assert len(temporary) == 2
+ assert temporary in UkUaAutomotiveProvider.license_region_data.get('Lviv')[0]
+
+ def test_region_code(self, faker):
+ assert "14" == faker.plate_region_code(region_name='Lviv')
| [
{
"components": [
{
"doc": "",
"lines": [
7,
153
],
"name": "Provider",
"signature": "class Provider(AutomotiveProvider):",
"type": "class"
},
{
"doc": "",
"lines": [
64,
73
],
... | [
"tests/providers/test_automotive.py::TestArBh::test_license_plate",
"tests/providers/test_automotive.py::TestArBh::test_vin",
"tests/providers/test_automotive.py::TestAzAz::test_license_plate",
"tests/providers/test_automotive.py::TestAzAz::test_vin",
"tests/providers/test_automotive.py::TestSkSk::test_lice... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat(uk_UA-automotive) Add uk_UA automotive provider
### What does this change
Add Uk_UA Automotive provider
### What was wrong
This provider was absent in uk_UA locale
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/automotive/uk_UA/__init__.py]
(definition of Provider:)
class Provider(AutomotiveProvider):
(definition of Provider.__get_random_region_code:)
def __get_random_region_code(self, region_name: Optional[str] = None) -> (str, str):
(definition of Provider.license_plate:)
def license_plate(self, region_name: Optional[str] = None, temporary_plate: bool = False) -> str:
"""Generate a license plate.
- If ``region_name`` is ``None`` (default), its value will be set to a random.
- If ``region_name`` is ``Kyiv``, will use this region in build of license plates.
- If ``temporary_plate`` is ``False`` (default), generate license plate AA0000AA format
- If ``temporary_plate`` is ``True``, generate temporary plate format 01 AA0000
01 - 27 it's region number
:sample:
:sample: region_name=None, temporary_plate=False
:sample: region_name=None, temporary_plate=True
:sample: region_name="Kyiv", temporary_plate=False
:sample: region_name="Kyiv", temporary_plate=True"""
(definition of Provider.plate_region_code:)
def plate_region_code(self, region_name: Optional[str] = None) -> str:
"""Generate plate region number
:sample:
:sample: region_name="Kyiv""""
(definition of Provider.plate_letter_prefix:)
def plate_letter_prefix(self, region_name: Optional[str] = None) -> str:
"""Generate a letter for license plates.
:sample:
:sample: region_name="Kyiv""""
(definition of Provider.plate_letter_suffix:)
def plate_letter_suffix(self) -> str:
"""Generate a end letter for license plates.
:sample:"""
(definition of Provider.plate_number:)
def plate_number(self) -> str:
"""Generate a number for license plates.
:sample:"""
(definition of Provider.diplomatic_license_plate:)
def diplomatic_license_plate(self) -> str:
"""Example: 'CDP 000' or 'DP 000 000' or 'S 000 000' format
:sample:"""
(definition of Provider.vehicle_category:)
def vehicle_category(self) -> str:
"""Generate a vehicle category code for license plates.
:sample:"""
[end of new definitions in faker/providers/automotive/uk_UA/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
roboflow__supervision-847 | 847 | roboflow/supervision | null | d423ff3b6c74f38713255f18e363ea2e1986f5dd | 2024-02-03T03:10:21Z | diff --git a/docs/detection/utils.md b/docs/detection/utils.md
index 54d1f279f..74adc6e74 100644
--- a/docs/detection/utils.md
+++ b/docs/detection/utils.md
@@ -11,10 +11,22 @@ comments: true
:::supervision.detection.utils.box_iou_batch
<div class="md-typeset">
- <h2>non_max_suppression</h2>
+ <h2>mask_iou_batch</h2>
</div>
-:::supervision.detection.utils.non_max_suppression
+:::supervision.detection.utils.mask_iou_batch
+
+<div class="md-typeset">
+ <h2>box_non_max_suppression</h2>
+</div>
+
+:::supervision.detection.utils.box_non_max_suppression
+
+<div class="md-typeset">
+ <h2>mask_non_max_suppression</h2>
+</div>
+
+:::supervision.detection.utils.mask_non_max_suppression
<div class="md-typeset">
<h2>polygon_to_mask</h2>
diff --git a/supervision/__init__.py b/supervision/__init__.py
index 76abe5996..961162836 100644
--- a/supervision/__init__.py
+++ b/supervision/__init__.py
@@ -42,12 +42,14 @@
from supervision.detection.tools.smoother import DetectionsSmoother
from supervision.detection.utils import (
box_iou_batch,
+ box_non_max_suppression,
calculate_masks_centroids,
filter_polygons_by_area,
+ mask_iou_batch,
+ mask_non_max_suppression,
mask_to_polygons,
mask_to_xyxy,
move_boxes,
- non_max_suppression,
polygon_to_mask,
polygon_to_xyxy,
scale_boxes,
diff --git a/supervision/detection/core.py b/supervision/detection/core.py
index 7b9249630..e727426bf 100644
--- a/supervision/detection/core.py
+++ b/supervision/detection/core.py
@@ -8,12 +8,13 @@
from supervision.config import CLASS_NAME_DATA_FIELD, ORIENTED_BOX_COORDINATES
from supervision.detection.utils import (
+ box_non_max_suppression,
calculate_masks_centroids,
extract_ultralytics_masks,
get_data_item,
is_data_equal,
+ mask_non_max_suppression,
merge_data,
- non_max_suppression,
process_roboflow_result,
validate_detections_fields,
xywh_to_xyxy,
@@ -1001,7 +1002,8 @@ def with_nms(
self, threshold: float = 0.5, class_agnostic: bool = False
) -> Detections:
"""
- Perform non-maximum suppression on the current set of object detections.
+ Performs non-max suppression on detection set. If the detections result
+ from a segmentation model, the IoU mask is applied. Otherwise, box IoU is used.
Args:
threshold (float, optional): The intersection-over-union threshold
@@ -1028,18 +1030,26 @@ def with_nms(
if class_agnostic:
predictions = np.hstack((self.xyxy, self.confidence.reshape(-1, 1)))
- indices = non_max_suppression(
- predictions=predictions, iou_threshold=threshold
+ else:
+ assert self.class_id is not None, (
+ "Detections class_id must be given for NMS to be executed. If you"
+ " intended to perform class agnostic NMS set class_agnostic=True."
+ )
+ predictions = np.hstack(
+ (
+ self.xyxy,
+ self.confidence.reshape(-1, 1),
+ self.class_id.reshape(-1, 1),
+ )
)
- return self[indices]
- assert self.class_id is not None, (
- "Detections class_id must be given for NMS to be executed. If you intended"
- " to perform class agnostic NMS set class_agnostic=True."
- )
+ if self.mask is not None:
+ indices = mask_non_max_suppression(
+ predictions=predictions, masks=self.mask, iou_threshold=threshold
+ )
+ else:
+ indices = box_non_max_suppression(
+ predictions=predictions, iou_threshold=threshold
+ )
- predictions = np.hstack(
- (self.xyxy, self.confidence.reshape(-1, 1), self.class_id.reshape(-1, 1))
- )
- indices = non_max_suppression(predictions=predictions, iou_threshold=threshold)
return self[indices]
diff --git a/supervision/detection/utils.py b/supervision/detection/utils.py
index e8db4627e..f411df288 100644
--- a/supervision/detection/utils.py
+++ b/supervision/detection/utils.py
@@ -59,7 +59,119 @@ def box_area(box):
return area_inter / (area_true[:, None] + area_detection - area_inter)
-def non_max_suppression(
+def mask_iou_batch(masks_true: np.ndarray, masks_detection: np.ndarray) -> np.ndarray:
+ """
+ Compute Intersection over Union (IoU) of two sets of masks -
+ `masks_true` and `masks_detection`.
+
+ Args:
+ masks_true (np.ndarray): 3D `np.ndarray` representing ground-truth masks.
+ masks_detection (np.ndarray): 3D `np.ndarray` representing detection masks.
+
+ Returns:
+ np.ndarray: Pairwise IoU of masks from `masks_true` and `masks_detection`.
+ """
+ intersection_area = np.logical_and(masks_true[:, None], masks_detection).sum(
+ axis=(2, 3)
+ )
+ masks_true_area = masks_true.sum(axis=(1, 2))
+ masks_detection_area = masks_detection.sum(axis=(1, 2))
+
+ union_area = masks_true_area[:, None] + masks_detection_area - intersection_area
+
+ return np.divide(
+ intersection_area,
+ union_area,
+ out=np.zeros_like(intersection_area, dtype=float),
+ where=union_area != 0,
+ )
+
+
+def resize_masks(masks: np.ndarray, max_dimension: int = 640) -> np.ndarray:
+ """
+ Resize all masks in the array to have a maximum dimension of max_dimension,
+ maintaining aspect ratio.
+
+ Args:
+ masks (np.ndarray): 3D array of binary masks with shape (N, H, W).
+ max_dimension (int): The maximum dimension for the resized masks.
+
+ Returns:
+ np.ndarray: Array of resized masks.
+ """
+ max_height = np.max(masks.shape[1])
+ max_width = np.max(masks.shape[2])
+ scale = min(max_dimension / max_height, max_dimension / max_width)
+
+ new_height = int(scale * max_height)
+ new_width = int(scale * max_width)
+
+ x = np.linspace(0, max_width - 1, new_width).astype(int)
+ y = np.linspace(0, max_height - 1, new_height).astype(int)
+ xv, yv = np.meshgrid(x, y)
+
+ resized_masks = masks[:, yv, xv]
+
+ resized_masks = resized_masks.reshape(masks.shape[0], new_height, new_width)
+ return resized_masks
+
+
+def mask_non_max_suppression(
+ predictions: np.ndarray,
+ masks: np.ndarray,
+ iou_threshold: float = 0.5,
+ mask_dimension: int = 640,
+) -> np.ndarray:
+ """
+ Perform Non-Maximum Suppression (NMS) on segmentation predictions.
+
+ Args:
+ predictions (np.ndarray): A 2D array of object detection predictions in
+ the format of `(x_min, y_min, x_max, y_max, score)`
+ or `(x_min, y_min, x_max, y_max, score, class)`. Shape: `(N, 5)` or
+ `(N, 6)`, where N is the number of predictions.
+ masks (np.ndarray): A 3D array of binary masks corresponding to the predictions.
+ Shape: `(N, H, W)`, where N is the number of predictions, and H, W are the
+ dimensions of each mask.
+ iou_threshold (float, optional): The intersection-over-union threshold
+ to use for non-maximum suppression.
+ mask_dimension (int, optional): The dimension to which the masks should be
+ resized before computing IOU values. Defaults to 640.
+
+ Returns:
+ np.ndarray: A boolean array indicating which predictions to keep after
+ non-maximum suppression.
+
+ Raises:
+ AssertionError: If `iou_threshold` is not within the closed
+ range from `0` to `1`.
+ """
+ assert 0 <= iou_threshold <= 1, (
+ "Value of `iou_threshold` must be in the closed range from 0 to 1, "
+ f"{iou_threshold} given."
+ )
+ rows, columns = predictions.shape
+
+ if columns == 5:
+ predictions = np.c_[predictions, np.zeros(rows)]
+
+ sort_index = predictions[:, 4].argsort()[::-1]
+ predictions = predictions[sort_index]
+ masks = masks[sort_index]
+ masks_resized = resize_masks(masks, mask_dimension)
+ ious = mask_iou_batch(masks_resized, masks_resized)
+ categories = predictions[:, 5]
+
+ keep = np.ones(rows, dtype=bool)
+ for i in range(rows):
+ if keep[i]:
+ condition = (ious[i] > iou_threshold) & (categories[i] == categories)
+ keep[i + 1 :] = np.where(condition[i + 1 :], False, keep[i + 1 :])
+
+ return keep[sort_index.argsort()]
+
+
+def box_non_max_suppression(
predictions: np.ndarray, iou_threshold: float = 0.5
) -> np.ndarray:
"""
| diff --git a/test/detection/test_utils.py b/test/detection/test_utils.py
index 576b25f8f..d09348ff5 100644
--- a/test/detection/test_utils.py
+++ b/test/detection/test_utils.py
@@ -6,13 +6,14 @@
from supervision.config import CLASS_NAME_DATA_FIELD
from supervision.detection.utils import (
+ box_non_max_suppression,
calculate_masks_centroids,
clip_boxes,
filter_polygons_by_area,
get_data_item,
+ mask_non_max_suppression,
merge_data,
move_boxes,
- non_max_suppression,
process_roboflow_result,
scale_boxes,
)
@@ -113,19 +114,225 @@
), # three boxes with different category
],
)
-def test_non_max_suppression(
+def test_box_non_max_suppression(
predictions: np.ndarray,
iou_threshold: float,
expected_result: Optional[np.ndarray],
exception: Exception,
) -> None:
with exception:
- result = non_max_suppression(
+ result = box_non_max_suppression(
predictions=predictions, iou_threshold=iou_threshold
)
assert np.array_equal(result, expected_result)
+@pytest.mark.parametrize(
+ "predictions, masks, iou_threshold, expected_result, exception",
+ [
+ (
+ np.empty((0, 6)),
+ np.empty((0, 5, 5)),
+ 0.5,
+ np.array([]),
+ DoesNotRaise(),
+ ), # empty predictions and masks
+ (
+ np.array([[0, 0, 0, 0, 0.8]]),
+ np.array(
+ [
+ [
+ [False, False, False, False, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, False, False, False, False],
+ ]
+ ]
+ ),
+ 0.5,
+ np.array([True]),
+ DoesNotRaise(),
+ ), # single mask with no category
+ (
+ np.array([[0, 0, 0, 0, 0.8, 0]]),
+ np.array(
+ [
+ [
+ [False, False, False, False, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, False, False, False, False],
+ ]
+ ]
+ ),
+ 0.5,
+ np.array([True]),
+ DoesNotRaise(),
+ ), # single mask with category
+ (
+ np.array([[0, 0, 0, 0, 0.8], [0, 0, 0, 0, 0.9]]),
+ np.array(
+ [
+ [
+ [False, False, False, False, False],
+ [False, True, True, False, False],
+ [False, True, True, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False],
+ ],
+ [
+ [False, False, False, False, False],
+ [False, False, False, False, False],
+ [False, False, False, True, True],
+ [False, False, False, True, True],
+ [False, False, False, False, False],
+ ],
+ ]
+ ),
+ 0.5,
+ np.array([True, True]),
+ DoesNotRaise(),
+ ), # two masks non-overlapping with no category
+ (
+ np.array([[0, 0, 0, 0, 0.8], [0, 0, 0, 0, 0.9]]),
+ np.array(
+ [
+ [
+ [False, False, False, False, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, False, False, False, False],
+ ],
+ [
+ [False, False, False, False, False],
+ [False, False, True, True, True],
+ [False, False, True, True, True],
+ [False, False, True, True, True],
+ [False, False, False, False, False],
+ ],
+ ]
+ ),
+ 0.4,
+ np.array([False, True]),
+ DoesNotRaise(),
+ ), # two masks partially overlapping with no category
+ (
+ np.array([[0, 0, 0, 0, 0.8, 0], [0, 0, 0, 0, 0.9, 1]]),
+ np.array(
+ [
+ [
+ [False, False, False, False, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, True, True, True, False],
+ [False, False, False, False, False],
+ ],
+ [
+ [False, False, False, False, False],
+ [False, False, True, True, True],
+ [False, False, True, True, True],
+ [False, False, True, True, True],
+ [False, False, False, False, False],
+ ],
+ ]
+ ),
+ 0.5,
+ np.array([True, True]),
+ DoesNotRaise(),
+ ), # two masks partially overlapping with different category
+ (
+ np.array(
+ [
+ [0, 0, 0, 0, 0.8],
+ [0, 0, 0, 0, 0.85],
+ [0, 0, 0, 0, 0.9],
+ ]
+ ),
+ np.array(
+ [
+ [
+ [False, False, False, False, False],
+ [False, True, True, False, False],
+ [False, True, True, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False],
+ ],
+ [
+ [False, False, False, False, False],
+ [False, True, True, False, False],
+ [False, True, True, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False],
+ ],
+ [
+ [False, False, False, False, False],
+ [False, False, False, True, True],
+ [False, False, False, True, True],
+ [False, False, False, False, False],
+ [False, False, False, False, False],
+ ],
+ ]
+ ),
+ 0.5,
+ np.array([False, True, True]),
+ DoesNotRaise(),
+ ), # three masks with no category
+ (
+ np.array(
+ [
+ [0, 0, 0, 0, 0.8, 0],
+ [0, 0, 0, 0, 0.85, 1],
+ [0, 0, 0, 0, 0.9, 2],
+ ]
+ ),
+ np.array(
+ [
+ [
+ [False, False, False, False, False],
+ [False, True, True, False, False],
+ [False, True, True, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False],
+ ],
+ [
+ [False, False, False, False, False],
+ [False, True, True, False, False],
+ [False, True, True, False, False],
+ [False, True, True, False, False],
+ [False, False, False, False, False],
+ ],
+ [
+ [False, False, False, False, False],
+ [False, True, True, False, False],
+ [False, True, True, False, False],
+ [False, False, False, False, False],
+ [False, False, False, False, False],
+ ],
+ ]
+ ),
+ 0.5,
+ np.array([True, True, True]),
+ DoesNotRaise(),
+ ), # three masks with different category
+ ],
+)
+def test_mask_non_max_suppression(
+ predictions: np.ndarray,
+ masks: np.ndarray,
+ iou_threshold: float,
+ expected_result: Optional[np.ndarray],
+ exception: Exception,
+) -> None:
+ with exception:
+ result = mask_non_max_suppression(
+ predictions=predictions, masks=masks, iou_threshold=iou_threshold
+ )
+ assert np.array_equal(result, expected_result)
+
+
@pytest.mark.parametrize(
"xyxy, resolution_wh, expected_result",
[
| diff --git a/docs/detection/utils.md b/docs/detection/utils.md
index 54d1f279f..74adc6e74 100644
--- a/docs/detection/utils.md
+++ b/docs/detection/utils.md
@@ -11,10 +11,22 @@ comments: true
:::supervision.detection.utils.box_iou_batch
<div class="md-typeset">
- <h2>non_max_suppression</h2>
+ <h2>mask_iou_batch</h2>
</div>
-:::supervision.detection.utils.non_max_suppression
+:::supervision.detection.utils.mask_iou_batch
+
+<div class="md-typeset">
+ <h2>box_non_max_suppression</h2>
+</div>
+
+:::supervision.detection.utils.box_non_max_suppression
+
+<div class="md-typeset">
+ <h2>mask_non_max_suppression</h2>
+</div>
+
+:::supervision.detection.utils.mask_non_max_suppression
<div class="md-typeset">
<h2>polygon_to_mask</h2>
| [
{
"components": [
{
"doc": "Compute Intersection over Union (IoU) of two sets of masks -\n `masks_true` and `masks_detection`.\n\nArgs:\n masks_true (np.ndarray): 3D `np.ndarray` representing ground-truth masks.\n masks_detection (np.ndarray): 3D `np.ndarray` representing detection masks.... | [
"test/detection/test_utils.py::test_box_non_max_suppression[predictions0-0.5-expected_result0-exception0]",
"test/detection/test_utils.py::test_box_non_max_suppression[predictions1-0.5-expected_result1-exception1]",
"test/detection/test_utils.py::test_box_non_max_suppression[predictions2-0.5-expected_result2-ex... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
[NMS] - add segmentation models support
# Description
This PR introduces Non-Maximum Suppression (NMS) algorithm focused on segmentation, enhancing our object detection capabilities, particularly in segmentation tasks. We have renamed the traditional `non_max_suppression` function to `box_non_max_suppression` for better clarity regarding its application to bounding boxes. Furthermore, we've integrated a conditional mechanism within the `with_nms` function that utilizes segmentation masks for NMS when such masks are present in the predictions. This optimization leverages the spatial context provided by segmentation masks to improve the suppression process.
This enhancement is part of a task segmented into two parts for more focused development and review. This PR addresses the first part, as discussed in the related issue [here](https://github.com/roboflow/supervision/issues/678). Splitting the task ensures thorough implementation and testing of each component.
In addition, this PR encompasses comprehensive unit tests for the new NMS functionality and a demo that demonstrates the algorithm's application and effectiveness in real-world scenarios.
## Type of change
- [x] New feature (non-breaking change which adds functionality)
## How has this change been tested
I created a demo to showcase this functionality [here](https://colab.research.google.com/drive/1lpJKXryY59FTrgZU85a3r0KK7gm1OTNh?usp=sharing)
## Docs
- [x] Docs updated? What were the changes:
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in supervision/detection/utils.py]
(definition of mask_iou_batch:)
def mask_iou_batch(masks_true: np.ndarray, masks_detection: np.ndarray) -> np.ndarray:
"""Compute Intersection over Union (IoU) of two sets of masks -
`masks_true` and `masks_detection`.
Args:
masks_true (np.ndarray): 3D `np.ndarray` representing ground-truth masks.
masks_detection (np.ndarray): 3D `np.ndarray` representing detection masks.
Returns:
np.ndarray: Pairwise IoU of masks from `masks_true` and `masks_detection`."""
(definition of resize_masks:)
def resize_masks(masks: np.ndarray, max_dimension: int = 640) -> np.ndarray:
"""Resize all masks in the array to have a maximum dimension of max_dimension,
maintaining aspect ratio.
Args:
masks (np.ndarray): 3D array of binary masks with shape (N, H, W).
max_dimension (int): The maximum dimension for the resized masks.
Returns:
np.ndarray: Array of resized masks."""
(definition of mask_non_max_suppression:)
def mask_non_max_suppression( predictions: np.ndarray, masks: np.ndarray, iou_threshold: float = 0.5, mask_dimension: int = 640, ) -> np.ndarray:
"""Perform Non-Maximum Suppression (NMS) on segmentation predictions.
Args:
predictions (np.ndarray): A 2D array of object detection predictions in
the format of `(x_min, y_min, x_max, y_max, score)`
or `(x_min, y_min, x_max, y_max, score, class)`. Shape: `(N, 5)` or
`(N, 6)`, where N is the number of predictions.
masks (np.ndarray): A 3D array of binary masks corresponding to the predictions.
Shape: `(N, H, W)`, where N is the number of predictions, and H, W are the
dimensions of each mask.
iou_threshold (float, optional): The intersection-over-union threshold
to use for non-maximum suppression.
mask_dimension (int, optional): The dimension to which the masks should be
resized before computing IOU values. Defaults to 640.
Returns:
np.ndarray: A boolean array indicating which predictions to keep after
non-maximum suppression.
Raises:
AssertionError: If `iou_threshold` is not within the closed
range from `0` to `1`."""
(definition of box_non_max_suppression:)
def box_non_max_suppression( predictions: np.ndarray, iou_threshold: float = 0.5 ) -> np.ndarray:
"""Perform Non-Maximum Suppression (NMS) on object detection predictions.
Args:
predictions (np.ndarray): An array of object detection predictions in
the format of `(x_min, y_min, x_max, y_max, score)`
or `(x_min, y_min, x_max, y_max, score, class)`.
iou_threshold (float, optional): The intersection-over-union threshold
to use for non-maximum suppression.
Returns:
np.ndarray: A boolean array indicating which predictions to keep after n
on-maximum suppression.
Raises:
AssertionError: If `iou_threshold` is not within the
closed range from `0` to `1`."""
[end of new definitions in supervision/detection/utils.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3eb5c0b024e3e46877b7fe4fd66e6177d1308ba0 | |
scikit-learn__scikit-learn-28351 | 28,351 | scikit-learn/scikit-learn | 1.5 | 8f96794b635985374bbd3cc99a9bc509104a5769 | 2024-02-02T10:23:31Z | diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index 59f014b732e35..7caacd697ea1c 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -2826,6 +2826,51 @@ Here are some usage examples of the :func:`d2_absolute_error_score` function::
|details-end|
+|details-start|
+**D² log loss score**
+|details-split|
+
+The :func:`d2_log_loss_score` function implements the special case
+of D² with the log loss, see :ref:`log_loss`, i.e.:
+
+.. math::
+
+ \text{dev}(y, \hat{y}) = \text{log_loss}(y, \hat{y}).
+
+The :math:`y_{\text{null}}` for the :func:`log_loss` is the per-class
+proportion.
+
+Here are some usage examples of the :func:`d2_log_loss_score` function::
+
+ >>> from sklearn.metrics import d2_log_loss_score
+ >>> y_true = [1, 1, 2, 3]
+ >>> y_pred = [
+ ... [0.5, 0.25, 0.25],
+ ... [0.5, 0.25, 0.25],
+ ... [0.5, 0.25, 0.25],
+ ... [0.5, 0.25, 0.25],
+ ... ]
+ >>> d2_log_loss_score(y_true, y_pred)
+ 0.0
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [
+ ... [0.98, 0.01, 0.01],
+ ... [0.01, 0.98, 0.01],
+ ... [0.01, 0.01, 0.98],
+ ... ]
+ >>> d2_log_loss_score(y_true, y_pred)
+ 0.981...
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [
+ ... [0.1, 0.6, 0.3],
+ ... [0.1, 0.6, 0.3],
+ ... [0.4, 0.5, 0.1],
+ ... ]
+ >>> d2_log_loss_score(y_true, y_pred)
+ -0.552...
+
+|details-end|
+
.. _visualization_regression_evaluation:
Visual evaluation of regression models
diff --git a/doc/whats_new/v1.5.rst b/doc/whats_new/v1.5.rst
index 1fe0df6f97a61..d3064851e7f87 100644
--- a/doc/whats_new/v1.5.rst
+++ b/doc/whats_new/v1.5.rst
@@ -169,7 +169,7 @@ Changelog
..........................
- |Fix| Fixed a regression in :class:`calibration.CalibratedClassifierCV` where
- an error was wrongly raised with string targets.
+ an error was wrongly raised with string targets.
:pr:`28843` by :user:`Jérémie du Boisberranger <jeremiedbb>`.
:mod:`sklearn.cluster`
@@ -406,6 +406,10 @@ Changelog
is deprecated and will raise an error in v1.7.
:pr:`18555` by :user:`Kaushik Amar Das <cozek>`.
+- |Feature| :func:`metrics.d2_log_loss_score` has been added which
+ calculates the D^2 score for the log loss.
+ :pr:`28351` by :user:`Omar Salman <OmarManzoor>`.
+
:mod:`sklearn.mixture`
......................
diff --git a/sklearn/metrics/__init__.py b/sklearn/metrics/__init__.py
index 8a818c885043c..af25a219c79f1 100644
--- a/sklearn/metrics/__init__.py
+++ b/sklearn/metrics/__init__.py
@@ -12,6 +12,7 @@
classification_report,
cohen_kappa_score,
confusion_matrix,
+ d2_log_loss_score,
f1_score,
fbeta_score,
hamming_loss,
@@ -113,6 +114,7 @@
"coverage_error",
"d2_tweedie_score",
"d2_absolute_error_score",
+ "d2_log_loss_score",
"d2_pinball_score",
"dcg_score",
"davies_bouldin_score",
diff --git a/sklearn/metrics/_classification.py b/sklearn/metrics/_classification.py
index caa4db5479a29..04894a4d7a7e7 100644
--- a/sklearn/metrics/_classification.py
+++ b/sklearn/metrics/_classification.py
@@ -53,7 +53,11 @@
from ..utils.extmath import _nanaverage
from ..utils.multiclass import type_of_target, unique_labels
from ..utils.sparsefuncs import count_nonzero
-from ..utils.validation import _check_pos_label_consistency, _num_samples
+from ..utils.validation import (
+ _check_pos_label_consistency,
+ _check_sample_weight,
+ _num_samples,
+)
def _check_zero_division(zero_division):
@@ -3257,3 +3261,96 @@ def brier_score_loss(
raise
y_true = np.array(y_true == pos_label, int)
return np.average((y_true - y_proba) ** 2, weights=sample_weight)
+
+
+@validate_params(
+ {
+ "y_true": ["array-like"],
+ "y_pred": ["array-like"],
+ "sample_weight": ["array-like", None],
+ "labels": ["array-like", None],
+ },
+ prefer_skip_nested_validation=True,
+)
+def d2_log_loss_score(y_true, y_pred, *, sample_weight=None, labels=None):
+ """
+ :math:`D^2` score function, fraction of log loss explained.
+
+ Best possible score is 1.0 and it can be negative (because the model can be
+ arbitrarily worse). A model that always uses the empirical mean of `y_true` as
+ constant prediction, disregarding the input features, gets a D^2 score of 0.0.
+
+ Read more in the :ref:`User Guide <d2_score>`.
+
+ .. versionadded:: 1.5
+
+ Parameters
+ ----------
+ y_true : array-like or label indicator matrix
+ The actuals labels for the n_samples samples.
+
+ y_pred : array-like of shape (n_samples, n_classes) or (n_samples,)
+ Predicted probabilities, as returned by a classifier's
+ predict_proba method. If ``y_pred.shape = (n_samples,)``
+ the probabilities provided are assumed to be that of the
+ positive class. The labels in ``y_pred`` are assumed to be
+ ordered alphabetically, as done by
+ :class:`~sklearn.preprocessing.LabelBinarizer`.
+
+ sample_weight : array-like of shape (n_samples,), default=None
+ Sample weights.
+
+ labels : array-like, default=None
+ If not provided, labels will be inferred from y_true. If ``labels``
+ is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
+ assumed to be binary and are inferred from ``y_true``.
+
+ Returns
+ -------
+ d2 : float or ndarray of floats
+ The D^2 score.
+
+ Notes
+ -----
+ This is not a symmetric function.
+
+ Like R^2, D^2 score may be negative (it need not actually be the square of
+ a quantity D).
+
+ This metric is not well-defined for a single sample and will return a NaN
+ value if n_samples is less than two.
+ """
+ y_pred = check_array(y_pred, ensure_2d=False, dtype="numeric")
+ check_consistent_length(y_pred, y_true, sample_weight)
+ if _num_samples(y_pred) < 2:
+ msg = "D^2 score is not well-defined with less than two samples."
+ warnings.warn(msg, UndefinedMetricWarning)
+ return float("nan")
+
+ # log loss of the fitted model
+ numerator = log_loss(
+ y_true=y_true,
+ y_pred=y_pred,
+ normalize=False,
+ sample_weight=sample_weight,
+ labels=labels,
+ )
+
+ # Proportion of labels in the dataset
+ weights = _check_sample_weight(sample_weight, y_true)
+
+ _, y_value_indices = np.unique(y_true, return_inverse=True)
+ counts = np.bincount(y_value_indices, weights=weights)
+ y_prob = counts / weights.sum()
+ y_pred_null = np.tile(y_prob, (len(y_true), 1))
+
+ # log loss of the null model
+ denominator = log_loss(
+ y_true=y_true,
+ y_pred=y_pred_null,
+ normalize=False,
+ sample_weight=sample_weight,
+ labels=labels,
+ )
+
+ return 1 - (numerator / denominator)
| diff --git a/sklearn/metrics/tests/test_classification.py b/sklearn/metrics/tests/test_classification.py
index 144871c8d02ee..40b762bfa7308 100644
--- a/sklearn/metrics/tests/test_classification.py
+++ b/sklearn/metrics/tests/test_classification.py
@@ -35,7 +35,7 @@
recall_score,
zero_one_loss,
)
-from sklearn.metrics._classification import _check_targets
+from sklearn.metrics._classification import _check_targets, d2_log_loss_score
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelBinarizer, label_binarize
from sklearn.tree import DecisionTreeClassifier
@@ -2895,3 +2895,201 @@ def test_brier_score_loss_deprecation_warning():
y_prob=y_pred,
y_proba=y_pred,
)
+
+
+def test_d2_log_loss_score():
+ y_true = [0, 0, 0, 1, 1, 1]
+ y_true_string = ["no", "no", "no", "yes", "yes", "yes"]
+ y_pred = np.array(
+ [
+ [0.5, 0.5],
+ [0.9, 0.1],
+ [0.4, 0.6],
+ [0.6, 0.4],
+ [0.35, 0.65],
+ [0.01, 0.99],
+ ]
+ )
+ y_pred_null = np.array(
+ [
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ ]
+ )
+ d2_score = d2_log_loss_score(y_true=y_true, y_pred=y_pred)
+ log_likelihood = log_loss(y_true=y_true, y_pred=y_pred, normalize=False)
+ log_likelihood_null = log_loss(y_true=y_true, y_pred=y_pred_null, normalize=False)
+ d2_score_true = 1 - log_likelihood / log_likelihood_null
+ assert d2_score == pytest.approx(d2_score_true)
+
+ # check that using sample weight also gives the correct d2 score
+ sample_weight = np.array([2, 1, 3, 4, 3, 1])
+ y_pred_null[:, 0] = sample_weight[:3].sum() / sample_weight.sum()
+ y_pred_null[:, 1] = sample_weight[3:].sum() / sample_weight.sum()
+ d2_score = d2_log_loss_score(
+ y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+ )
+ log_likelihood = log_loss(
+ y_true=y_true,
+ y_pred=y_pred,
+ sample_weight=sample_weight,
+ normalize=False,
+ )
+ log_likelihood_null = log_loss(
+ y_true=y_true,
+ y_pred=y_pred_null,
+ sample_weight=sample_weight,
+ normalize=False,
+ )
+ d2_score_true = 1 - log_likelihood / log_likelihood_null
+ assert d2_score == pytest.approx(d2_score_true)
+
+ # check if good predictions give a relatively higher value for the d2 score
+ y_pred = np.array(
+ [
+ [0.9, 0.1],
+ [0.8, 0.2],
+ [0.9, 0.1],
+ [0.1, 0.9],
+ [0.2, 0.8],
+ [0.1, 0.9],
+ ]
+ )
+ d2_score = d2_log_loss_score(y_true, y_pred)
+ assert 0.5 < d2_score < 1.0
+ # check that a similar value is obtained for string labels
+ d2_score_string = d2_log_loss_score(y_true_string, y_pred)
+ assert d2_score_string == pytest.approx(d2_score)
+
+ # check if poor predictions gives a relatively low value for the d2 score
+ y_pred = np.array(
+ [
+ [0.5, 0.5],
+ [0.1, 0.9],
+ [0.1, 0.9],
+ [0.9, 0.1],
+ [0.75, 0.25],
+ [0.1, 0.9],
+ ]
+ )
+ d2_score = d2_log_loss_score(y_true, y_pred)
+ assert d2_score < 0
+ # check that a similar value is obtained for string labels
+ d2_score_string = d2_log_loss_score(y_true_string, y_pred)
+ assert d2_score_string == pytest.approx(d2_score)
+
+ # check if simply using the average of the classes as the predictions
+ # gives a d2 score of 0
+ y_true = [0, 0, 0, 1, 1, 1]
+ y_pred = np.array(
+ [
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ [0.5, 0.5],
+ ]
+ )
+ d2_score = d2_log_loss_score(y_true, y_pred)
+ assert d2_score == 0
+ d2_score_string = d2_log_loss_score(y_true_string, y_pred)
+ assert d2_score_string == 0
+
+ # check if simply using the average of the classes as the predictions
+ # gives a d2 score of 0 when the positive class has a higher proportion
+ y_true = [0, 1, 1, 1]
+ y_true_string = ["no", "yes", "yes", "yes"]
+ y_pred = np.array([[0.25, 0.75], [0.25, 0.75], [0.25, 0.75], [0.25, 0.75]])
+ d2_score = d2_log_loss_score(y_true, y_pred)
+ assert d2_score == 0
+ d2_score_string = d2_log_loss_score(y_true_string, y_pred)
+ assert d2_score_string == 0
+ sample_weight = [2, 2, 2, 2]
+ d2_score_with_sample_weight = d2_log_loss_score(
+ y_true, y_pred, sample_weight=sample_weight
+ )
+ assert d2_score_with_sample_weight == 0
+
+ # check that the d2 scores seem correct when more than 2
+ # labels are specified
+ y_true = ["high", "high", "low", "neutral"]
+ sample_weight = [1.4, 0.6, 0.8, 0.2]
+
+ y_pred = np.array(
+ [
+ [0.8, 0.1, 0.1],
+ [0.8, 0.1, 0.1],
+ [0.1, 0.8, 0.1],
+ [0.1, 0.1, 0.8],
+ ]
+ )
+ d2_score = d2_log_loss_score(y_true, y_pred)
+ assert 0.5 < d2_score < 1.0
+ d2_score = d2_log_loss_score(y_true, y_pred, sample_weight=sample_weight)
+ assert 0.5 < d2_score < 1.0
+
+ y_pred = np.array(
+ [
+ [0.2, 0.5, 0.3],
+ [0.1, 0.7, 0.2],
+ [0.1, 0.1, 0.8],
+ [0.2, 0.7, 0.1],
+ ]
+ )
+ d2_score = d2_log_loss_score(y_true, y_pred)
+ assert d2_score < 0
+ d2_score = d2_log_loss_score(y_true, y_pred, sample_weight=sample_weight)
+ assert d2_score < 0
+
+
+def test_d2_log_loss_score_raises():
+ """Test that d2_log_loss raises error on invalid input."""
+ y_true = [0, 1, 2]
+ y_pred = [[0.2, 0.8], [0.5, 0.5], [0.4, 0.6]]
+ err = "contain different number of classes"
+ with pytest.raises(ValueError, match=err):
+ d2_log_loss_score(y_true, y_pred)
+
+ # check error if the number of classes in labels do not match the number
+ # of classes in y_pred.
+ y_true = ["a", "b", "c"]
+ y_pred = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
+ labels = [0, 1, 2]
+ err = "number of classes in labels is different"
+ with pytest.raises(ValueError, match=err):
+ d2_log_loss_score(y_true, y_pred, labels=labels)
+
+ # check error if y_true and y_pred do not have equal lengths
+ y_true = [0, 1, 2]
+ y_pred = [[0.5, 0.5, 0.5], [0.6, 0.3, 0.1]]
+ err = "inconsistent numbers of samples"
+ with pytest.raises(ValueError, match=err):
+ d2_log_loss_score(y_true, y_pred)
+
+ # check warning for samples < 2
+ y_true = [1]
+ y_pred = [[0.5, 0.5]]
+ err = "score is not well-defined"
+ with pytest.warns(UndefinedMetricWarning, match=err):
+ d2_log_loss_score(y_true, y_pred)
+
+ # check error when y_true only has 1 label
+ y_true = [1, 1, 1]
+ y_pred = [[0.5, 0.5], [0.5, 0.5], [0.5, 5]]
+ err = "y_true contains only one label"
+ with pytest.raises(ValueError, match=err):
+ d2_log_loss_score(y_true, y_pred)
+
+ # check error when y_true only has 1 label and labels also has
+ # only 1 label
+ y_true = [1, 1, 1]
+ labels = [1]
+ y_pred = [[0.5, 0.5], [0.5, 0.5], [0.5, 5]]
+ err = "The labels array needs to contain at least two"
+ with pytest.raises(ValueError, match=err):
+ d2_log_loss_score(y_true, y_pred, labels=labels)
| diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index 59f014b732e35..7caacd697ea1c 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -2826,6 +2826,51 @@ Here are some usage examples of the :func:`d2_absolute_error_score` function::
|details-end|
+|details-start|
+**D² log loss score**
+|details-split|
+
+The :func:`d2_log_loss_score` function implements the special case
+of D² with the log loss, see :ref:`log_loss`, i.e.:
+
+.. math::
+
+ \text{dev}(y, \hat{y}) = \text{log_loss}(y, \hat{y}).
+
+The :math:`y_{\text{null}}` for the :func:`log_loss` is the per-class
+proportion.
+
+Here are some usage examples of the :func:`d2_log_loss_score` function::
+
+ >>> from sklearn.metrics import d2_log_loss_score
+ >>> y_true = [1, 1, 2, 3]
+ >>> y_pred = [
+ ... [0.5, 0.25, 0.25],
+ ... [0.5, 0.25, 0.25],
+ ... [0.5, 0.25, 0.25],
+ ... [0.5, 0.25, 0.25],
+ ... ]
+ >>> d2_log_loss_score(y_true, y_pred)
+ 0.0
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [
+ ... [0.98, 0.01, 0.01],
+ ... [0.01, 0.98, 0.01],
+ ... [0.01, 0.01, 0.98],
+ ... ]
+ >>> d2_log_loss_score(y_true, y_pred)
+ 0.981...
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [
+ ... [0.1, 0.6, 0.3],
+ ... [0.1, 0.6, 0.3],
+ ... [0.4, 0.5, 0.1],
+ ... ]
+ >>> d2_log_loss_score(y_true, y_pred)
+ -0.552...
+
+|details-end|
+
.. _visualization_regression_evaluation:
Visual evaluation of regression models
diff --git a/doc/whats_new/v1.5.rst b/doc/whats_new/v1.5.rst
index 1fe0df6f97a61..d3064851e7f87 100644
--- a/doc/whats_new/v1.5.rst
+++ b/doc/whats_new/v1.5.rst
@@ -169,7 +169,7 @@ Changelog
..........................
- |Fix| Fixed a regression in :class:`calibration.CalibratedClassifierCV` where
- an error was wrongly raised with string targets.
+ an error was wrongly raised with string targets.
:pr:`28843` by :user:`Jérémie du Boisberranger <jeremiedbb>`.
:mod:`sklearn.cluster`
@@ -406,6 +406,10 @@ Changelog
is deprecated and will raise an error in v1.7.
:pr:`18555` by :user:`Kaushik Amar Das <cozek>`.
+- |Feature| :func:`metrics.d2_log_loss_score` has been added which
+ calculates the D^2 score for the log loss.
+ :pr:`28351` by :user:`Omar Salman <OmarManzoor>`.
+
:mod:`sklearn.mixture`
......................
| [
{
"components": [
{
"doc": ":math:`D^2` score function, fraction of log loss explained.\n\nBest possible score is 1.0 and it can be negative (because the model can be\narbitrarily worse). A model that always uses the empirical mean of `y_true` as\nconstant prediction, disregarding the input featur... | [
"sklearn/metrics/tests/test_classification.py::test_classification_report_dictionary_output",
"sklearn/metrics/tests/test_classification.py::test_classification_report_output_dict_empty_input",
"sklearn/metrics/tests/test_classification.py::test_classification_report_zero_division_warning[warn]",
"sklearn/met... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
FEA Add d2_log_loss_score
<!--
Thanks for contributing a pull request! Please ensure you have taken a look at
the contribution guidelines: https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md
-->
#### Reference Issues/PRs
<!--
Example: Fixes #1234. See also #3456.
Please use keywords (e.g., Fixes) to create link to the issues or pull requests
you resolved, so that they will automatically be closed when your pull request
is merged. See https://github.com/blog/1506-closing-issues-via-pull-requests
-->
Fixes: #20943
#### What does this implement/fix? Explain your changes.
- Adds d2_log_loss_score
#### Any other comments?
<!--
Please be aware that we are a loose team of volunteers so patience is
necessary; assistance handling other issues is very welcome. We value
all user contributions, no matter how minor they are. If we are slow to
review, either the pull request needs some benchmarking, tinkering,
convincing, etc. or more likely the reviewers are simply busy. In either
case, we ask for your understanding during the review process.
For more information, see our FAQ on this topic:
http://scikit-learn.org/dev/faq.html#why-is-my-pull-request-not-getting-any-attention.
Thanks for contributing!
-->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/metrics/_classification.py]
(definition of d2_log_loss_score:)
def d2_log_loss_score(y_true, y_pred, *, sample_weight=None, labels=None):
""":math:`D^2` score function, fraction of log loss explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical mean of `y_true` as
constant prediction, disregarding the input features, gets a D^2 score of 0.0.
Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.5
Parameters
----------
y_true : array-like or label indicator matrix
The actuals labels for the n_samples samples.
y_pred : array-like of shape (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`~sklearn.preprocessing.LabelBinarizer`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
Returns
-------
d2 : float or ndarray of floats
The D^2 score.
Notes
-----
This is not a symmetric function.
Like R^2, D^2 score may be negative (it need not actually be the square of
a quantity D).
This metric is not well-defined for a single sample and will return a NaN
value if n_samples is less than two."""
[end of new definitions in sklearn/metrics/_classification.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | c799133710d518f5fba2958bb0e0765ee280df12 | |
conan-io__conan-15573 | 15,573 | conan-io/conan | null | 8aeba3ec6ad5a36c6f57c32d6ede59c2ecc68151 | 2024-01-31T19:05:06Z | diff --git a/conan/cli/commands/graph.py b/conan/cli/commands/graph.py
index 325f1f58643..b8fb4a36bda 100644
--- a/conan/cli/commands/graph.py
+++ b/conan/cli/commands/graph.py
@@ -63,6 +63,9 @@ def graph_build_order(conan_api, parser, subparser, *args):
common_graph_args(subparser)
subparser.add_argument("--order-by", choices=['recipe', 'configuration'],
help='Select how to order the output, "recipe" by default if not set.')
+ subparser.add_argument("--reduce", action='store_true', default=False,
+ help='Reduce the build order, output only those to build. Use this '
+ 'only if the result will not be merged later with other build-order')
args = parser.parse_args(*args)
# parameter validation
@@ -102,7 +105,12 @@ def graph_build_order(conan_api, parser, subparser, *args):
out = ConanOutput()
out.title("Computing the build order")
+
install_graph = InstallGraph(deps_graph, order_by=args.order_by)
+ if args.reduce:
+ if args.order_by is None:
+ raise ConanException("--reduce needs --order-by argument defined")
+ install_graph.reduce()
install_order_serialized = install_graph.install_build_order()
if args.order_by is None: # legacy
install_order_serialized = install_order_serialized["order"]
@@ -120,15 +128,24 @@ def graph_build_order_merge(conan_api, parser, subparser, *args):
Merge more than 1 build-order file.
"""
subparser.add_argument("--file", nargs="?", action="append", help="Files to be merged")
+ subparser.add_argument("--reduce", action='store_true', default=False,
+ help='Reduce the build order, output only those to build. Use this '
+ 'only if the result will not be merged later with other build-order')
args = parser.parse_args(*args)
if not args.file or len(args.file) < 2:
raise ConanException("At least 2 files are needed to be merged")
result = InstallGraph.load(make_abs_path(args.file[0]))
+ if result.reduced:
+ raise ConanException(f"Reduced build-order file cannot be merged: {args.file[0]}")
for f in args.file[1:]:
install_graph = InstallGraph.load(make_abs_path(f))
+ if install_graph.reduced:
+ raise ConanException(f"Reduced build-order file cannot be merged: {f}")
result.merge(install_graph)
+ if args.reduce:
+ result.reduce()
install_order_serialized = result.install_build_order()
if getattr(result, "legacy"):
install_order_serialized = install_order_serialized["order"]
diff --git a/conans/client/graph/install_graph.py b/conans/client/graph/install_graph.py
index 82c9712611f..c044f4fb176 100644
--- a/conans/client/graph/install_graph.py
+++ b/conans/client/graph/install_graph.py
@@ -4,7 +4,7 @@
from conan.api.output import ConanOutput
from conans.client.graph.graph import RECIPE_CONSUMER, RECIPE_VIRTUAL, BINARY_SKIP, \
- BINARY_MISSING, BINARY_INVALID, Overrides, BINARY_BUILD
+ BINARY_MISSING, BINARY_INVALID, Overrides, BINARY_BUILD, BINARY_EDITABLE_BUILD
from conans.errors import ConanInvalidConfiguration, ConanException
from conans.model.package_ref import PkgReference
from conans.model.recipe_ref import RecipeReference
@@ -111,6 +111,13 @@ def __init__(self):
self.packages = {} # {package_id: _InstallPackageReference}
self.depends = [] # Other REFs, defines the graph topology and operation ordering
+ @property
+ def need_build(self):
+ for package in self.packages.values():
+ if package.binary in (BINARY_BUILD, BINARY_EDITABLE_BUILD):
+ return True
+ return False
+
@property
def node(self):
return self._node
@@ -212,6 +219,10 @@ def __init__(self):
self.depends = [] # List of full prefs
self.overrides = Overrides()
+ @property
+ def need_build(self):
+ return self.binary in (BINARY_BUILD, BINARY_EDITABLE_BUILD)
+
@property
def pref(self):
return PkgReference(self.ref, self.package_id, self.prev)
@@ -292,6 +303,8 @@ def deserialize(data, filename):
return result
def merge(self, other):
+ assert self.binary == other.binary, f"Binary for {self.ref}: {self.binary}!={other.binary}"
+
assert self.ref == other.ref
for d in other.depends:
if d not in self.depends:
@@ -312,6 +325,7 @@ def __init__(self, deps_graph, order_by=None):
self._order = order_by
self._node_cls = _InstallRecipeReference if order_by == "recipe" else _InstallConfiguration
self._is_test_package = False
+ self.reduced = False
if deps_graph is not None:
self._initialize_deps_graph(deps_graph)
self._is_test_package = deps_graph.root.conanfile.tested_reference_str is not None
@@ -328,8 +342,10 @@ def merge(self, other):
"""
@type other: InstallGraph
"""
+ if self.reduced or other.reduced:
+ raise ConanException("Reduced build-order files cannot be merged")
if self._order != other._order:
- raise ConanException(f"Cannot merge build-orders of `{self._order}!={other._order}")
+ raise ConanException(f"Cannot merge build-orders of {self._order}!={other._order}")
for ref, install_node in other._nodes.items():
existing = self._nodes.get(ref)
if existing is None:
@@ -340,8 +356,10 @@ def merge(self, other):
@staticmethod
def deserialize(data, filename):
legacy = isinstance(data, list)
- order, data = ("recipe", data) if legacy else (data["order_by"], data["order"])
+ order, data, reduced = ("recipe", data, False) if legacy else \
+ (data["order_by"], data["order"], data["reduced"])
result = InstallGraph(None, order_by=order)
+ result.reduced = reduced
result.legacy = legacy
for level in data:
for item in level:
@@ -362,6 +380,22 @@ def _initialize_deps_graph(self, deps_graph):
else:
existing.add(node)
+ def reduce(self):
+ result = {}
+ for k, node in self._nodes.items():
+ if node.need_build:
+ result[k] = node
+ else: # Eliminate this element from the graph
+ dependencies = node.depends
+ # Find all consumers
+ for n in self._nodes.values():
+ if k in n.depends:
+ n.depends = [d for d in n.depends if d != k] # Discard the removed node
+ # Add new edges, without repetition
+ n.depends.extend(d for d in dependencies if d not in n.depends)
+ self._nodes = result
+ self.reduced = True
+
def install_order(self, flat=False):
# a topological order by levels, returns a list of list, in order of processing
levels = []
@@ -390,6 +424,7 @@ def install_build_order(self):
"""
install_order = self.install_order()
result = {"order_by": self._order,
+ "reduced": self.reduced,
"order": [[n.serialize() for n in level] for level in install_order]}
return result
| diff --git a/conans/test/integration/command_v2/test_info_build_order.py b/conans/test/integration/command_v2/test_info_build_order.py
index 70d6dc97b5c..f7fdaff000a 100644
--- a/conans/test/integration/command_v2/test_info_build_order.py
+++ b/conans/test/integration/command_v2/test_info_build_order.py
@@ -2,6 +2,8 @@
import os
import textwrap
+import pytest
+
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
@@ -66,6 +68,11 @@ def test_info_build_order():
assert bo_json["order_by"] == "recipe"
assert bo_json["order"] == result
+ c.run("graph build-order consumer --build=missing --order-by=recipe --reduce --format=json")
+ bo_json = json.loads(c.stdout)
+ assert bo_json["order_by"] == "recipe"
+ assert bo_json["order"] == result
+
def test_info_build_order_configuration():
c = TestClient()
@@ -116,6 +123,10 @@ def test_info_build_order_configuration():
assert bo_json["order"] == result
+ c.run("graph build-order consumer --build=missing --order=configuration --reduce --format=json")
+ bo_json = json.loads(c.stdout)
+ assert bo_json["order"] == result
+
def test_info_build_order_configuration_text_formatter():
c = TestClient()
@@ -533,3 +544,111 @@ def export(self):
c.run("graph build-order --requires=dep/0.1 --format=json", assert_error=True)
assert "ImportError" in c.out
assert "It is possible that this recipe is not Conan 2.0 ready" in c.out
+
+
+class TestBuildOrderReduce:
+ @pytest.mark.parametrize("order", ["recipe", "configuration"])
+ def test_build_order_reduce(self, order):
+ c = TestClient()
+ c.save({"liba/conanfile.py": GenConanfile("liba", "0.1"),
+ "libb/conanfile.py": GenConanfile("libb", "0.1").with_requires("liba/0.1"),
+ "libc/conanfile.py": GenConanfile("libc", "0.1").with_requires("libb/0.1"),
+ "consumer/conanfile.txt": "[requires]\nlibc/0.1"})
+ c.run("create liba")
+ c.run("create libb")
+ c.run("create libc")
+ c.run("remove liba:* -c")
+ c.run("remove libc:* -c")
+ c.run(f"graph build-order consumer --order={order} --build=missing --reduce --format=json")
+ bo_json = json.loads(c.stdout)
+ order_json = bo_json["order"]
+ assert len(order_json) == 2 # 2 levels
+ level0, level1 = order_json
+ assert len(level0) == 1
+ assert level0[0]["ref"] == "liba/0.1#a658e7beaaae5d6be0b6f67dcc9859e2"
+ # then libc -> directly on liba, no libb involved
+ assert len(level1) == 1
+ assert level1[0]["ref"] == "libc/0.1#c04c370ad966390e67388565b56f019a"
+ depends = "liba/0.1#a658e7beaaae5d6be0b6f67dcc9859e2"
+ if order == "configuration":
+ depends += ":da39a3ee5e6b4b0d3255bfef95601890afd80709"
+ assert level1[0]["depends"] == [depends]
+
+ @pytest.mark.parametrize("order", ["recipe", "configuration"])
+ def test_build_order_merge_reduce(self, order):
+ c = TestClient()
+ c.save({"liba/conanfile.py": GenConanfile("liba", "0.1").with_settings("os"),
+ "libb/conanfile.py": GenConanfile("libb", "0.1").with_settings("os")
+ .with_requires("liba/0.1"),
+ "libc/conanfile.py": GenConanfile("libc", "0.1").with_settings("os")
+ .with_requires("libb/0.1"),
+ "consumer/conanfile.txt": "[requires]\nlibc/0.1"})
+ for _os in ("Windows", "Linux"):
+ c.run(f"create liba -s os={_os}")
+ c.run(f"create libb -s os={_os}")
+ c.run(f"create libc -s os={_os}")
+
+ c.run("remove liba:* -c")
+ c.run("remove libc:* -c")
+ c.run(f"graph build-order consumer --order={order} --build=missing -s os=Windows "
+ "--format=json", redirect_stdout="windows.json")
+ c.run(f"graph build-order consumer --order={order} --build=missing -s os=Linux "
+ "--format=json", redirect_stdout="linux.json")
+
+ c.run(f"graph build-order-merge --file=windows.json --file=linux.json --reduce "
+ "--format=json")
+ bo_json = json.loads(c.stdout)
+ order_json = bo_json["order"]
+ assert len(order_json) == 2 # 2 levels
+ level0, level1 = order_json
+ if order == "recipe":
+ assert len(level0) == 1
+ assert level0[0]["ref"] == "liba/0.1#8c6ed89c12ab2ce78b239224bd7cb79e"
+ # then libc -> directly on liba, no libb involved
+ assert len(level1) == 1
+ assert level1[0]["ref"] == "libc/0.1#66db2600b9d6a2a61c9051fcf47da4a3"
+ depends = "liba/0.1#8c6ed89c12ab2ce78b239224bd7cb79e"
+ assert level1[0]["depends"] == [depends]
+ else:
+ assert len(level0) == 2
+ liba1 = "liba/0.1#8c6ed89c12ab2ce78b239224bd7cb79e:" \
+ "ebec3dc6d7f6b907b3ada0c3d3cdc83613a2b715"
+ liba2 = "liba/0.1#8c6ed89c12ab2ce78b239224bd7cb79e:" \
+ "9a4eb3c8701508aa9458b1a73d0633783ecc2270"
+ assert level0[0]["pref"] == liba1
+ assert level0[1]["pref"] == liba2
+ # then libc -> directly on liba, no libb involved
+ assert len(level1) == 2
+ assert level1[0]["ref"] == "libc/0.1#66db2600b9d6a2a61c9051fcf47da4a3"
+ assert level1[0]["depends"] == [liba1]
+ assert level1[1]["ref"] == "libc/0.1#66db2600b9d6a2a61c9051fcf47da4a3"
+ assert level1[1]["depends"] == [liba2]
+
+ def test_error_reduced(self):
+ c = TestClient()
+ c.save({"conanfile.py": GenConanfile("liba", "0.1")})
+ c.run("graph build-order . --format=json", redirect_stdout="bo1.json")
+ c.run("graph build-order . --order-by=recipe --reduce --format=json",
+ redirect_stdout="bo2.json")
+ c.run(f"graph build-order-merge --file=bo1.json --file=bo2.json", assert_error=True)
+ assert "ERROR: Reduced build-order file cannot be merged: bo2.json"
+ # different order
+ c.run(f"graph build-order-merge --file=bo2.json --file=bo1.json", assert_error=True)
+ assert "ERROR: Reduced build-order file cannot be merged: bo2.json"
+
+ def test_error_different_orders(self):
+ c = TestClient()
+ c.save({"conanfile.py": GenConanfile("liba", "0.1")})
+ c.run("graph build-order . --format=json", redirect_stdout="bo1.json")
+ c.run("graph build-order . --order-by=recipe --format=json", redirect_stdout="bo2.json")
+ c.run("graph build-order . --order-by=configuration --format=json",
+ redirect_stdout="bo3.json")
+ c.run(f"graph build-order-merge --file=bo1.json --file=bo2.json")
+ # Not error
+ c.run(f"graph build-order-merge --file=bo1.json --file=bo3.json", assert_error=True)
+ assert "ERROR: Cannot merge build-orders of recipe!=configuration" in c.out
+ c.run(f"graph build-order-merge --file=bo2.json --file=bo3.json", assert_error=True)
+ assert "ERROR: Cannot merge build-orders of recipe!=configuration" in c.out
+ # different order
+ c.run(f"graph build-order-merge --file=bo3.json --file=bo2.json", assert_error=True)
+ assert "ERROR: Cannot merge build-orders of configuration!=recipe" in c.out
| [
{
"components": [
{
"doc": "",
"lines": [
115,
119
],
"name": "_InstallRecipeReference.need_build",
"signature": "def need_build(self):",
"type": "function"
},
{
"doc": "",
"lines": [
223,
2... | [
"conans/test/integration/command_v2/test_info_build_order.py::test_info_build_order",
"conans/test/integration/command_v2/test_info_build_order.py::test_info_build_order_configuration",
"conans/test/integration/command_v2/test_info_build_order.py::TestBuildOrderReduce::test_build_order_reduce[recipe]",
"conan... | [
"conans/test/integration/command_v2/test_info_build_order.py::test_info_build_order_configuration_text_formatter",
"conans/test/integration/command_v2/test_info_build_order.py::test_info_build_order_build_require",
"conans/test/integration/command_v2/test_info_build_order.py::test_info_build_order_options",
"... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
InstallGraph.reduce
Changelog: Feature: Provide a new ``graph build-order --reduce`` argument to reduce the order exclusively to packages that need to be built from source.
Docs: https://github.com/conan-io/docs/pull/3584
Close https://github.com/conan-io/conan/issues/15566
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/client/graph/install_graph.py]
(definition of _InstallRecipeReference.need_build:)
def need_build(self):
(definition of _InstallConfiguration.need_build:)
def need_build(self):
(definition of InstallGraph.reduce:)
def reduce(self):
[end of new definitions in conans/client/graph/install_graph.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
RDFLib__rdflib-2699 | 2,699 | RDFLib/rdflib | null | ce7e41408dd29f907c8ed4e2bff91dc963f114ee | 2024-01-31T15:05:58Z | diff --git a/rdflib/extras/shacl.py b/rdflib/extras/shacl.py
new file mode 100644
index 000000000..6b62b3394
--- /dev/null
+++ b/rdflib/extras/shacl.py
@@ -0,0 +1,92 @@
+"""
+Utilities for interacting with SHACL Shapes Graphs more easily.
+"""
+from __future__ import annotations
+
+from typing import Optional, Union
+
+from rdflib import Graph, Literal, URIRef, paths
+from rdflib.namespace import RDF, SH
+from rdflib.paths import Path
+from rdflib.term import Node
+
+
+class SHACLPathError(Exception):
+ pass
+
+
+# This implementation is roughly based on
+# pyshacl.helper.sparql_query_helper::SPARQLQueryHelper._shacl_path_to_sparql_path
+def parse_shacl_path(
+ shapes_graph: Graph,
+ path_identifier: Node,
+) -> Union[URIRef, Path]:
+ """
+ Parse a valid SHACL path (e.g. the object of a triple with predicate sh:path)
+ from a :class:`~rdflib.graph.Graph` as a :class:`~rdflib.term.URIRef` if the path
+ is simply a predicate or a :class:`~rdflib.paths.Path` otherwise.
+
+ :param shapes_graph: A :class:`~rdflib.graph.Graph` containing the path to be parsed
+ :param path_identifier: A :class:`~rdflib.term.Node` of the path
+ :return: A :class:`~rdflib.term.URIRef` or a :class:`~rdflib.paths.Path`
+ """
+ path: Optional[Union[URIRef, Path]] = None
+
+ # Literals are not allowed.
+ if isinstance(path_identifier, Literal):
+ raise TypeError("Literals are not a valid SHACL path.")
+
+ # If a path is a URI, that's the whole path.
+ elif isinstance(path_identifier, URIRef):
+ if path_identifier == RDF.nil:
+ raise SHACLPathError(
+ "A list of SHACL Paths must contain at least two path items."
+ )
+ path = path_identifier
+
+ # Handle Sequence Paths
+ elif shapes_graph.value(path_identifier, RDF.first) is not None:
+ sequence = list(shapes_graph.items(path_identifier))
+ if len(sequence) < 2:
+ raise SHACLPathError(
+ "A list of SHACL Sequence Paths must contain at least two path items."
+ )
+ path = paths.SequencePath(
+ *(parse_shacl_path(shapes_graph, path) for path in sequence)
+ )
+
+ # Handle sh:inversePath
+ elif inverse_path := shapes_graph.value(path_identifier, SH.inversePath):
+ path = paths.InvPath(parse_shacl_path(shapes_graph, inverse_path))
+
+ # Handle sh:alternativePath
+ elif alternative_path := shapes_graph.value(path_identifier, SH.alternativePath):
+ alternatives = list(shapes_graph.items(alternative_path))
+ if len(alternatives) < 2:
+ raise SHACLPathError(
+ "List of SHACL alternate paths must have at least two path items."
+ )
+ path = paths.AlternativePath(
+ *(
+ parse_shacl_path(shapes_graph, alternative)
+ for alternative in alternatives
+ )
+ )
+
+ # Handle sh:zeroOrMorePath
+ elif zero_or_more_path := shapes_graph.value(path_identifier, SH.zeroOrMorePath):
+ path = paths.MulPath(parse_shacl_path(shapes_graph, zero_or_more_path), "*")
+
+ # Handle sh:oneOrMorePath
+ elif one_or_more_path := shapes_graph.value(path_identifier, SH.oneOrMorePath):
+ path = paths.MulPath(parse_shacl_path(shapes_graph, one_or_more_path), "+")
+
+ # Handle sh:zeroOrOnePath
+ elif zero_or_one_path := shapes_graph.value(path_identifier, SH.zeroOrOnePath):
+ path = paths.MulPath(parse_shacl_path(shapes_graph, zero_or_one_path), "?")
+
+ # Raise error if none of the above options were found
+ elif path is None:
+ raise SHACLPathError(f"Cannot parse {repr(path_identifier)} as a SHACL Path.")
+
+ return path
| diff --git a/test/test_extras/test_shacl_extras.py b/test/test_extras/test_shacl_extras.py
new file mode 100644
index 000000000..417e75b68
--- /dev/null
+++ b/test/test_extras/test_shacl_extras.py
@@ -0,0 +1,218 @@
+from __future__ import annotations
+
+from typing import Union
+
+import pytest
+
+from rdflib import Graph, URIRef
+from rdflib.extras.shacl import SHACLPathError, parse_shacl_path
+from rdflib.namespace import SH, Namespace
+from rdflib.paths import Path
+
+EX = Namespace("http://example.org/")
+
+
+# Create a graph that gets loaded only once
+@pytest.fixture(scope="module")
+def path_source_data():
+ data = """
+ @prefix ex: <http://example.org/> .
+ @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
+ @prefix sh: <http://www.w3.org/ns/shacl#> .
+
+
+ ex:TestPropShape1
+ sh:path ex:pred1 ;
+ .
+ ex:TestPropShape2a
+ sh:path (
+ ex:pred1
+ ex:pred2
+ ex:pred3
+ ) ;
+ .
+ ex:TestPropShape2b
+ sh:path (
+ (
+ ex:pred1
+ ex:pred2
+ )
+ ex:pred3
+ ) ;
+ .
+ ex:TestPropShape3
+ sh:path [
+ sh:inversePath ex:pred1 ;
+ ] ;
+ .
+ ex:TestPropShape4a
+ sh:path [
+ sh:alternativePath (
+ ex:pred1
+ ex:pred2
+ ex:pred3
+ ) ;
+ ] ;
+ .
+ ex:TestPropShape4b
+ sh:path [
+ sh:alternativePath (
+ [
+ sh:alternativePath (
+ ex:pred1
+ ex:pred2
+ ) ;
+ ]
+ ex:pred3
+ ) ;
+ ] ;
+ .
+ ex:TestPropShape5
+ sh:path [
+ sh:zeroOrMorePath ex:pred1 ;
+ ] ;
+ .
+ ex:TestPropShape6
+ sh:path [
+ sh:oneOrMorePath ex:pred1 ;
+ ] ;
+ .
+ ex:TestPropShape7
+ sh:path [
+ sh:zeroOrOnePath ex:pred1 ;
+ ] ;
+ .
+ ex:TestPropShape8
+ sh:path [
+ sh:zeroOrMorePath [
+ sh:inversePath ex:pred1 ;
+ ] ;
+ ] ;
+ .
+ ex:TestPropShape9
+ sh:path [
+ sh:alternativePath (
+ [
+ sh:inversePath ex:pred1 ;
+ ]
+ (
+ ex:pred1
+ ex:pred2
+ )
+ [
+ sh:alternativePath (
+ ex:pred1
+ ex:pred2
+ ex:pred3
+ ) ;
+ ]
+ ) ;
+ ] ;
+ .
+ ex:TestPropShape10
+ sh:path (
+ [
+ sh:zeroOrMorePath [
+ sh:inversePath ex:pred1 ;
+ ] ;
+ ]
+ [
+ sh:alternativePath (
+ [
+ sh:zeroOrMorePath [
+ sh:inversePath ex:pred1 ;
+ ] ;
+ ]
+ [
+ sh:alternativePath (
+ ex:pred1
+ [
+ sh:oneOrMorePath ex:pred2 ;
+ ]
+ [
+ sh:zeroOrMorePath ex:pred3 ;
+ ]
+ ) ;
+ ]
+ ) ;
+ ]
+ ) ;
+ .
+ ex:InvalidTestPropShape1
+ sh:path () ;
+ .
+ ex:InvalidTestPropShape2
+ sh:path (
+ ex:pred1
+ ) ;
+ .
+ ex:InvalidTestPropShape3
+ sh:path [
+ sh:alternativePath () ;
+ ] ;
+ .
+ ex:InvalidTestPropShape4
+ sh:path [
+ sh:alternativePath (
+ ex:pred1
+ ) ;
+ ] ;
+ .
+ ex:InvalidTestPropShape5
+ sh:path [
+ ex:invalidShaclPathProperty ex:pred1
+ ] ;
+ .
+ ex:InvalidTestPropShape6
+ sh:path "This can't be a literal!";
+ .
+ """
+ g = Graph()
+ g.parse(data=data, format="turtle")
+ yield g
+
+
+@pytest.mark.parametrize(
+ ("resource", "expected"),
+ (
+ # Single SHACL Path
+ (EX.TestPropShape1, EX.pred1),
+ (EX.TestPropShape2a, EX.pred1 / EX.pred2 / EX.pred3),
+ (EX.TestPropShape2b, EX.pred1 / EX.pred2 / EX.pred3),
+ (EX.TestPropShape3, ~EX.pred1),
+ (EX.TestPropShape4a, EX.pred1 | EX.pred2 | EX.pred3),
+ (EX.TestPropShape4b, EX.pred1 | EX.pred2 | EX.pred3),
+ (EX.TestPropShape5, EX.pred1 * "*"), # type: ignore[operator]
+ (EX.TestPropShape6, EX.pred1 * "+"), # type: ignore[operator]
+ (EX.TestPropShape7, EX.pred1 * "?"), # type: ignore[operator]
+ # SHACL Path Combinations
+ (EX.TestPropShape8, ~EX.pred1 * "*"),
+ (
+ EX.TestPropShape9,
+ ~EX.pred1 | EX.pred1 / EX.pred2 | EX.pred1 | EX.pred2 | EX.pred3,
+ ),
+ (
+ EX.TestPropShape10,
+ ~EX.pred1
+ * "*"
+ / (~EX.pred1 * "*" | EX.pred1 | EX.pred2 * "+" | EX.pred3 * "*"), # type: ignore[operator]
+ ),
+ # Invalid Operations
+ (EX.InvalidTestPropShape1, SHACLPathError),
+ (EX.InvalidTestPropShape2, SHACLPathError),
+ (EX.InvalidTestPropShape3, SHACLPathError),
+ (EX.InvalidTestPropShape4, SHACLPathError),
+ (EX.InvalidTestPropShape5, SHACLPathError),
+ (EX.InvalidTestPropShape6, TypeError),
+ ),
+)
+def test_parse_shacl_path(
+ path_source_data: Graph, resource: URIRef, expected: Union[URIRef, Path]
+):
+ path_root = path_source_data.value(resource, SH.path)
+
+ if isinstance(expected, type):
+ with pytest.raises(expected): # type: ignore[arg-type]
+ parse_shacl_path(path_source_data, path_root) # type: ignore[arg-type]
+ else:
+ assert parse_shacl_path(path_source_data, path_root) == expected # type: ignore[arg-type]
| [
{
"components": [
{
"doc": "",
"lines": [
14,
15
],
"name": "SHACLPathError",
"signature": "class SHACLPathError(Exception):",
"type": "class"
},
{
"doc": "Parse a valid SHACL path (e.g. the object of a triple with pre... | [
"test/test_extras/test_shacl_extras.py::test_parse_shacl_path[http://example.org/TestPropShape1-http://example.org/pred1]",
"test/test_extras/test_shacl_extras.py::test_parse_shacl_path[http://example.org/TestPropShape2a-expected1]",
"test/test_extras/test_shacl_extras.py::test_parse_shacl_path[http://example.o... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add SHACL path to RDFLib Path utility and corresponding tests
<!--
Thank you for your contribution to this project. This project has no formal
funding or full-time maintainers, and relies entirely on independent
contributors to keep it alive and relevant.
This pull request template includes some guidelines intended to help
contributors, not to deter contributions. While we prefer that PRs follow our
guidelines, we will not reject PRs solely on the basis that they do not, though
we may take longer to process them as in most cases the remaining work will
have to be done by someone else.
If you have any questions regarding our guidelines, submit the PR as is
and ask.
More detailed guidelines for pull requests are provided in our [developers
guide](https://github.com/RDFLib/rdflib/blob/main/docs/developers.rst).
PRs that are smaller in size and scope will be reviewed and merged quicker, so
please consider if your PR could be split up into more than one independent part
before submitting it, no PR is too small. The maintainers of this project may
also split up larger PRs into smaller, more manageable PRs, if they deem it
necessary.
PRs should be reviewed and approved by at least two people other than the author
using GitHub's review system before being merged. This is less important for bug
fixes and changes that don't impact runtime behaviour, but more important for
changes that expand the RDFLib public API. Reviews are open to anyone, so please
consider reviewing other open pull requests, as this will also free up the
capacity required for your PR to be reviewed.
-->
# Summary of changes
<!--
Briefly explain what changes the pull request is making and why. Ideally, this
should cover all changes in the pull request, as the changes will be reviewed
against this summary to ensure that the PR does not include unintended changes.
Please also explicitly state if the PR makes any changes that are not backwards
compatible.
-->
This adds a utility to parse valid SHACL Paths (e.g. the objects of triples with predicate `sh:path`) into an RDFLib-friendly object. The resulting object could either be:
- A `URIRef`, when the path is simply a predicate
- A `Path`, when the path is complex (e.g. `[sh:inversePath skos:broader]`)
This enables easy evaluation and manipulation of SHACL paths using RDFLib. For example:
- The output can be passed to any of several methods (`Graph.triples`, `Graph.value`, etc.) as a predicate to get value(s) at that path
- The `n3` method can be used to get a SPARQL property path representation of the path
Note that `pyshacl` does not have a utility to do this- rather, it only evaluates SHACL paths.
# Checklist
<!--
If an item on this list doesn't apply to your pull request, just remove it.
If, for some reason, you can't check some items on the checklist, or you are
unsure about them, submit your PR as is and ask for help.
-->
- [x] Checked that there aren't other open pull requests for
the same change.
- [x] Checked that all tests and type checking passes.
- [x] Considered granting [push permissions to the PR branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/allowing-changes-to-a-pull-request-branch-created-from-a-fork),
so maintainers can fix minor issues and keep your PR up to date.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rdflib/extras/shacl.py]
(definition of SHACLPathError:)
class SHACLPathError(Exception):
(definition of parse_shacl_path:)
def parse_shacl_path( shapes_graph: Graph, path_identifier: Node, ) -> Union[URIRef, Path]:
"""Parse a valid SHACL path (e.g. the object of a triple with predicate sh:path)
from a :class:`~rdflib.graph.Graph` as a :class:`~rdflib.term.URIRef` if the path
is simply a predicate or a :class:`~rdflib.paths.Path` otherwise.
:param shapes_graph: A :class:`~rdflib.graph.Graph` containing the path to be parsed
:param path_identifier: A :class:`~rdflib.term.Node` of the path
:return: A :class:`~rdflib.term.URIRef` or a :class:`~rdflib.paths.Path`"""
[end of new definitions in rdflib/extras/shacl.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 0c11debb5178157baeac27b735e49a757916d2a6 | ||
roboflow__supervision-818 | 818 | roboflow/supervision | null | c692d264ae0ea480feb3502a0acdfaa00cf8a6d2 | 2024-01-31T06:51:00Z | diff --git a/docs/detection/tools/save_detections.md b/docs/detection/tools/save_detections.md
new file mode 100644
index 000000000..bdd7c9dc6
--- /dev/null
+++ b/docs/detection/tools/save_detections.md
@@ -0,0 +1,12 @@
+---
+comments: true
+status: new
+---
+
+# Save Detections
+
+<div class="md-typeset">
+ <h2>CSV Sink</h2>
+</div>
+
+:::supervision.detection.tools.csv_sink.CSVSink
diff --git a/mkdocs.yml b/mkdocs.yml
index 220737c82..98feb19a6 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -41,6 +41,7 @@ nav:
- Polygon Zone: detection/tools/polygon_zone.md
- Inference Slicer: detection/tools/inference_slicer.md
- Detection Smoother: detection/tools/smoother.md
+ - Save Detections: detection/tools/save_detections.md
- Annotators: annotators.md
- Trackers: trackers.md
- Datasets: datasets.md
diff --git a/supervision/__init__.py b/supervision/__init__.py
index d44f67236..76abe5996 100644
--- a/supervision/__init__.py
+++ b/supervision/__init__.py
@@ -36,6 +36,7 @@
from supervision.detection.annotate import BoxAnnotator
from supervision.detection.core import Detections
from supervision.detection.line_counter import LineZone, LineZoneAnnotator
+from supervision.detection.tools.csv_sink import CSVSink
from supervision.detection.tools.inference_slicer import InferenceSlicer
from supervision.detection.tools.polygon_zone import PolygonZone, PolygonZoneAnnotator
from supervision.detection.tools.smoother import DetectionsSmoother
diff --git a/supervision/annotators/core.py b/supervision/annotators/core.py
index 870b26f2d..7c162620b 100644
--- a/supervision/annotators/core.py
+++ b/supervision/annotators/core.py
@@ -99,7 +99,7 @@ class OrientedBoxAnnotator(BaseAnnotator):
def __init__(
self,
- color: Union[Color, ColorPalette] = ColorPalette.default(),
+ color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
thickness: int = 2,
color_lookup: ColorLookup = ColorLookup.CLASS,
):
diff --git a/supervision/detection/tools/csv_sink.py b/supervision/detection/tools/csv_sink.py
new file mode 100644
index 000000000..ba0dde97a
--- /dev/null
+++ b/supervision/detection/tools/csv_sink.py
@@ -0,0 +1,181 @@
+from __future__ import annotations
+
+import csv
+import os
+from typing import Any, Dict, List, Optional
+
+from supervision.detection.core import Detections
+
+BASE_HEADER = [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+]
+
+
+class CSVSink:
+ """
+ A utility class for saving detection data to a CSV file. This class is designed to
+ efficiently serialize detection objects into a CSV format, allowing for the
+ inclusion of bounding box coordinates and additional attributes like `confidence`,
+ `class_id`, and `tracker_id`.
+
+ !!! tip
+
+ CSVSink allow to pass custom data alongside the detection fields, providing
+ flexibility for logging various types of information.
+
+ Args:
+ file_name (str): The name of the CSV file where the detections will be stored.
+ Defaults to 'output.csv'.
+
+ Example:
+ ```python
+ import supervision as sv
+ from ultralytics import YOLO
+
+ model = YOLO(<SOURCE_MODEL_PATH>)
+ csv_sink = sv.CSVSink(<RESULT_CSV_FILE_PATH>)
+ frames_generator = sv.get_video_frames_generator(<SOURCE_VIDEO_PATH>)
+
+ with csv_sink:
+ for frame in frames_generator:
+ result = model(frame)[0]
+ detections = sv.Detections.from_ultralytics(result)
+ sink.append(detections, custom_data={'<CUSTOM_LABEL>':'<CUSTOM_DATA>'})
+ ```
+ """ # noqa: E501 // docs
+
+ def __init__(self, file_name: str = "output.csv") -> None:
+ """
+ Initialize the CSVSink instance.
+
+ Args:
+ file_name (str): The name of the CSV file.
+
+ Returns:
+ None
+ """
+ self.file_name = file_name
+ self.file: Optional[open] = None
+ self.writer: Optional[csv.writer] = None
+ self.header_written = False
+ self.field_names = []
+
+ def __enter__(self) -> CSVSink:
+ self.open()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[type],
+ exc_val: Optional[Exception],
+ exc_tb: Optional[Any],
+ ) -> None:
+ self.close()
+
+ def open(self) -> None:
+ """
+ Open the CSV file for writing.
+
+ Returns:
+ None
+ """
+ parent_directory = os.path.dirname(self.file_name)
+ if parent_directory and not os.path.exists(parent_directory):
+ os.makedirs(parent_directory)
+
+ self.file = open(self.file_name, "w", newline="")
+ self.writer = csv.writer(self.file)
+
+ def close(self) -> None:
+ """
+ Close the CSV file.
+
+ Returns:
+ None
+ """
+ if self.file:
+ self.file.close()
+
+ @staticmethod
+ def parse_detection_data(
+ detections: Detections, custom_data: Dict[str, Any] = None
+ ) -> List[Dict[str, Any]]:
+ parsed_rows = []
+ for i in range(len(detections.xyxy)):
+ row = {
+ "x_min": detections.xyxy[i][0],
+ "y_min": detections.xyxy[i][1],
+ "x_max": detections.xyxy[i][2],
+ "y_max": detections.xyxy[i][3],
+ "class_id": ""
+ if detections.class_id is None
+ else str(detections.class_id[i]),
+ "confidence": ""
+ if detections.confidence is None
+ else str(detections.confidence[i]),
+ "tracker_id": ""
+ if detections.tracker_id is None
+ else str(detections.tracker_id[i]),
+ }
+
+ if hasattr(detections, "data"):
+ for key, value in detections.data.items():
+ if value.ndim == 0:
+ row[key] = value
+ else:
+ row[key] = value[i]
+
+ if custom_data:
+ row.update(custom_data)
+ parsed_rows.append(row)
+ return parsed_rows
+
+ def append(
+ self, detections: Detections, custom_data: Dict[str, Any] = None
+ ) -> None:
+ """
+ Append detection data to the CSV file.
+
+ Args:
+ detections (Detections): The detection data.
+ custom_data (Dict[str, Any]): Custom data to include.
+
+ Returns:
+ None
+ """
+ if not self.writer:
+ raise Exception(
+ f"Cannot append to CSV: The file '{self.file_name}' is not open."
+ )
+ field_names = CSVSink.parse_field_names(detections, custom_data)
+ if not self.header_written:
+ self.field_names = field_names
+ self.writer.writerow(field_names)
+ self.header_written = True
+
+ if field_names != self.field_names:
+ print(
+ f"Field names do not match the header. "
+ f"Expected: {self.field_names}, given: {field_names}"
+ )
+
+ parsed_rows = CSVSink.parse_detection_data(detections, custom_data)
+ for row in parsed_rows:
+ self.writer.writerow(
+ [row.get(field_name, "") for field_name in self.field_names]
+ )
+
+ @staticmethod
+ def parse_field_names(
+ detections: Detections, custom_data: Dict[str, Any]
+ ) -> List[str]:
+ dynamic_header = sorted(
+ set(custom_data.keys()) | set(getattr(detections, "data", {}).keys())
+ )
+ return BASE_HEADER + dynamic_header
| diff --git a/test/detection/test_csv.py b/test/detection/test_csv.py
new file mode 100644
index 000000000..c34444944
--- /dev/null
+++ b/test/detection/test_csv.py
@@ -0,0 +1,415 @@
+import csv
+import os
+from test.test_utils import mock_detections
+from typing import Any, Dict, List
+
+import pytest
+
+import supervision as sv
+
+
+@pytest.mark.parametrize(
+ "detections, custom_data, "
+ "second_detections, second_custom_data, "
+ "file_name, expected_result",
+ [
+ (
+ mock_detections(
+ xyxy=[[10, 20, 30, 40], [50, 60, 70, 80]],
+ confidence=[0.7, 0.8],
+ class_id=[0, 0],
+ tracker_id=[0, 1],
+ data={"class_name": ["person", "person"]},
+ ),
+ {"frame_number": 42},
+ mock_detections(
+ xyxy=[[15, 25, 35, 45], [55, 65, 75, 85]],
+ confidence=[0.6, 0.9],
+ class_id=[1, 1],
+ tracker_id=[2, 3],
+ data={"class_name": ["car", "car"]},
+ ),
+ {"frame_number": 43},
+ "test_detections.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "class_name",
+ "frame_number",
+ ],
+ ["10.0", "20.0", "30.0", "40.0", "0", "0.7", "0", "person", "42"],
+ ["50.0", "60.0", "70.0", "80.0", "0", "0.8", "1", "person", "42"],
+ ["15.0", "25.0", "35.0", "45.0", "1", "0.6", "2", "car", "43"],
+ ["55.0", "65.0", "75.0", "85.0", "1", "0.9", "3", "car", "43"],
+ ],
+ ), # multiple detections
+ (
+ mock_detections(
+ xyxy=[[60, 70, 80, 90], [100, 110, 120, 130]],
+ tracker_id=[4, 5],
+ data={"class_name": ["bike", "dog"]},
+ ),
+ {"frame_number": 44},
+ mock_detections(
+ xyxy=[[65, 75, 85, 95], [105, 115, 125, 135]],
+ confidence=[0.5, 0.4],
+ data={"class_name": ["tree", "cat"]},
+ ),
+ {"frame_number": 45},
+ "test_detections_missing_fields.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "class_name",
+ "frame_number",
+ ],
+ ["60.0", "70.0", "80.0", "90.0", "", "", "4", "bike", "44"],
+ ["100.0", "110.0", "120.0", "130.0", "", "", "5", "dog", "44"],
+ ["65.0", "75.0", "85.0", "95.0", "", "0.5", "", "tree", "45"],
+ ["105.0", "115.0", "125.0", "135.0", "", "0.4", "", "cat", "45"],
+ ],
+ ), # missing fields
+ (
+ mock_detections(
+ xyxy=[[10, 11, 12, 13]],
+ confidence=[0.95],
+ data={"class_name": "unknown", "is_detected": True, "score": 1},
+ ),
+ {"frame_number": 46},
+ mock_detections(
+ xyxy=[[14, 15, 16, 17]],
+ data={"class_name": "artifact", "is_detected": False, "score": 0.85},
+ ),
+ {"frame_number": 47},
+ "test_detections_varied_data.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "class_name",
+ "frame_number",
+ "is_detected",
+ "score",
+ ],
+ [
+ "10.0",
+ "11.0",
+ "12.0",
+ "13.0",
+ "",
+ "0.95",
+ "",
+ "unknown",
+ "46",
+ "True",
+ "1",
+ ],
+ [
+ "14.0",
+ "15.0",
+ "16.0",
+ "17.0",
+ "",
+ "",
+ "",
+ "artifact",
+ "47",
+ "False",
+ "0.85",
+ ],
+ ],
+ ), # Inconsistent Data Types
+ (
+ mock_detections(
+ xyxy=[[20, 21, 22, 23]],
+ ),
+ {
+ "metadata": {"sensor_id": 101, "location": "north"},
+ "tags": ["urgent", "review"],
+ },
+ mock_detections(
+ xyxy=[[14, 15, 16, 17]],
+ ),
+ {
+ "metadata": {"sensor_id": 104, "location": "west"},
+ "tags": ["not-urgent", "done"],
+ },
+ "test_detections_complex_data.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "metadata",
+ "tags",
+ ],
+ [
+ "20.0",
+ "21.0",
+ "22.0",
+ "23.0",
+ "",
+ "",
+ "",
+ "{'sensor_id': 101, 'location': 'north'}",
+ "['urgent', 'review']",
+ ],
+ [
+ "14.0",
+ "15.0",
+ "16.0",
+ "17.0",
+ "",
+ "",
+ "",
+ "{'sensor_id': 104, 'location': 'west'}",
+ "['not-urgent', 'done']",
+ ],
+ ],
+ ), # Complex Data
+ ],
+)
+def test_csv_sink(
+ detections: mock_detections,
+ custom_data: Dict[str, Any],
+ second_detections: mock_detections,
+ second_custom_data: Dict[str, Any],
+ file_name: str,
+ expected_result: List[List[Any]],
+) -> None:
+ with sv.CSVSink(file_name) as sink:
+ sink.append(detections, custom_data)
+ sink.append(second_detections, second_custom_data)
+
+ assert_csv_equal(file_name, expected_result)
+
+
+@pytest.mark.parametrize(
+ "detections, custom_data, "
+ "second_detections, second_custom_data, "
+ "file_name, expected_result",
+ [
+ (
+ mock_detections(
+ xyxy=[[10, 20, 30, 40], [50, 60, 70, 80]],
+ confidence=[0.7, 0.8],
+ class_id=[0, 0],
+ tracker_id=[0, 1],
+ data={"class_name": ["person", "person"]},
+ ),
+ {"frame_number": 42},
+ mock_detections(
+ xyxy=[[15, 25, 35, 45], [55, 65, 75, 85]],
+ confidence=[0.6, 0.9],
+ class_id=[1, 1],
+ tracker_id=[2, 3],
+ data={"class_name": ["car", "car"]},
+ ),
+ {"frame_number": 43},
+ "test_detections.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "class_name",
+ "frame_number",
+ ],
+ ["10.0", "20.0", "30.0", "40.0", "0", "0.7", "0", "person", "42"],
+ ["50.0", "60.0", "70.0", "80.0", "0", "0.8", "1", "person", "42"],
+ ["15.0", "25.0", "35.0", "45.0", "1", "0.6", "2", "car", "43"],
+ ["55.0", "65.0", "75.0", "85.0", "1", "0.9", "3", "car", "43"],
+ ],
+ ), # multiple detections
+ (
+ mock_detections(
+ xyxy=[[60, 70, 80, 90], [100, 110, 120, 130]],
+ tracker_id=[4, 5],
+ data={"class_name": ["bike", "dog"]},
+ ),
+ {"frame_number": 44},
+ mock_detections(
+ xyxy=[[65, 75, 85, 95], [105, 115, 125, 135]],
+ confidence=[0.5, 0.4],
+ data={"class_name": ["tree", "cat"]},
+ ),
+ {"frame_number": 45},
+ "test_detections_missing_fields.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "class_name",
+ "frame_number",
+ ],
+ ["60.0", "70.0", "80.0", "90.0", "", "", "4", "bike", "44"],
+ ["100.0", "110.0", "120.0", "130.0", "", "", "5", "dog", "44"],
+ ["65.0", "75.0", "85.0", "95.0", "", "0.5", "", "tree", "45"],
+ ["105.0", "115.0", "125.0", "135.0", "", "0.4", "", "cat", "45"],
+ ],
+ ), # missing fields
+ (
+ mock_detections(
+ xyxy=[[10, 11, 12, 13]],
+ confidence=[0.95],
+ data={"class_name": "unknown", "is_detected": True, "score": 1},
+ ),
+ {"frame_number": 46},
+ mock_detections(
+ xyxy=[[14, 15, 16, 17]],
+ data={"class_name": "artifact", "is_detected": False, "score": 0.85},
+ ),
+ {"frame_number": 47},
+ "test_detections_varied_data.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "class_name",
+ "frame_number",
+ "is_detected",
+ "score",
+ ],
+ [
+ "10.0",
+ "11.0",
+ "12.0",
+ "13.0",
+ "",
+ "0.95",
+ "",
+ "unknown",
+ "46",
+ "True",
+ "1",
+ ],
+ [
+ "14.0",
+ "15.0",
+ "16.0",
+ "17.0",
+ "",
+ "",
+ "",
+ "artifact",
+ "47",
+ "False",
+ "0.85",
+ ],
+ ],
+ ), # Inconsistent Data Types
+ (
+ mock_detections(
+ xyxy=[[20, 21, 22, 23]],
+ ),
+ {
+ "metadata": {"sensor_id": 101, "location": "north"},
+ "tags": ["urgent", "review"],
+ },
+ mock_detections(
+ xyxy=[[14, 15, 16, 17]],
+ ),
+ {
+ "metadata": {"sensor_id": 104, "location": "west"},
+ "tags": ["not-urgent", "done"],
+ },
+ "test_detections_complex_data.csv",
+ [
+ [
+ "x_min",
+ "y_min",
+ "x_max",
+ "y_max",
+ "class_id",
+ "confidence",
+ "tracker_id",
+ "metadata",
+ "tags",
+ ],
+ [
+ "20.0",
+ "21.0",
+ "22.0",
+ "23.0",
+ "",
+ "",
+ "",
+ "{'sensor_id': 101, 'location': 'north'}",
+ "['urgent', 'review']",
+ ],
+ [
+ "14.0",
+ "15.0",
+ "16.0",
+ "17.0",
+ "",
+ "",
+ "",
+ "{'sensor_id': 104, 'location': 'west'}",
+ "['not-urgent', 'done']",
+ ],
+ ],
+ ), # Complex Data
+ ],
+)
+def test_csv_sink_manual(
+ detections: mock_detections,
+ custom_data: Dict[str, Any],
+ second_detections: mock_detections,
+ second_custom_data: Dict[str, Any],
+ file_name: str,
+ expected_result: List[List[Any]],
+) -> None:
+ sink = sv.CSVSink(file_name)
+ sink.open()
+ sink.append(detections, custom_data)
+ sink.append(second_detections, second_custom_data)
+ sink.close()
+
+ assert_csv_equal(file_name, expected_result)
+
+
+def assert_csv_equal(file_name, expected_rows):
+ with open(file_name, mode="r", newline="") as file:
+ reader = csv.reader(file)
+ for i, row in enumerate(reader):
+ assert (
+ [str(item) for item in expected_rows[i]] == row
+ ), f"Row in CSV didn't match expected output: {row} != {expected_rows[i]}"
+
+ os.remove(file_name)
| diff --git a/docs/detection/tools/save_detections.md b/docs/detection/tools/save_detections.md
new file mode 100644
index 000000000..bdd7c9dc6
--- /dev/null
+++ b/docs/detection/tools/save_detections.md
@@ -0,0 +1,12 @@
+---
+comments: true
+status: new
+---
+
+# Save Detections
+
+<div class="md-typeset">
+ <h2>CSV Sink</h2>
+</div>
+
+:::supervision.detection.tools.csv_sink.CSVSink
diff --git a/mkdocs.yml b/mkdocs.yml
index 220737c82..98feb19a6 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -41,6 +41,7 @@ nav:
- Polygon Zone: detection/tools/polygon_zone.md
- Inference Slicer: detection/tools/inference_slicer.md
- Detection Smoother: detection/tools/smoother.md
+ - Save Detections: detection/tools/save_detections.md
- Annotators: annotators.md
- Trackers: trackers.md
- Datasets: datasets.md
| [
{
"components": [
{
"doc": "A utility class for saving detection data to a CSV file. This class is designed to\nefficiently serialize detection objects into a CSV format, allowing for the\ninclusion of bounding box coordinates and additional attributes like `confidence`,\n`class_id`, and `tracker_... | [
"test/detection/test_csv.py::test_csv_sink[detections0-custom_data0-second_detections0-second_custom_data0-test_detections.csv-expected_result0]",
"test/detection/test_csv.py::test_csv_sink[detections1-custom_data1-second_detections1-second_custom_data1-test_detections_missing_fields.csv-expected_result1]",
"te... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
New function [CSVSink] - allowing to serialise Detections to a CSV file
## Description
This PR introduces the `CSVSink` class to address structured logging of object detection data into CSV format, as requested in issue [#746](https://github.com/roboflow/supervision/issues/746). The class supports logging of bounding boxes, class IDs, confidence scores, and frame numbers, enhancing post-processing data analysis and interpretability.
The PR includes:
- `CSVSink` class implementation.
- Comprehensive unit tests for the class.
- A demo showcasing the integration of `CSVSink` with a video processing pipeline.
Note: The `CSVSink` class was placed directly in the utils folder in the file.py file due to its capability to handle various types of file operations integral to the project's core functionality. Open to suggestions if there's a more appropriate location within the project structure.
## Type of Change
- [x] New feature: Added `CSVSink` class for structured logging of detection data.
## Testing
The `CSVSink` class was validated through a series of unit tests, covering:
- Logging with various custom data scenarios.
- Handling of scenarios with no detections.
- Verification of CSV output format and data integrity.
A demo script demonstrates the class's usage within a video processing workflow, annotating video frames and logging detections to a CSV file. [Demo Here](https://colab.research.google.com/drive/1DyKR_euGxHy4YXD_QGA6bwsBNDs9mScV?usp=sharing)
## Documentation Updates
- Added `CSVSink` class documentation within the module, detailing its purpose, usage, and example usage.
- Updated README to include a section on structured detection data logging, introducing the `CSVSink` class.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in supervision/detection/tools/csv_sink.py]
(definition of CSVSink:)
class CSVSink:
"""A utility class for saving detection data to a CSV file. This class is designed to
efficiently serialize detection objects into a CSV format, allowing for the
inclusion of bounding box coordinates and additional attributes like `confidence`,
`class_id`, and `tracker_id`.
!!! tip
CSVSink allow to pass custom data alongside the detection fields, providing
flexibility for logging various types of information.
Args:
file_name (str): The name of the CSV file where the detections will be stored.
Defaults to 'output.csv'.
Example:
```python
import supervision as sv
from ultralytics import YOLO
model = YOLO(<SOURCE_MODEL_PATH>)
csv_sink = sv.CSVSink(<RESULT_CSV_FILE_PATH>)
frames_generator = sv.get_video_frames_generator(<SOURCE_VIDEO_PATH>)
with csv_sink:
for frame in frames_generator:
result = model(frame)[0]
detections = sv.Detections.from_ultralytics(result)
sink.append(detections, custom_data={'<CUSTOM_LABEL>':'<CUSTOM_DATA>'})
```"""
(definition of CSVSink.__init__:)
def __init__(self, file_name: str = "output.csv") -> None:
"""Initialize the CSVSink instance.
Args:
file_name (str): The name of the CSV file.
Returns:
None"""
(definition of CSVSink.__enter__:)
def __enter__(self) -> CSVSink:
(definition of CSVSink.__exit__:)
def __exit__( self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Optional[Any], ) -> None:
(definition of CSVSink.open:)
def open(self) -> None:
"""Open the CSV file for writing.
Returns:
None"""
(definition of CSVSink.close:)
def close(self) -> None:
"""Close the CSV file.
Returns:
None"""
(definition of CSVSink.parse_detection_data:)
def parse_detection_data( detections: Detections, custom_data: Dict[str, Any] = None ) -> List[Dict[str, Any]]:
(definition of CSVSink.append:)
def append( self, detections: Detections, custom_data: Dict[str, Any] = None ) -> None:
"""Append detection data to the CSV file.
Args:
detections (Detections): The detection data.
custom_data (Dict[str, Any]): Custom data to include.
Returns:
None"""
(definition of CSVSink.parse_field_names:)
def parse_field_names( detections: Detections, custom_data: Dict[str, Any] ) -> List[str]:
[end of new definitions in supervision/detection/tools/csv_sink.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3eb5c0b024e3e46877b7fe4fd66e6177d1308ba0 | |
deepset-ai__haystack-6855 | 6,855 | deepset-ai/haystack | null | ceda4cd6557274b992566d59085be1730f5613f8 | 2024-01-30T13:56:33Z | diff --git a/haystack/utils/__init__.py b/haystack/utils/__init__.py
index e4cdb87d60..df42fc59c8 100644
--- a/haystack/utils/__init__.py
+++ b/haystack/utils/__init__.py
@@ -2,3 +2,4 @@
from haystack.utils.requests_utils import request_with_retry
from haystack.utils.filters import document_matches_filter
from haystack.utils.device import ComponentDevice, DeviceType, Device, DeviceMap
+from haystack.utils.auth import Secret
diff --git a/haystack/utils/auth.py b/haystack/utils/auth.py
new file mode 100644
index 0000000000..324970d569
--- /dev/null
+++ b/haystack/utils/auth.py
@@ -0,0 +1,195 @@
+from enum import Enum
+import os
+from typing import Any, Dict, List, Optional, Union
+from dataclasses import dataclass
+from abc import ABC, abstractmethod
+
+
+class SecretType(Enum):
+ TOKEN = "token"
+ ENV_VAR = "env_var"
+
+ def __str__(self):
+ return self.value
+
+ @staticmethod
+ def from_str(string: str) -> "SecretType":
+ map = {e.value: e for e in SecretType}
+ type = map.get(string)
+ if type is None:
+ raise ValueError(f"Unknown secret type '{string}'")
+ return type
+
+
+@dataclass
+class Secret(ABC):
+ """
+ Encapsulates a secret used for authentication.
+ """
+
+ _type: SecretType
+
+ def __init__(self, type: SecretType):
+ super().__init__()
+ self._type = type
+
+ @staticmethod
+ def from_token(token: str) -> "Secret":
+ """
+ Create a token-based secret. Cannot be serialized.
+
+ :param token:
+ The token to use for authentication.
+ """
+ return TokenSecret(token)
+
+ @staticmethod
+ def from_env_var(env_vars: Union[str, List[str]], *, strict: bool = True) -> "Secret":
+ """
+ Create an environment variable-based secret. Accepts
+ one or more environment variables. Upon resolution, it
+ returns a string token from the first environment variable
+ that is set.
+
+ :param env_vars:
+ A single environment variable or an ordered list of
+ candidate environment variables.
+ :param strict:
+ Whether to raise an exception if none of the environment
+ variables are set.
+ """
+ if isinstance(env_vars, str):
+ env_vars = [env_vars]
+ return EnvVarSecret(env_vars, strict=strict)
+
+ def to_dict(self) -> Dict[str, Any]:
+ """
+ Convert the secret to a JSON-serializable dictionary.
+ Some secrets may not be serializable.
+
+ :returns:
+ The serialized policy.
+ """
+ out = {"type": self._type.value}
+ inner = self._to_dict()
+ assert all(k not in inner for k in out.keys())
+ out.update(inner)
+ return out
+
+ @staticmethod
+ def from_dict(dict: Dict[str, Any]) -> "Secret":
+ """
+ Create a secret from a JSON-serializable dictionary.
+
+ :param dict:
+ The dictionary with the serialized data.
+ :returns:
+ The deserialized secret.
+ """
+ secret_map = {SecretType.TOKEN: TokenSecret, SecretType.ENV_VAR: EnvVarSecret}
+ secret_type = SecretType.from_str(dict["type"])
+ return secret_map[secret_type]._from_dict(dict) # type: ignore
+
+ @abstractmethod
+ def resolve_value(self) -> Optional[Any]:
+ """
+ Resolve the secret to an atomic value. The semantics
+ of the value is secret-dependent.
+
+ :returns:
+ The value of the secret, if any.
+ """
+ pass
+
+ @abstractmethod
+ def _to_dict(self) -> Dict[str, Any]:
+ pass
+
+ @staticmethod
+ @abstractmethod
+ def _from_dict(dict: Dict[str, Any]) -> "Secret":
+ pass
+
+
+@dataclass
+class TokenSecret(Secret):
+ """
+ A secret that uses a string token/API key.
+ Cannot be serialized.
+ """
+
+ _token: str
+
+ def __init__(self, token: str):
+ """
+ Create a token secret.
+
+ :param token:
+ The token to use for authentication.
+ """
+ super().__init__(SecretType.TOKEN)
+ self._token = token
+
+ if len(token) == 0:
+ raise ValueError("Authentication token cannot be empty.")
+
+ def _to_dict(self) -> Dict[str, Any]:
+ raise ValueError(
+ "Cannot serialize token-based secret. Use an alternative secret type like environment variables."
+ )
+
+ @staticmethod
+ def _from_dict(dict: Dict[str, Any]) -> "Secret":
+ raise ValueError(
+ "Cannot deserialize token-based secret. Use an alternative secret type like environment variables."
+ )
+
+ def resolve_value(self) -> Optional[Any]:
+ return self._token
+
+
+@dataclass
+class EnvVarSecret(Secret):
+ """
+ A secret that accepts one or more environment variables.
+ Upon resolution, it returns a string token from the first
+ environment variable that is set. Can be serialized.
+ """
+
+ _env_vars: List[str]
+ _strict: bool
+
+ def __init__(self, env_vars: List[str], *, strict: bool = True):
+ """
+ Create an environment variable secret.
+
+ :param env_vars:
+ Ordered list of candidate environment variables.
+ :param strict:
+ Whether to raise an exception if none of the environment
+ variables are set.
+ """
+ super().__init__(SecretType.ENV_VAR)
+ self._env_vars = list(env_vars)
+ self._strict = strict
+
+ if len(env_vars) == 0:
+ raise ValueError("One or more environment variables must be provided for the secret.")
+
+ def _to_dict(self) -> Dict[str, Any]:
+ return {"env_vars": self._env_vars, "strict": self._strict}
+
+ @staticmethod
+ def _from_dict(dict: Dict[str, Any]) -> "Secret":
+ return EnvVarSecret(dict["env_vars"], strict=dict["strict"])
+
+ def resolve_value(self) -> Optional[Any]:
+ out = None
+ for env_var in self._env_vars:
+ value = os.getenv(env_var)
+ if value is not None:
+ out = value
+ break
+ if out is None and self._strict:
+ raise ValueError(f"None of the following authentication environment variables are set: {self._env_vars}")
+ return out
diff --git a/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml b/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml
new file mode 100644
index 0000000000..31e2fa8b38
--- /dev/null
+++ b/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Expose a `Secret` type to provide consistent API for any component that requires secrets for authentication.
+ Currently supports string tokens and environment variables. Token-based secrets are automatically
+ prevented from being serialized to disk (to prevent accidental leakage of secrets).
| diff --git a/test/utils/test_auth.py b/test/utils/test_auth.py
new file mode 100644
index 0000000000..c9221aad9f
--- /dev/null
+++ b/test/utils/test_auth.py
@@ -0,0 +1,63 @@
+import os
+
+import pytest
+
+from haystack.utils.auth import Secret, EnvVarSecret, SecretType, TokenSecret
+
+
+def test_secret_type():
+ for e in SecretType:
+ assert e == SecretType.from_str(e.value)
+
+ with pytest.raises(ValueError, match="Unknown secret type"):
+ SecretType.from_str("disk")
+
+
+def test_token_secret():
+ secret = Secret.from_token("test-token")
+ assert secret._type == SecretType.TOKEN
+ assert isinstance(secret, TokenSecret)
+ assert secret._token == "test-token"
+ assert secret.resolve_value() == "test-token"
+
+ with pytest.raises(ValueError, match="Cannot serialize token-based secret"):
+ secret.to_dict()
+
+ with pytest.raises(ValueError, match="cannot be empty"):
+ Secret.from_token("")
+
+
+def test_env_var_secret():
+ secret = Secret.from_env_var("TEST_ENV_VAR1")
+ os.environ["TEST_ENV_VAR1"] = "test-token"
+
+ assert secret._type == SecretType.ENV_VAR
+ assert isinstance(secret, EnvVarSecret)
+ assert secret._env_vars == ["TEST_ENV_VAR1"]
+ assert secret._strict is True
+ assert secret.resolve_value() == "test-token"
+
+ del os.environ["TEST_ENV_VAR1"]
+ with pytest.raises(ValueError, match="None of the following .* variables are set"):
+ secret.resolve_value()
+
+ secret = Secret.from_env_var("TEST_ENV_VAR2", strict=False)
+ assert secret._strict is False
+ assert secret.resolve_value() == None
+
+ secret = Secret.from_env_var(["TEST_ENV_VAR2", "TEST_ENV_VAR1"], strict=True)
+ assert secret._env_vars == ["TEST_ENV_VAR2", "TEST_ENV_VAR1"]
+ with pytest.raises(ValueError, match="None of the following .* variables are set"):
+ secret.resolve_value()
+ os.environ["TEST_ENV_VAR1"] = "test-token-2"
+ assert secret.resolve_value() == "test-token-2"
+ os.environ["TEST_ENV_VAR2"] = "test-token"
+ assert secret.resolve_value() == "test-token"
+
+ with pytest.raises(ValueError, match="One or more environment variables"):
+ Secret.from_env_var([])
+
+ assert secret.to_dict() == {"type": "env_var", "env_vars": ["TEST_ENV_VAR2", "TEST_ENV_VAR1"], "strict": True}
+ assert (
+ Secret.from_dict({"type": "env_var", "env_vars": ["TEST_ENV_VAR2", "TEST_ENV_VAR1"], "strict": True}) == secret
+ )
| diff --git a/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml b/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml
new file mode 100644
index 0000000000..31e2fa8b38
--- /dev/null
+++ b/releasenotes/notes/secret-handling-for-components-d576a28135a224db.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Expose a `Secret` type to provide consistent API for any component that requires secrets for authentication.
+ Currently supports string tokens and environment variables. Token-based secrets are automatically
+ prevented from being serialized to disk (to prevent accidental leakage of secrets).
| [
{
"components": [
{
"doc": "",
"lines": [
8,
21
],
"name": "SecretType",
"signature": "class SecretType(Enum):",
"type": "class"
},
{
"doc": "",
"lines": [
12,
13
],
"name": ... | [
"test/utils/test_auth.py::test_secret_type",
"test/utils/test_auth.py::test_token_secret",
"test/utils/test_auth.py::test_env_var_secret"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Implement `Secret` for structured authentication
### Related Issues
- Fixes https://github.com/deepset-ai/haystack/issues/6851
### Proposed Changes:
Expose a `Secret` type to provide common logic for any component that requires a secret for authentication, with the following variants:
- `token` - Use a string literal.
- `env_var` - Resolve the token from the passed environment variable(s).
Token-based secrets are not serializable, i.e., they'll raise an error when serializing a component that uses it. Env vars are serializable.
### Example usage
```python
@component
class MyComponent:
def __init__(self, api_key: Optional[Secret] = None, **kwargs):
self.api_key = api_key
self.backend = None
def warm_up(self):
# Call resolve_value to yield a single result. The semantics of the result is policy-dependent.
# Currently, all supported policies will return a single string token.
self.backend = SomeBackend(api_key=self.api_key.resolve_value() if self.api_key else None, ...)
def to_dict(self):
# Serialize the policy like any other (custom) data. If the policy is token-based, it will
# raise an error.
return default_to_dict(self, api_key=self.api_key.to_dict() if self.api_key else None, ...)
@classmethod
def from_dict(cls, data):
# Deserialize the policy data before passing it to the generic from_dict function.
api_key_data = data["init_parameters"]["api_key"]
api_key = Secret.from_dict(api_key_data) if api_key_data is not None else None
data["init_parameters"]["api_key"] = api_key
return default_from_dict(cls, data)
# No authentication.
component = MyComponent(api_key=None)
# Token based authentication
component = MyComponent(api_key=Secret.from_token("sk-randomAPIkeyasdsa32ekasd32e"))
component.to_dict() # Error! Can't serialize authentication tokens
# Environment variable based authentication
component = MyComponent(api_key=Secret.from_env("OPENAI_API_KEY"))
component.to_dict() # This is fine
```
### How did you test it?
<!-- unit tests, integration tests, manual verification, instructions for manual tests -->
Unit tests
### Notes for the reviewer
<!-- E.g. point out section where the reviewer -->
- A helper function to aid with deserialization will be introduced in a future PR.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/utils/auth.py]
(definition of SecretType:)
class SecretType(Enum):
(definition of SecretType.__str__:)
def __str__(self):
(definition of SecretType.from_str:)
def from_str(string: str) -> "SecretType":
(definition of Secret:)
class Secret(ABC):
"""Encapsulates a secret used for authentication."""
(definition of Secret.__init__:)
def __init__(self, type: SecretType):
(definition of Secret.from_token:)
def from_token(token: str) -> "Secret":
"""Create a token-based secret. Cannot be serialized.
:param token:
The token to use for authentication."""
(definition of Secret.from_env_var:)
def from_env_var(env_vars: Union[str, List[str]], *, strict: bool = True) -> "Secret":
"""Create an environment variable-based secret. Accepts
one or more environment variables. Upon resolution, it
returns a string token from the first environment variable
that is set.
:param env_vars:
A single environment variable or an ordered list of
candidate environment variables.
:param strict:
Whether to raise an exception if none of the environment
variables are set."""
(definition of Secret.to_dict:)
def to_dict(self) -> Dict[str, Any]:
"""Convert the secret to a JSON-serializable dictionary.
Some secrets may not be serializable.
:returns:
The serialized policy."""
(definition of Secret.from_dict:)
def from_dict(dict: Dict[str, Any]) -> "Secret":
"""Create a secret from a JSON-serializable dictionary.
:param dict:
The dictionary with the serialized data.
:returns:
The deserialized secret."""
(definition of Secret.resolve_value:)
def resolve_value(self) -> Optional[Any]:
"""Resolve the secret to an atomic value. The semantics
of the value is secret-dependent.
:returns:
The value of the secret, if any."""
(definition of Secret._to_dict:)
def _to_dict(self) -> Dict[str, Any]:
(definition of Secret._from_dict:)
def _from_dict(dict: Dict[str, Any]) -> "Secret":
(definition of TokenSecret:)
class TokenSecret(Secret):
"""A secret that uses a string token/API key.
Cannot be serialized."""
(definition of TokenSecret.__init__:)
def __init__(self, token: str):
"""Create a token secret.
:param token:
The token to use for authentication."""
(definition of TokenSecret._to_dict:)
def _to_dict(self) -> Dict[str, Any]:
(definition of TokenSecret._from_dict:)
def _from_dict(dict: Dict[str, Any]) -> "Secret":
(definition of TokenSecret.resolve_value:)
def resolve_value(self) -> Optional[Any]:
(definition of EnvVarSecret:)
class EnvVarSecret(Secret):
"""A secret that accepts one or more environment variables.
Upon resolution, it returns a string token from the first
environment variable that is set. Can be serialized."""
(definition of EnvVarSecret.__init__:)
def __init__(self, env_vars: List[str], *, strict: bool = True):
"""Create an environment variable secret.
:param env_vars:
Ordered list of candidate environment variables.
:param strict:
Whether to raise an exception if none of the environment
variables are set."""
(definition of EnvVarSecret._to_dict:)
def _to_dict(self) -> Dict[str, Any]:
(definition of EnvVarSecret._from_dict:)
def _from_dict(dict: Dict[str, Any]) -> "Secret":
(definition of EnvVarSecret.resolve_value:)
def resolve_value(self) -> Optional[Any]:
[end of new definitions in haystack/utils/auth.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
deepset-ai__haystack-6836 | 6,836 | deepset-ai/haystack | null | a771d7f01511bcfe05ebd19961ee319865a5f3af | 2024-01-26T16:58:54Z | diff --git a/docs/pydoc/config/retrievers_api.yml b/docs/pydoc/config/retrievers_api.yml
index f7bad19826..35b6e248ef 100644
--- a/docs/pydoc/config/retrievers_api.yml
+++ b/docs/pydoc/config/retrievers_api.yml
@@ -1,7 +1,12 @@
loaders:
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
- search_path: [../../../haystack/components/retrievers/in_memory]
- modules: ["bm25_retriever", "embedding_retriever"]
+ search_path: [../../../haystack/components/retrievers]
+ modules:
+ [
+ "in_memory/bm25_retriever",
+ "in_memory/embedding_retriever",
+ "filter_retriever",
+ ]
ignore_when_discovered: ["__init__"]
processors:
- type: filter
diff --git a/haystack/components/retrievers/__init__.py b/haystack/components/retrievers/__init__.py
index e69de29bb2..1f4c914247 100644
--- a/haystack/components/retrievers/__init__.py
+++ b/haystack/components/retrievers/__init__.py
@@ -0,0 +1,3 @@
+from haystack.components.retrievers.filter_retriever import FilterRetriever
+
+__all__ = ["FilterRetriever"]
diff --git a/haystack/components/retrievers/filter_retriever.py b/haystack/components/retrievers/filter_retriever.py
new file mode 100644
index 0000000000..5456ad47d7
--- /dev/null
+++ b/haystack/components/retrievers/filter_retriever.py
@@ -0,0 +1,97 @@
+import importlib
+import logging
+
+from typing import Dict, List, Any, Optional
+
+from haystack import component, Document, default_to_dict, default_from_dict, DeserializationError
+from haystack.document_stores.types import DocumentStore
+
+
+logger = logging.getLogger(__name__)
+
+
+@component
+class FilterRetriever:
+ """
+ Retrieves documents that match the provided filters.
+
+ Usage example:
+ ```python
+ from haystack import Document
+ from haystack.components.retrievers import FilterRetriever
+ from haystack.document_stores.in_memory import InMemoryDocumentStore
+
+ docs = [
+ Document(content="Python is a popular programming language", meta={"lang": "en"}),
+ Document(content="python ist eine beliebte Programmiersprache", meta={"lang": "de"}),
+ ]
+
+ doc_store = InMemoryDocumentStore()
+ doc_store.write_documents(docs)
+ retriever = FilterRetriever(doc_store, filters={"field": "lang", "operator": "==", "value": "en"})
+
+ # if passed in the run method, filters will override those provided at initialization
+ result = retriever.run(filters={"field": "lang", "operator": "==", "value": "de"})
+
+ assert "documents" in result
+ assert len(result["documents"]) == 1
+ assert result["documents"][0].content == "python ist eine beliebte Programmiersprache"
+ ```
+ """
+
+ def __init__(self, document_store: DocumentStore, filters: Optional[Dict[str, Any]] = None):
+ """
+ Create the FilterRetriever component.
+
+ :param document_store: An instance of a DocumentStore.
+ :param filters: A dictionary with filters to narrow down the search space. Defaults to `None`.
+ """
+ self.document_store = document_store
+ self.filters = filters
+
+ def _get_telemetry_data(self) -> Dict[str, Any]:
+ """
+ Data that is sent to Posthog for usage analytics.
+ """
+ return {"document_store": type(self.document_store).__name__}
+
+ def to_dict(self) -> Dict[str, Any]:
+ """
+ Serialize this component to a dictionary.
+ """
+ docstore = self.document_store.to_dict()
+ return default_to_dict(self, document_store=docstore, filters=self.filters)
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "FilterRetriever":
+ """
+ Deserialize this component from a dictionary.
+ """
+ init_params = data.get("init_parameters", {})
+ if "document_store" not in init_params:
+ raise DeserializationError("Missing 'document_store' in serialization data")
+ if "type" not in init_params["document_store"]:
+ raise DeserializationError("Missing 'type' in document store's serialization data")
+ try:
+ module_name, type_ = init_params["document_store"]["type"].rsplit(".", 1)
+ logger.debug("Trying to import %s", module_name)
+ module = importlib.import_module(module_name)
+ except (ImportError, DeserializationError) as e:
+ raise DeserializationError(
+ f"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported"
+ ) from e
+
+ docstore_class = getattr(module, type_)
+ data["init_parameters"]["document_store"] = docstore_class.from_dict(data["init_parameters"]["document_store"])
+ return default_from_dict(cls, data)
+
+ @component.output_types(documents=List[Document])
+ def run(self, filters: Optional[Dict[str, Any]] = None):
+ """
+ Run the FilterRetriever on the given input data.
+
+ :param filters: A dictionary with filters to narrow down the search space.
+ If not specified, the FilterRetriever uses the value provided at initialization.
+ :return: The retrieved documents.
+ """
+ return {"documents": self.document_store.filter_documents(filters=filters or self.filters)}
diff --git a/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml b/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml
new file mode 100644
index 0000000000..12fc6794a4
--- /dev/null
+++ b/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add FilterRetriever.
+ It retrieves documents that match the provided (either at init or runtime) filters.
| diff --git a/test/components/retrievers/test_filter_retriever.py b/test/components/retrievers/test_filter_retriever.py
new file mode 100644
index 0000000000..17765be29d
--- /dev/null
+++ b/test/components/retrievers/test_filter_retriever.py
@@ -0,0 +1,139 @@
+from typing import Dict, Any, List
+
+import pytest
+
+from haystack import Pipeline, DeserializationError
+from haystack.testing.factory import document_store_class
+from haystack.components.retrievers.filter_retriever import FilterRetriever
+from haystack.dataclasses import Document
+from haystack.document_stores.in_memory import InMemoryDocumentStore
+
+
+@pytest.fixture()
+def sample_docs():
+ en_docs = [
+ Document(content="Javascript is a popular programming language", meta={"lang": "en"}),
+ Document(content="Python is a popular programming language", meta={"lang": "en"}),
+ Document(content="A chromosome is a package of DNA ", meta={"lang": "en"}),
+ ]
+ de_docs = [
+ Document(content="python ist eine beliebte Programmiersprache", meta={"lang": "de"}),
+ Document(content="javascript ist eine beliebte Programmiersprache", meta={"lang": "de"}),
+ ]
+ all_docs = en_docs + de_docs
+ return {"en_docs": en_docs, "de_docs": de_docs, "all_docs": all_docs}
+
+
+@pytest.fixture()
+def sample_document_store(sample_docs):
+ doc_store = InMemoryDocumentStore()
+ doc_store.write_documents(sample_docs["all_docs"])
+ return doc_store
+
+
+class TestFilterRetriever:
+ @classmethod
+ def _documents_equal(cls, docs1: List[Document], docs2: List[Document]) -> bool:
+ # # Order doesn't matter; we sort before comparing
+ docs1.sort(key=lambda x: x.id)
+ docs2.sort(key=lambda x: x.id)
+ return docs1 == docs2
+
+ def test_init_default(self):
+ retriever = FilterRetriever(InMemoryDocumentStore())
+ assert retriever.filters is None
+
+ def test_init_with_parameters(self):
+ retriever = FilterRetriever(InMemoryDocumentStore(), filters={"lang": "en"})
+ assert retriever.filters == {"lang": "en"}
+
+ def test_to_dict(self):
+ FilterDocStore = document_store_class("MyFakeStore", bases=(InMemoryDocumentStore,))
+ document_store = FilterDocStore()
+ document_store.to_dict = lambda: {"type": "FilterDocStore", "init_parameters": {}}
+ component = FilterRetriever(document_store=document_store)
+
+ data = component.to_dict()
+ assert data == {
+ "type": "haystack.components.retrievers.filter_retriever.FilterRetriever",
+ "init_parameters": {"document_store": {"type": "FilterDocStore", "init_parameters": {}}, "filters": None},
+ }
+
+ def test_to_dict_with_custom_init_parameters(self):
+ ds = InMemoryDocumentStore()
+ serialized_ds = ds.to_dict()
+
+ component = FilterRetriever(document_store=InMemoryDocumentStore(), filters={"lang": "en"})
+ data = component.to_dict()
+ assert data == {
+ "type": "haystack.components.retrievers.filter_retriever.FilterRetriever",
+ "init_parameters": {"document_store": serialized_ds, "filters": {"lang": "en"}},
+ }
+
+ def test_from_dict(self):
+ valid_data = {
+ "type": "haystack.components.retrievers.filter_retriever.FilterRetriever",
+ "init_parameters": {
+ "document_store": {
+ "type": "haystack.document_stores.in_memory.document_store.InMemoryDocumentStore",
+ "init_parameters": {},
+ },
+ "filters": {"lang": "en"},
+ },
+ }
+ component = FilterRetriever.from_dict(valid_data)
+ assert isinstance(component.document_store, InMemoryDocumentStore)
+ assert component.filters == {"lang": "en"}
+
+ def test_from_dict_without_docstore(self):
+ data = {"type": "InMemoryBM25Retriever", "init_parameters": {}}
+ with pytest.raises(DeserializationError, match="Missing 'document_store' in serialization data"):
+ FilterRetriever.from_dict(data)
+
+ def test_retriever_init_filter(self, sample_document_store, sample_docs):
+ retriever = FilterRetriever(sample_document_store, filters={"field": "lang", "operator": "==", "value": "en"})
+ result = retriever.run()
+
+ assert "documents" in result
+ assert len(result["documents"]) == 3
+ assert TestFilterRetriever._documents_equal(result["documents"], sample_docs["en_docs"])
+
+ def test_retriever_runtime_filter(self, sample_document_store, sample_docs):
+ retriever = FilterRetriever(sample_document_store)
+ result = retriever.run(filters={"field": "lang", "operator": "==", "value": "en"})
+
+ assert "documents" in result
+ assert len(result["documents"]) == 3
+ assert TestFilterRetriever._documents_equal(result["documents"], sample_docs["en_docs"])
+
+ def test_retriever_init_filter_run_filter_override(self, sample_document_store, sample_docs):
+ retriever = FilterRetriever(sample_document_store, filters={"field": "lang", "operator": "==", "value": "en"})
+ result = retriever.run(filters={"field": "lang", "operator": "==", "value": "de"})
+
+ assert "documents" in result
+ assert len(result["documents"]) == 2
+ assert TestFilterRetriever._documents_equal(result["documents"], sample_docs["de_docs"])
+
+ @pytest.mark.integration
+ def test_run_with_pipeline(self, sample_document_store, sample_docs):
+ retriever = FilterRetriever(sample_document_store, filters={"field": "lang", "operator": "==", "value": "de"})
+
+ pipeline = Pipeline()
+ pipeline.add_component("retriever", retriever)
+ result: Dict[str, Any] = pipeline.run(data={"retriever": {}})
+
+ assert result
+ assert "retriever" in result
+ results_docs = result["retriever"]["documents"]
+ assert results_docs
+ assert TestFilterRetriever._documents_equal(results_docs, sample_docs["de_docs"])
+
+ result: Dict[str, Any] = pipeline.run(
+ data={"retriever": {"filters": {"field": "lang", "operator": "==", "value": "en"}}}
+ )
+
+ assert result
+ assert "retriever" in result
+ results_docs = result["retriever"]["documents"]
+ assert results_docs
+ assert TestFilterRetriever._documents_equal(results_docs, sample_docs["en_docs"])
| diff --git a/docs/pydoc/config/retrievers_api.yml b/docs/pydoc/config/retrievers_api.yml
index f7bad19826..35b6e248ef 100644
--- a/docs/pydoc/config/retrievers_api.yml
+++ b/docs/pydoc/config/retrievers_api.yml
@@ -1,7 +1,12 @@
loaders:
- type: haystack_pydoc_tools.loaders.CustomPythonLoader
- search_path: [../../../haystack/components/retrievers/in_memory]
- modules: ["bm25_retriever", "embedding_retriever"]
+ search_path: [../../../haystack/components/retrievers]
+ modules:
+ [
+ "in_memory/bm25_retriever",
+ "in_memory/embedding_retriever",
+ "filter_retriever",
+ ]
ignore_when_discovered: ["__init__"]
processors:
- type: filter
diff --git a/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml b/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml
new file mode 100644
index 0000000000..12fc6794a4
--- /dev/null
+++ b/releasenotes/notes/add-filter-retriever-8901af26144d1a17.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add FilterRetriever.
+ It retrieves documents that match the provided (either at init or runtime) filters.
| [
{
"components": [
{
"doc": "Retrieves documents that match the provided filters.\n\nUsage example:\n```python\nfrom haystack import Document\nfrom haystack.components.retrievers import FilterRetriever\nfrom haystack.document_stores.in_memory import InMemoryDocumentStore\n\ndocs = [\n Document(c... | [
"test/components/retrievers/test_filter_retriever.py::TestFilterRetriever::test_init_default",
"test/components/retrievers/test_filter_retriever.py::TestFilterRetriever::test_init_with_parameters",
"test/components/retrievers/test_filter_retriever.py::TestFilterRetriever::test_to_dict",
"test/components/retri... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feat: Add FilterRetriever
### Proposed Changes:
Porting `FilterRetriever` from v1.
### How did you test it?
Unit tests
### Notes for the reviewer
Discussion notes for following points attached later on in this PR:
#### Q1) Should we have top_k as an argument or not?
Option 1: Don't include. Just return all docs
Option 2.1: Include. Simply return the first top_k docs.
Option 2.2: Include. Apply some sort of seeded random sampling on top.
Option 3: Include at the DocumentStore level
- Thought: Ideally this would be handled at the DocumentStore level as fetching all docs for a filter and then FilterRetriever just taking top_k docs could be much more inefficient.
- But then top_k isn't part of the `DocumentStore.filter_documents` protocol. Could add it but maybe would bloat it as perhaps not all DocumentStores support this "sampling with filter"
_Leaning towards Option-1, excluding top_k._
#### Q2) Runtime input metadata value filtering
Idea: For a (init)-specified metadata (e.g. "category"), for convenience we want to allow users to provide a value (e.g. "sports") at runtime. Note: for instance, would be needed as part of FileSimilaritRetriever (#5629)
Option 1: Make `FilterRetriever` flexible enough for this. An attribute like `filter_meta_key` at `__init__` and then `run(filter_meta_value: str,...)` which would form the corresponding filter
`filters = {"field": self.filter_meta_key, "operator": "==", "value": filter_meta_value}` and pass it onto the document_store. Issue: increases complexity in the component.
Option 2: Create another retriever `EqualsFilterRetriever` inheriting from `FilterRetriever` and overwriting `run`
Option 3: Create a `EqualsFilterGenerator` to create the filter and connect it to `FilterRetriever.filters`
_Slightly leaning towards Option3_ though such a component feels too small/specific.
#### Q3) Lazy run/evaluation of the component
Wondering if there is a way to setup the component to only run if there is a downstream component needing its output.
E.g. Possible usage: the FilterRetriever is in one of many optional branches. And we would only want to run it if the branch is followed (e.g. for a certain type of query). I guess this can be generalized to any "inputless" component.
One such setup could be:

Here `FilterRetriever` may run even when it's not necessary.
__Currently leaning towards letting this pass__
#### Q4) location: `haystack.components.retrievers.filter_retriever` is fine?
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/retrievers/filter_retriever.py]
(definition of FilterRetriever:)
class FilterRetriever:
"""Retrieves documents that match the provided filters.
Usage example:
```python
from haystack import Document
from haystack.components.retrievers import FilterRetriever
from haystack.document_stores.in_memory import InMemoryDocumentStore
docs = [
Document(content="Python is a popular programming language", meta={"lang": "en"}),
Document(content="python ist eine beliebte Programmiersprache", meta={"lang": "de"}),
]
doc_store = InMemoryDocumentStore()
doc_store.write_documents(docs)
retriever = FilterRetriever(doc_store, filters={"field": "lang", "operator": "==", "value": "en"})
# if passed in the run method, filters will override those provided at initialization
result = retriever.run(filters={"field": "lang", "operator": "==", "value": "de"})
assert "documents" in result
assert len(result["documents"]) == 1
assert result["documents"][0].content == "python ist eine beliebte Programmiersprache"
```"""
(definition of FilterRetriever.__init__:)
def __init__(self, document_store: DocumentStore, filters: Optional[Dict[str, Any]] = None):
"""Create the FilterRetriever component.
:param document_store: An instance of a DocumentStore.
:param filters: A dictionary with filters to narrow down the search space. Defaults to `None`."""
(definition of FilterRetriever._get_telemetry_data:)
def _get_telemetry_data(self) -> Dict[str, Any]:
"""Data that is sent to Posthog for usage analytics."""
(definition of FilterRetriever.to_dict:)
def to_dict(self) -> Dict[str, Any]:
"""Serialize this component to a dictionary."""
(definition of FilterRetriever.from_dict:)
def from_dict(cls, data: Dict[str, Any]) -> "FilterRetriever":
"""Deserialize this component from a dictionary."""
(definition of FilterRetriever.run:)
def run(self, filters: Optional[Dict[str, Any]] = None):
"""Run the FilterRetriever on the given input data.
:param filters: A dictionary with filters to narrow down the search space.
If not specified, the FilterRetriever uses the value provided at initialization.
:return: The retrieved documents."""
[end of new definitions in haystack/components/retrievers/filter_retriever.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
statsmodels__statsmodels-9130 | 9,130 | statsmodels/statsmodels | null | a0eca865c65ef9336c6403f8ff4bc29a1d3ec26b | 2024-01-24T22:24:28Z | diff --git a/statsmodels/genmod/generalized_linear_model.py b/statsmodels/genmod/generalized_linear_model.py
index 4617ad4bc18..6cd29fd7691 100644
--- a/statsmodels/genmod/generalized_linear_model.py
+++ b/statsmodels/genmod/generalized_linear_model.py
@@ -41,6 +41,7 @@
cached_data,
cached_value,
)
+from statsmodels.tools.data import _as_array_with_name
from statsmodels.tools.docstring import Docstring
from statsmodels.tools.sm_exceptions import (
DomainWarning,
@@ -307,15 +308,21 @@ def __init__(self, endog, exog, family=None, offset=None,
f"{type(family).__name__} family."),
DomainWarning)
+ self._exposure_name = None
+ self._offset_name = None
+ self._freq_weights_name = None
+ self._var_weights_name = None
+
if exposure is not None:
- exposure = np.log(exposure)
+ exposure_array, self._exposure_name = _as_array_with_name(exposure, "exposure")
+ exposure = np.log(exposure_array)
if offset is not None: # this should probably be done upstream
- offset = np.asarray(offset)
+ offset, self._offset_name = _as_array_with_name(offset, "offset")
if freq_weights is not None:
- freq_weights = np.asarray(freq_weights)
+ freq_weights, self._freq_weights_name = _as_array_with_name(freq_weights, "freq_weights")
if var_weights is not None:
- var_weights = np.asarray(var_weights)
+ var_weights, self._var_weights_name = _as_array_with_name(var_weights, "var_weights")
self.freq_weights = freq_weights
self.var_weights = var_weights
@@ -1558,6 +1565,39 @@ def fit_constrained(self, constraints, start_params=None, **fit_kwds):
res._results.results_constrained = res_constr
return res
+ @property
+ def offset_name(self):
+ """
+ Name of the offset variable if available. If offset is not a pd.Series,
+ defaults to 'offset'.
+ """
+ return self._offset_name
+
+ @property
+ def exposure_name(self):
+ """
+ Name of the exposure variable if available. If exposure is not a pd.Series,
+ defaults to 'exposure'.
+ """
+ return self._exposure_name
+
+ @property
+ def freq_weights_name(self):
+ """
+ Name of the freq weights variable if available. If freq_weights is not a
+ pd.Series, defaults to 'freq_weights'.
+ """
+ return self._freq_weights_name
+
+ @property
+ def var_weights_name(self):
+ """
+ Name of var weights variable if available. If var_weights is not a pd.Series,
+ defaults to 'var_weights'.
+
+ """
+ return self._var_weights_name
+
get_prediction_doc = Docstring(pred.get_prediction_glm.__doc__)
get_prediction_doc.remove_parameters("pred_kwds")
diff --git a/statsmodels/tools/data.py b/statsmodels/tools/data.py
index 115bc169999..9e507030c3f 100644
--- a/statsmodels/tools/data.py
+++ b/statsmodels/tools/data.py
@@ -19,7 +19,8 @@ def _check_period_index(x, freq="M"):
if not inferred_freq.startswith(freq):
raise ValueError("Expected frequency {}. Got {}".format(freq,
inferred_freq))
-
+def is_series(obj):
+ return isinstance(obj, pd.Series)
def is_data_frame(obj):
return isinstance(obj, pd.DataFrame)
@@ -121,3 +122,24 @@ def _is_recarray(data):
return isinstance(data, np.core.recarray)
else:
return isinstance(data, np.rec.recarray)
+
+def _as_array_with_name(obj, default_name):
+ """
+ Call np.asarray() on obj and attempt to get the name if its a Series.
+
+ Parameters
+ ----------
+ obj: pd.Series
+ Series to convert to an array
+ default_name: str
+ The default name to return in case the object isn't a pd.Series or has
+ no name attribute.
+
+ Returns
+ -------
+ array_and_name: tuple[np.ndarray, str]
+ The data casted to np.ndarra and the series name or None
+ """
+ if is_series(obj):
+ return (np.asarray(obj), obj.name)
+ return (np.asarray(obj), default_name)
| diff --git a/statsmodels/genmod/tests/test_glm.py b/statsmodels/genmod/tests/test_glm.py
index fd169d36fd6..b3b6876a43d 100644
--- a/statsmodels/genmod/tests/test_glm.py
+++ b/statsmodels/genmod/tests/test_glm.py
@@ -2661,3 +2661,62 @@ def test_tweedie_score():
nhess = approx_hess_cs(pa, lambda x: model.loglike(x, scale=1))
ahess = model.hessian(pa, scale=1)
assert_allclose(nhess, ahess, atol=5e-8, rtol=5e-8)
+
+def test_names():
+ """Test the name properties if using a pandas series.
+
+ They should not be the defaults if the series has a name.
+
+ Don't care about the data here, only testing the name properties.
+ """
+ y = pd.Series([0, 1], name="endog_not_default")
+ x = pd.DataFrame({"a": [1, 1], "b": [1, 0]})
+ exposure = pd.Series([0, 0], name="exposure_not_default")
+ freq_weights = pd.Series([0, 0], name="freq_weights_not_default")
+ offset = pd.Series([0, 0], name="offset_not_default")
+ var_weights = pd.Series([0, 0], name="var_weights_not_default")
+
+ model = GLM(
+ endog=y,
+ exog=x,
+ exposure=exposure,
+ freq_weights=freq_weights,
+ offset=offset,
+ var_weights=var_weights,
+ family=sm.families.Tweedie(),
+ )
+ assert model.offset_name == "offset_not_default"
+ assert model.exposure_name == "exposure_not_default"
+ assert model.freq_weights_name == "freq_weights_not_default"
+ assert model.var_weights_name == "var_weights_not_default"
+ assert model.endog_names == "endog_not_default"
+ assert model.exog_names == ["a", "b"]
+
+
+def test_names_default():
+ """Test the name properties if using a numpy arrays.
+
+ Don't care about the data here, only testing the name properties.
+ """
+ y = np.array([0, 1])
+ x = np.array([[1, 1,], [1, 0]])
+ exposure = np.array([0, 0])
+ freq_weights = np.array([0, 0])
+ offset = np.array([0, 0])
+ var_weights = np.array([0, 0])
+
+ model = GLM(
+ endog=y,
+ exog=x,
+ exposure=exposure,
+ freq_weights=freq_weights,
+ offset=offset,
+ var_weights=var_weights,
+ family=sm.families.Tweedie(),
+ )
+ assert model.offset_name == "offset"
+ assert model.exposure_name == "exposure"
+ assert model.freq_weights_name == "freq_weights"
+ assert model.var_weights_name == "var_weights"
+ assert model.endog_names == "y"
+ assert model.exog_names == ["const", "x1"]
diff --git a/statsmodels/tools/tests/test_data.py b/statsmodels/tools/tests/test_data.py
index 178004aa178..21a4f7f8e7e 100644
--- a/statsmodels/tools/tests/test_data.py
+++ b/statsmodels/tools/tests/test_data.py
@@ -33,3 +33,16 @@ def test_patsy_577():
np.testing.assert_(data._is_using_patsy(endog, None))
exog = dmatrix("var2 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, exog))
+
+
+def test_as_array_with_name_series():
+ s = pandas.Series([1], name="hello")
+ arr, name = data._as_array_with_name(s, "not_used")
+ np.testing.assert_array_equal(np.array([1]), arr)
+ assert name == "hello"
+
+
+def test_as_array_with_name_array():
+ arr, name = data._as_array_with_name(np.array([1]), "default")
+ np.testing.assert_array_equal(np.array([1]), arr)
+ assert name == "default"
| [
{
"components": [
{
"doc": "Name of the offset variable if available. If offset is not a pd.Series,\ndefaults to 'offset'.",
"lines": [
1569,
1574
],
"name": "GLM.offset_name",
"signature": "def offset_name(self):",
"type": "function"
... | [
"statsmodels/genmod/tests/test_glm.py::test_names",
"statsmodels/genmod/tests/test_glm.py::test_names_default",
"statsmodels/tools/tests/test_data.py::test_as_array_with_name_series",
"statsmodels/tools/tests/test_data.py::test_as_array_with_name_array"
] | [
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_params",
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_standard_errors",
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_residuals",
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_R",
"statsmodels/genm... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH: GLM models now save the names of input Pandas Series
Offset, exposure, freq_weights and var_weights have the name of the series saved on the model object. They can be accessed via the class properties.
Closes #9100
- [x] closes #9100
- [x] tests added / passed.
- [x] code/documentation is well formatted.
- [x] properly formatted commit message. See
[NumPy's guide](https://docs.scipy.org/doc/numpy-1.15.1/dev/gitwash/development_workflow.html#writing-the-commit-message).
Getting this as a starting point. This enables the feature for GLM's. Happy to make any recommended changes.
I didn't edit the release notes, wasn't sure if this would be worth mentioning. Kept the smaller notes in the commit message for now.
I opted for only doing one dimensional arrays in the helper because the caller would need to check the shape if it returned more than one name. Seems like a separate helper for multi dimensional might be needed unless there's a different way we were expecting to use it.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in statsmodels/genmod/generalized_linear_model.py]
(definition of GLM.offset_name:)
def offset_name(self):
"""Name of the offset variable if available. If offset is not a pd.Series,
defaults to 'offset'."""
(definition of GLM.exposure_name:)
def exposure_name(self):
"""Name of the exposure variable if available. If exposure is not a pd.Series,
defaults to 'exposure'."""
(definition of GLM.freq_weights_name:)
def freq_weights_name(self):
"""Name of the freq weights variable if available. If freq_weights is not a
pd.Series, defaults to 'freq_weights'."""
(definition of GLM.var_weights_name:)
def var_weights_name(self):
"""Name of var weights variable if available. If var_weights is not a pd.Series,
defaults to 'var_weights'."""
[end of new definitions in statsmodels/genmod/generalized_linear_model.py]
[start of new definitions in statsmodels/tools/data.py]
(definition of is_series:)
def is_series(obj):
(definition of _as_array_with_name:)
def _as_array_with_name(obj, default_name):
"""Call np.asarray() on obj and attempt to get the name if its a Series.
Parameters
----------
obj: pd.Series
Series to convert to an array
default_name: str
The default name to return in case the object isn't a pd.Series or has
no name attribute.
Returns
-------
array_and_name: tuple[np.ndarray, str]
The data casted to np.ndarra and the series name or None"""
[end of new definitions in statsmodels/tools/data.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Save the offset name in `GLM` and results wrapper
#### Is your feature request related to a problem? Please describe
Post model training, it is helpful to know which variable was used as the offset. This aids in post model analysis and deployment.
The offset array is saved and can be accessed after saving the model, but the name of the offset variable is lost when it is a pandas series. The series is [converted to a np.array](https://github.com/statsmodels/statsmodels/blob/ab10165eb897729b50e703b4ea831ae712b53585/statsmodels/genmod/generalized_linear_model.py#L315-L316) which removed the name. Current state, it is difficult to tell which variable may have been used as an offset without tracking it outside the model.
Example use case: Sharing a saved model with a peer. They inspect it to determine what variable was used as the offset in training.
The same may apply to the `var_weights` and `freq_weights` for GLM.
#### Describe the solution you'd like
The model has access on `__init__` to the name of the offset if it is a pandas series. A way to save the offset array's name if it is a series would be wonderful.
Similar to how the endog and exog names can be used in the model summary.
Here's a few ideas I had for how to implement this. Happy to hear if there's a better option.
1. Add an `offset_name` property for GLM
- Similar to the base models [`endog_names`/`exog_names`](https://github.com/statsmodels/statsmodels/blob/ab10165eb897729b50e703b4ea831ae712b53585/statsmodels/base/model.py#L235-L247)
- Simple to implement
2. Add it to the `model.data` so it's handled by [`PandasData`](https://github.com/statsmodels/statsmodels/blob/ab10165eb897729b50e703b4ea831ae712b53585/statsmodels/base/data.py#L498)
- The name could be added back to the offset when making the results wrapper (at least I think that's how it works)
- I could use some guidance on how to implement this if it is the preferred approach
- I think it has something to do with the data attrs but it's a bit hard to track down
3. Do not convert to a numpy array if it is a series
- One could use `model.offset.name` to get at the variable name
- Doesn't line up with how the rest of the code works, it expects numpy arrays
- Likely not a good option
4. User adds `offset_name` attribute to the model class before saving it.
- Seems like a bad idea, would like support in statsmodels
#### Describe alternatives you have considered
Current workaround is saving the offset name in a separate file, which is not ideal.
#### Additional context
Happy to work on a PR for this.
----------
I think currently `1.` is the only option. `2.` would be good but currently the extra arrays are not going through the endog/exog `model.data` handling (at least not in most cases.
We could add a helper function that can be added to the `__init__` as replacement for np.asarray which does asarray plus return additionally the name of the variable if it is available.
This could also be applied to other extra data like exposure and the various weights.
Current extra data like offset, exposure, weights are 1dim.
For flexibility the helper function could check for and distinguish 1dim and 2dim. In the later case, return individual column names instead of the Series name.
The same as in GLM also applies to discrete models and likely to some other models.
--------------------
</issues> | 589f167fed77ebf6031d01ad3de1aa7b0040ced3 | |
google-deepmind__optax-721 | 721 | google-deepmind/optax | null | 437d79cacb115ee2ffcc6430b483a7d068b2b57e | 2024-01-19T10:05:11Z | diff --git a/optax/contrib/__init__.py b/optax/contrib/__init__.py
index b516463e2..f9ed5c47d 100644
--- a/optax/contrib/__init__.py
+++ b/optax/contrib/__init__.py
@@ -22,6 +22,8 @@
from optax.contrib._dadapt_adamw import DAdaptAdamWState
from optax.contrib._mechanic import MechanicState
from optax.contrib._mechanic import mechanize
+from optax.contrib.momo import momo, momo_adam
+from optax.contrib.momo import MomoState, MomoAdamState
from optax.contrib._privacy import differentially_private_aggregate
from optax.contrib._privacy import DifferentiallyPrivateAggregateState
from optax.contrib._privacy import dpsgd
diff --git a/optax/contrib/momo.py b/optax/contrib/momo.py
new file mode 100644
index 000000000..3a17fcefb
--- /dev/null
+++ b/optax/contrib/momo.py
@@ -0,0 +1,297 @@
+# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""MoMo.
+Implementation of
+"MoMo: Momentum Models for Adaptive Learning Rates"
+(https://arxiv.org/abs/2305.07583) by Fabian Schaipp, Ruben Ohana,
+Michael Eickenberg, Aaron Defazio and Robert M. Gower.
+"""
+from typing import NamedTuple, Optional
+import chex
+import jax.numpy as jnp
+import jax.tree_util as tu
+from jax import Array
+from jax import lax
+from optax import tree_utils
+from optax._src import base
+from optax._src import utils
+
+class MomoState(NamedTuple):
+ """State of the `GradientTransformation` returned by `momo`."""
+ exp_avg: base.Updates
+ barf: chex.Array # shape=(), dtype=jnp.float32.
+ gamma: chex.Array # shape=(), dtype=jnp.float32.
+ lb: chex.Array # shape=(), dtype=jnp.float32.
+ count: chex.Array # shape=(), dtype=jnp.int32.
+
+def momo(
+ learning_rate: base.ScalarOrSchedule = 1.0,
+ beta: float = 0.9,
+ lower_bound: float = 0.0,
+ weight_decay: float = 0.0,
+ adapt_lower_bound: bool = False
+) -> base.GradientTransformationExtraArgs:
+ """Adaptive Learning Rates for SGD with momentum.
+
+ MoMo typically needs less tuning for value of ``learning_rate``,
+ by exploting the fact that a lower bound of the loss (or the optimal value) is
+ known. For most tasks, zero is a lower bound and an accurate estimate of the
+ final loss.
+
+ MoMo performs SGD with momentum with a Polyak-type learning rate. The
+ effective step size is
+ ``min(learning_rate, <adaptive term>)``
+
+ where the adaptive term is computed on the fly.
+
+ Note that in ``update_fn`` you need to pass the latest (batch) loss value to
+ the argument `value`.
+
+ References:
+ Schaipp et al., `MoMo: Momentum Models for Adaptive Learning Rates
+ <https://arxiv.org/abs/2305.07583>`_, 2023
+ Args:
+ learning_rate: User-specified learning rate. Recommended to be chosen
+ rather large, by default 1.0.
+ beta: Momentum coefficient (for EMA).
+ lower_bound: Lower bound of the loss. Zero should be a good choice for
+ many tasks.
+ weight_decay: Weight-decay parameter.
+ adapt_lower_bound: If no good guess for the lower bound is available,
+ set this to true, in order to estimate the lower bound on the fly
+ (see the paper for details).
+
+ Returns:
+ A ``GradientTransformation`` object.
+ .. versionadded:: 0.2.3
+ """
+ def init_fn(params: base.Params) -> MomoState:
+ exp_avg = tu.tree_map(lambda p: jnp.zeros(p.shape), params)
+ barf = jnp.zeros([], jnp.float32)
+ gamma = jnp.zeros([], jnp.float32)
+ init_lb = jnp.array(lower_bound, jnp.float32)
+ count = jnp.zeros([], jnp.int32)
+ return MomoState(exp_avg, barf, gamma, init_lb, count)
+
+ def update_fn(
+ updates: base.Updates,
+ state: MomoState,
+ params: Optional[base.Params],
+ value: Optional[Array] = None) -> tuple[base.Updates, MomoState]:
+ if params is None:
+ raise ValueError(base.NO_PARAMS_MSG)
+ if value is None:
+ raise ValueError("""You need to pass the latest loss value to Momo.
+ Use ``jax.value_and_grad`` for this.""")
+ count = state.count
+ # initialize at first gradient, and loss
+ bt = lax.cond(count == 0, lambda: 0., lambda: beta)
+ barf = bt*state.barf + (1-bt)*value
+ exp_avg = tu.tree_map(
+ lambda ea, g: bt*ea + (1-bt)*g,
+ state.exp_avg,
+ updates
+ )
+ gamma = bt*state.gamma + (1-bt)*tree_utils.tree_vdot(updates, params)
+ exp_avg_norm = tree_utils.tree_l2_norm(exp_avg,squared=True)
+ iprod = tree_utils.tree_vdot(exp_avg, params)
+ alpha = learning_rate(count) if callable(learning_rate) else learning_rate
+ # Reset lower bound
+ if adapt_lower_bound:
+ cap = (1+alpha*weight_decay) * (barf-gamma) + iprod
+ this_lb = lax.cond(cap < (1+alpha*weight_decay)*state.lb,
+ lambda: jnp.maximum(cap/(2*(1+alpha*weight_decay)),
+ lower_bound
+ ),
+ lambda: state.lb
+ )
+ else:
+ this_lb = state.lb
+ t1 = jnp.maximum((1+alpha*weight_decay) * (barf-this_lb-gamma) + iprod, 0.
+ )/(exp_avg_norm)
+ # if denom is zero, take no step
+ t1 = lax.cond(exp_avg_norm <= jnp.finfo(float).eps,
+ lambda: 0.,
+ lambda: t1
+ )
+ tau = jnp.minimum(alpha, t1)
+ p_update = tu.tree_map(
+ lambda ea, p:
+ -(alpha*weight_decay)/(1+alpha*weight_decay)*p
+ - tau*ea,
+ exp_avg, params
+ )
+ if adapt_lower_bound:
+ new_lb = jnp.maximum((barf+iprod-gamma) - (1/2)*tau*exp_avg_norm,
+ lower_bound
+ )
+ else:
+ new_lb = state.lb
+ new_state = MomoState(
+ exp_avg=exp_avg,
+ barf=barf,
+ gamma=gamma,
+ lb=new_lb,
+ count=utils.safe_int32_increment(count)
+ )
+ return p_update, new_state
+
+ return base.GradientTransformationExtraArgs(init_fn, update_fn)
+
+class MomoAdamState(NamedTuple):
+ """State of the ``GradientTransformation`` returned by ``momo_adam``."""
+ exp_avg: base.Updates
+ exp_avg_sq: base.Updates
+ barf: chex.Array # shape=(), dtype=jnp.float32.
+ gamma: chex.Array # shape=(), dtype=jnp.float32.
+ lb: chex.Array # shape=(), dtype=jnp.float32.
+ count: chex.Array # shape=(), dtype=jnp.int32.
+
+
+def momo_adam(
+ learning_rate: base.ScalarOrSchedule = 1e-2,
+ b1: float = 0.9,
+ b2: float = 0.999,
+ eps: float = 1e-8,
+ lower_bound: float = 0.0,
+ weight_decay: float = 0.0,
+ adapt_lower_bound: bool = False
+) -> base.GradientTransformationExtraArgs:
+ """Adaptive Learning Rates for Adam(W).
+
+ MoMo-Adam typically needs less tuning for value of ``learning_rate``,
+ by exploting the fact that a lower bound of the loss (or the optimal value) is
+ known. For most tasks, zero is a lower bound and an accurate estimate of the
+ final loss.
+
+ MoMo performs Adam(W) with a Polyak-type learning rate. The
+ effective step size is
+ ``min(learning_rate, <adaptive term>)``
+
+ where the adaptive term is computed on the fly.
+
+ Note that in ``update_fn`` you need to pass the latest (batch) loss value to
+ the argument `value`.
+
+ References:
+ Schaipp et al., `MoMo: Momentum Models for Adaptive Learning Rates
+ <https://arxiv.org/abs/2305.07583>`_, 2023
+ Args:
+ learning_rate: User-specified learning rate. Recommended to be chosen
+ rather large, by default 1.0.
+ b1: Exponential decay rate to track the first moment of past gradients.
+ b2: Exponential decay rate to track the second moment of past gradients.
+ eps: eps for the underlying Adam Optimizer.
+ lower_bound: Lower bound of the loss. Zero should be a good choice for
+ many tasks.
+ weight_decay: Weight-decay parameter. Momo-Adam performs weight decay in
+ similar fashion to AdamW.
+ adapt_lower_bound: If no good guess for the lower bound is available,
+ set this to true, in order to estimate the lower bound on the fly
+ (see the paper for details).
+
+ Returns:
+ A ``GradientTransformation`` object.
+ .. versionadded:: 0.2.3
+ """
+ def init_fn(params: base.Params) -> MomoAdamState:
+ exp_avg = tu.tree_map(lambda p: jnp.zeros(p.shape), params)
+ exp_avg_sq = tu.tree_map(lambda p: jnp.zeros(p.shape, jnp.float32), params)
+ barf = jnp.zeros([], jnp.float32)
+ gamma = jnp.zeros([], jnp.float32)
+ init_lb = jnp.array(lower_bound, jnp.float32)
+ count = jnp.zeros([], jnp.int32)
+ return MomoAdamState(exp_avg, exp_avg_sq, barf, gamma, init_lb, count)
+
+ def update_fn(
+ updates: base.Updates,
+ state: MomoAdamState,
+ params: Optional[base.Params],
+ value: Optional[Array]) -> tuple[base.Updates, MomoAdamState]:
+ if params is None:
+ raise ValueError(base.NO_PARAMS_MSG)
+ if value is None:
+ raise ValueError("""You need to pass the latest loss value to Momo.
+ Use ``jax.value_and_grad`` for this.""")
+ count = state.count
+ barf = b1*state.barf + (1-b1)*value
+ exp_avg = tu.tree_map(
+ lambda ea, g: b1 * ea + (1-b1) * g,
+ state.exp_avg,
+ updates
+ )
+ exp_avg_sq = tu.tree_map(
+ lambda eas, g: b2 * eas + (1-b2) * g * g,
+ state.exp_avg_sq,
+ updates,
+ )
+ bc2 = 1-b2**(count+1)
+ precond = tu.tree_map(
+ lambda eas: eps + jnp.sqrt(eas/bc2),
+ exp_avg_sq
+ )
+ exp_avg_weighted = tu.tree_map(
+ lambda ea, prec: ea/prec,
+ exp_avg,
+ precond
+ )
+ exp_avg_norm = tree_utils.tree_vdot(exp_avg,exp_avg_weighted)
+ gamma = b1*state.gamma + (1-b1)*tree_utils.tree_vdot(updates, params)
+ iprod = tree_utils.tree_vdot(exp_avg, params)
+ alpha = learning_rate(count) if callable(learning_rate) else learning_rate
+ bc1 = 1-b1**(count+1)
+ # Reset lower bound
+ if adapt_lower_bound:
+ cap = (1+alpha*weight_decay) * (barf-gamma) + iprod
+ this_lb = lax.cond(cap < (1+alpha*weight_decay)*bc1*state.lb,
+ lambda: jnp.maximum(cap/(2*bc1*(1+alpha*weight_decay)),
+ lower_bound
+ ),
+ lambda: state.lb
+ )
+ else:
+ this_lb = state.lb
+ t1 = jnp.maximum((1+alpha*weight_decay)*(barf-bc1*this_lb-gamma) + iprod, 0.
+ )/(exp_avg_norm)
+ # if denom is zero, take no step
+ t1 = lax.cond(exp_avg_norm <= jnp.finfo(float).eps,
+ lambda: 0.,
+ lambda: t1
+ )
+ tau = jnp.minimum(alpha/bc1, t1)
+ p_update = tu.tree_map(
+ lambda ea, prec, p:
+ -(alpha*weight_decay)/(1+alpha*weight_decay)*p
+ - tau*ea/prec,
+ exp_avg,
+ precond,
+ params
+ )
+ if adapt_lower_bound:
+ new_lb = ((barf+iprod-gamma) - (1/2)*tau*exp_avg_norm)/bc1
+ new_lb = jnp.maximum(new_lb, lower_bound)
+ else:
+ new_lb = state.lb
+ new_state = MomoAdamState(
+ exp_avg=exp_avg,
+ exp_avg_sq=exp_avg_sq,
+ barf=barf,
+ gamma=gamma,
+ lb=new_lb,
+ count=utils.safe_int32_increment(count)
+ )
+ return p_update, new_state
+
+ return base.GradientTransformationExtraArgs(init_fn, update_fn)
| diff --git a/optax/contrib/_common_test.py b/optax/contrib/_common_test.py
index 01ac61c3c..4a11a5548 100644
--- a/optax/contrib/_common_test.py
+++ b/optax/contrib/_common_test.py
@@ -34,6 +34,8 @@
dict(opt_name='cocob', opt_kwargs=dict(alpha=100.0, eps=1e-8)),
dict(opt_name='dadapt_adamw', opt_kwargs=dict(learning_rate=1e-1)),
dict(opt_name='prodigy', opt_kwargs=dict(learning_rate=1e-1)),
+ dict(opt_name='momo', opt_kwargs=dict(learning_rate=1e-1)),
+ dict(opt_name='momo_adam', opt_kwargs=dict(learning_rate=1e-1)),
)
@@ -42,7 +44,7 @@ def _setup_parabola(dtype):
initial_params = jnp.array([-1.0, 10.0, 1.0], dtype=dtype)
final_params = jnp.array([1.0, -1.0, 1.0], dtype=dtype)
- @jax.grad
+ @jax.value_and_grad
def get_updates(params):
return jnp.sum(numerics.abs_sq(params - final_params))
@@ -57,7 +59,7 @@ def _setup_rosenbrock(dtype):
initial_params = jnp.array([0.0, 0.0], dtype=dtype)
final_params = jnp.array([a, a**2], dtype=dtype)
- @jax.grad
+ @jax.value_and_grad
def get_updates(params):
return numerics.abs_sq(a - params[0]) + b * numerics.abs_sq(
params[1] - params[0] ** 2
@@ -79,8 +81,12 @@ def test_optimizers(self, opt_name, opt_kwargs, target, dtype):
@jax.jit
def step(params, state):
- updates = get_updates(params)
- updates, state = opt.update(updates, state, params)
+ value, updates = get_updates(params)
+ if opt_name in ['momo', 'momo_adam']:
+ update_kwargs = {'value': value}
+ else:
+ update_kwargs = {}
+ updates, state = opt.update(updates, state, params, **update_kwargs)
params = update.apply_updates(params, updates)
return params, state
@@ -107,12 +113,20 @@ def test_optimizers_can_be_wrapped_in_inject_hyperparams(
params = [jnp.negative(jnp.ones((2, 3))), jnp.ones((2, 5, 2))]
grads = [jnp.ones((2, 3)), jnp.negative(jnp.ones((2, 5, 2)))]
+ if opt_name in ['momo', 'momo_adam']:
+ update_kwargs = {'value': jnp.array(1.)}
+ else:
+ update_kwargs = {}
+
state = self.variant(opt.init)(params)
- updates, new_state = self.variant(opt.update)(grads, state, params)
+ updates, new_state = self.variant(opt.update)(
+ grads, state, params, **update_kwargs
+ )
state_inject = self.variant(opt_inject.init)(params)
updates_inject, new_state_inject = self.variant(opt_inject.update)(
- grads, state_inject, params)
+ grads, state_inject, params, **update_kwargs
+ )
with self.subTest('Equality of updates.'):
chex.assert_trees_all_close(updates_inject, updates, rtol=1e-4)
diff --git a/optax/contrib/momo_test.py b/optax/contrib/momo_test.py
new file mode 100644
index 000000000..4729918aa
--- /dev/null
+++ b/optax/contrib/momo_test.py
@@ -0,0 +1,114 @@
+# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for `momo.py`."""
+
+from absl.testing import absltest
+from absl.testing import parameterized
+import chex
+import jax
+import jax.numpy as jnp
+from optax import contrib
+from optax._src import numerics
+from optax._src import update
+from optax.tree_utils import _state_utils
+
+
+def _setup_parabola(dtype):
+ """Quadratic function as an optimization target."""
+ initial_params = jnp.array([-1.0, 10.0, 1.0], dtype=dtype)
+ final_params = jnp.array([1.0, -1.0, 1.0], dtype=dtype)
+
+ @jax.value_and_grad
+ def get_updates(params):
+ return jnp.sum(numerics.abs_sq(params - final_params))
+
+ return initial_params, final_params, get_updates
+
+
+def _setup_rosenbrock(dtype):
+ """Rosenbrock function as an optimization target."""
+ a = 1.0
+ b = 100.0
+
+ initial_params = jnp.array([0.0, 0.0], dtype=dtype)
+ final_params = jnp.array([a, a**2], dtype=dtype)
+
+ @jax.value_and_grad
+ def get_updates(params):
+ return numerics.abs_sq(a - params[0]) + b * numerics.abs_sq(
+ params[1] - params[0] ** 2
+ )
+
+ return initial_params, final_params, get_updates
+
+
+class MomoTest(chex.TestCase):
+ """Tests for Momo optimizer."""
+
+ @parameterized.product(
+ opt_name=('momo',),
+ target=(_setup_parabola, _setup_rosenbrock),
+ dtype=(jnp.float32,),
+ )
+ def test_optimization(self, opt_name, target, dtype):
+ opt = getattr(contrib, opt_name)()
+ initial_params, final_params, get_updates = target(dtype)
+ @jax.jit
+ def step(params, state):
+ value, updates = get_updates(params)
+ updates, state = opt.update(updates, state, params, value)
+ params = update.apply_updates(params, updates)
+ return params, state
+
+ params = initial_params
+ state = opt.init(params)
+ # A no-op change, to verify that tree map works.
+ state = _state_utils.tree_map_params(opt, lambda v: v, state)
+
+ for _ in range(1500):
+ params, state = step(params, state)
+
+ chex.assert_trees_all_close(params, final_params, rtol=1e-1, atol=1e-1)
+
+class MomoAdamTest(chex.TestCase):
+ """Tests for Momo-Adam optimizer."""
+
+ @parameterized.product(
+ opt_name=('momo_adam',),
+ target=(_setup_parabola, _setup_rosenbrock),
+ dtype=(jnp.float32,),
+ )
+ def test_optimization(self, opt_name, target, dtype):
+ opt = getattr(contrib, opt_name)(learning_rate=0.1)
+ initial_params, final_params, get_updates = target(dtype)
+ @jax.jit
+ def step(params, state):
+ value, updates = get_updates(params)
+ updates, state = opt.update(updates, state, params, value)
+ params = update.apply_updates(params, updates)
+ return params, state
+
+ params = initial_params
+ state = opt.init(params)
+ # A no-op change, to verify that tree map works.
+ state = _state_utils.tree_map_params(opt, lambda v: v, state)
+
+ for _ in range(1500):
+ params, state = step(params, state)
+
+ chex.assert_trees_all_close(params, final_params, rtol=1e-1, atol=1e-1)
+
+if __name__ == '__main__':
+ absltest.main()
| [
{
"components": [
{
"doc": "State of the `GradientTransformation` returned by `momo`.",
"lines": [
31,
37
],
"name": "MomoState",
"signature": "class MomoState(NamedTuple):",
"type": "class"
},
{
"doc": "Adaptive Learn... | [
"optax/contrib/_common_test.py::ContribTest::test_optimizers6",
"optax/contrib/_common_test.py::ContribTest::test_optimizers7",
"optax/contrib/_common_test.py::ContribTest::test_optimizers8",
"optax/contrib/_common_test.py::ContribTest::test_optimizers9",
"optax/contrib/_common_test.py::ContribTest::test_op... | [
"optax/contrib/_common_test.py::ContribTest::test_optimizers0",
"optax/contrib/_common_test.py::ContribTest::test_optimizers1",
"optax/contrib/_common_test.py::ContribTest::test_optimizers2",
"optax/contrib/_common_test.py::ContribTest::test_optimizers3",
"optax/contrib/_common_test.py::ContribTest::test_op... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Implementation of MoMo algorithm
Upon suggestion by @fabianp I implemented the MoMo algorithm. MoMo is esentially a Polyak step size for SGD with momentum and for Adam (see https://arxiv.org/abs/2305.07583).
The Rosenbrock and least squares tests are passing locally.
I have still a few questions as this is the first time I am implementing in Optax:
* MoMo needs in each iteration the latest batch loss passed into `update_fn`. I named this argument `loss`, and adpated the tests. But maybe you have a convention how sth like this would be handled.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in optax/contrib/momo.py]
(definition of MomoState:)
class MomoState(NamedTuple):
"""State of the `GradientTransformation` returned by `momo`."""
(definition of momo:)
def momo( learning_rate: base.ScalarOrSchedule = 1.0, beta: float = 0.9, lower_bound: float = 0.0, weight_decay: float = 0.0, adapt_lower_bound: bool = False ) -> base.GradientTransformationExtraArgs:
"""Adaptive Learning Rates for SGD with momentum.
MoMo typically needs less tuning for value of ``learning_rate``,
by exploting the fact that a lower bound of the loss (or the optimal value) is
known. For most tasks, zero is a lower bound and an accurate estimate of the
final loss.
MoMo performs SGD with momentum with a Polyak-type learning rate. The
effective step size is
``min(learning_rate, <adaptive term>)``
where the adaptive term is computed on the fly.
Note that in ``update_fn`` you need to pass the latest (batch) loss value to
the argument `value`.
References:
Schaipp et al., `MoMo: Momentum Models for Adaptive Learning Rates
<https://arxiv.org/abs/2305.07583>`_, 2023
Args:
learning_rate: User-specified learning rate. Recommended to be chosen
rather large, by default 1.0.
beta: Momentum coefficient (for EMA).
lower_bound: Lower bound of the loss. Zero should be a good choice for
many tasks.
weight_decay: Weight-decay parameter.
adapt_lower_bound: If no good guess for the lower bound is available,
set this to true, in order to estimate the lower bound on the fly
(see the paper for details).
Returns:
A ``GradientTransformation`` object.
.. versionadded:: 0.2.3"""
(definition of momo.init_fn:)
def init_fn(params: base.Params) -> MomoState:
(definition of momo.update_fn:)
def update_fn( updates: base.Updates, state: MomoState, params: Optional[base.Params], value: Optional[Array] = None) -> tuple[base.Updates, MomoState]:
(definition of MomoAdamState:)
class MomoAdamState(NamedTuple):
"""State of the ``GradientTransformation`` returned by ``momo_adam``."""
(definition of momo_adam:)
def momo_adam( learning_rate: base.ScalarOrSchedule = 1e-2, b1: float = 0.9, b2: float = 0.999, eps: float = 1e-8, lower_bound: float = 0.0, weight_decay: float = 0.0, adapt_lower_bound: bool = False ) -> base.GradientTransformationExtraArgs:
"""Adaptive Learning Rates for Adam(W).
MoMo-Adam typically needs less tuning for value of ``learning_rate``,
by exploting the fact that a lower bound of the loss (or the optimal value) is
known. For most tasks, zero is a lower bound and an accurate estimate of the
final loss.
MoMo performs Adam(W) with a Polyak-type learning rate. The
effective step size is
``min(learning_rate, <adaptive term>)``
where the adaptive term is computed on the fly.
Note that in ``update_fn`` you need to pass the latest (batch) loss value to
the argument `value`.
References:
Schaipp et al., `MoMo: Momentum Models for Adaptive Learning Rates
<https://arxiv.org/abs/2305.07583>`_, 2023
Args:
learning_rate: User-specified learning rate. Recommended to be chosen
rather large, by default 1.0.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: eps for the underlying Adam Optimizer.
lower_bound: Lower bound of the loss. Zero should be a good choice for
many tasks.
weight_decay: Weight-decay parameter. Momo-Adam performs weight decay in
similar fashion to AdamW.
adapt_lower_bound: If no good guess for the lower bound is available,
set this to true, in order to estimate the lower bound on the fly
(see the paper for details).
Returns:
A ``GradientTransformation`` object.
.. versionadded:: 0.2.3"""
(definition of momo_adam.init_fn:)
def init_fn(params: base.Params) -> MomoAdamState:
(definition of momo_adam.update_fn:)
def update_fn( updates: base.Updates, state: MomoAdamState, params: Optional[base.Params], value: Optional[Array]) -> tuple[base.Updates, MomoAdamState]:
[end of new definitions in optax/contrib/momo.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 1e08bccf195ac54e7d9d766eb5e69345bf0e3230 | ||
sphinx-doc__sphinx-11891 | 11,891 | sphinx-doc/sphinx | 7.3 | 1785fc93520cc7595f740f3270f715d17a67c7df | 2024-01-18T02:03:03Z | diff --git a/sphinx/builders/html/__init__.py b/sphinx/builders/html/__init__.py
index 287bb79ce3c..be2bf09933a 100644
--- a/sphinx/builders/html/__init__.py
+++ b/sphinx/builders/html/__init__.py
@@ -884,7 +884,7 @@ def write_buildinfo(self) -> None:
def cleanup(self) -> None:
# clean up theme stuff
if self.theme:
- self.theme.cleanup()
+ self.theme._cleanup()
def post_process_images(self, doctree: Node) -> None:
"""Pick the best candidate for an image and link down-scaled images to
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 49bf22eb2d1..e95f4c8cc25 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -3,6 +3,7 @@
from __future__ import annotations
import configparser
+import contextlib
import os
import shutil
import sys
@@ -11,13 +12,6 @@
from typing import TYPE_CHECKING, Any
from zipfile import ZipFile
-if sys.version_info >= (3, 10):
- from importlib.metadata import entry_points
-else:
- from importlib_metadata import entry_points
-
-import contextlib
-
from sphinx import package_dir
from sphinx.config import check_confval_types as _config_post_init
from sphinx.errors import ThemeError
@@ -25,28 +19,20 @@
from sphinx.util import logging
from sphinx.util.osutil import ensuredir
+if sys.version_info >= (3, 10):
+ from importlib.metadata import entry_points
+else:
+ from importlib_metadata import entry_points
+
if TYPE_CHECKING:
from sphinx.application import Sphinx
+__all__ = 'Theme', 'HTMLThemeFactory'
logger = logging.getLogger(__name__)
-NODEFAULT = object()
-THEMECONF = 'theme.conf'
-
-
-def extract_zip(filename: str, targetdir: str) -> None:
- """Extract zip file to target directory."""
- ensuredir(targetdir)
-
- with ZipFile(filename) as archive:
- for name in archive.namelist():
- if name.endswith('/'):
- continue
- entry = path.join(targetdir, name)
- ensuredir(path.dirname(entry))
- with open(path.join(entry), 'wb') as fp:
- fp.write(archive.read(name))
+_NO_DEFAULT = object()
+_THEME_CONF = 'theme.conf'
class Theme:
@@ -55,78 +41,57 @@ class Theme:
This class supports both theme directory and theme archive (zipped theme).
"""
- def __init__(self, name: str, theme_path: str, factory: HTMLThemeFactory) -> None:
+ def __init__(
+ self,
+ name: str,
+ *,
+ configs: dict[str, configparser.RawConfigParser],
+ paths: list[str],
+ tmp_dirs: list[str],
+ ) -> None:
self.name = name
- self.base = None
- self.rootdir = None
-
- if path.isdir(theme_path):
- # already a directory, do nothing
- self.rootdir = None
- self.themedir = theme_path
- else:
- # extract the theme to a temp directory
- self.rootdir = tempfile.mkdtemp('sxt')
- self.themedir = path.join(self.rootdir, name)
- extract_zip(theme_path, self.themedir)
+ self._dirs = paths
+ self._tmp_dirs = tmp_dirs
- self.config = configparser.RawConfigParser()
- config_file_path = path.join(self.themedir, THEMECONF)
- if not os.path.isfile(config_file_path):
- raise ThemeError(__('theme configuration file %r not found') % config_file_path)
- self.config.read(config_file_path, encoding='utf-8')
+ theme: dict[str, Any] = {}
+ options: dict[str, Any] = {}
+ for config in reversed(configs.values()):
+ theme |= dict(config.items('theme'))
+ if config.has_section('options'):
+ options |= dict(config.items('options'))
- try:
- inherit = self.config.get('theme', 'inherit')
- except configparser.NoSectionError as exc:
- raise ThemeError(__('theme %r doesn\'t have "theme" setting') % name) from exc
- except configparser.NoOptionError as exc:
- raise ThemeError(__('theme %r doesn\'t have "inherit" setting') % name) from exc
-
- if inherit != 'none':
- try:
- self.base = factory.create(inherit)
- except ThemeError as exc:
- raise ThemeError(__('no theme named %r found, inherited by %r') %
- (inherit, name)) from exc
+ self._settings = theme
+ self._options = options
def get_theme_dirs(self) -> list[str]:
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
- if self.base is None:
- return [self.themedir]
- else:
- return [self.themedir] + self.base.get_theme_dirs()
+ return self._dirs.copy()
- def get_config(self, section: str, name: str, default: Any = NODEFAULT) -> Any:
+ def get_config(self, section: str, name: str, default: Any = _NO_DEFAULT) -> Any:
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
- try:
- return self.config.get(section, name)
- except (configparser.NoOptionError, configparser.NoSectionError) as exc:
- if self.base:
- return self.base.get_config(section, name, default)
-
- if default is NODEFAULT:
- raise ThemeError(__('setting %s.%s occurs in none of the '
- 'searched theme configs') % (section, name)) from exc
- return default
+ if section == 'theme':
+ value = self._settings.get(name, default)
+ elif section == 'options':
+ value = self._options.get(name, default)
+ else:
+ value = _NO_DEFAULT
+ if value is _NO_DEFAULT:
+ msg = __(
+ 'setting %s.%s occurs in none of the searched theme configs',
+ ) % (section, name)
+ raise ThemeError(msg)
+ return value
def get_options(self, overrides: dict[str, Any] | None = None) -> dict[str, Any]:
"""Return a dictionary of theme options and their values."""
if overrides is None:
overrides = {}
- if self.base:
- options = self.base.get_options()
- else:
- options = {}
-
- with contextlib.suppress(configparser.NoSectionError):
- options.update(self.config.items('options'))
-
+ options = self._options.copy()
for option, value in overrides.items():
if option not in options:
logger.warning(__('unsupported theme option %r given') % option)
@@ -135,50 +100,38 @@ def get_options(self, overrides: dict[str, Any] | None = None) -> dict[str, Any]
return options
- def cleanup(self) -> None:
+ def _cleanup(self) -> None:
"""Remove temporary directories."""
- if self.rootdir:
+ for tmp_dir in self._tmp_dirs:
with contextlib.suppress(Exception):
- shutil.rmtree(self.rootdir)
-
- if self.base:
- self.base.cleanup()
-
-
-def is_archived_theme(filename: str) -> bool:
- """Check whether the specified file is an archived theme file or not."""
- try:
- with ZipFile(filename) as f:
- return THEMECONF in f.namelist()
- except Exception:
- return False
+ shutil.rmtree(tmp_dir)
class HTMLThemeFactory:
"""A factory class for HTML Themes."""
def __init__(self, app: Sphinx) -> None:
- self.app = app
- self.themes = app.registry.html_themes
- self.load_builtin_themes()
+ self._app = app
+ self._themes = app.registry.html_themes
+ self._load_builtin_themes()
if getattr(app.config, 'html_theme_path', None):
- self.load_additional_themes(app.config.html_theme_path)
+ self._load_additional_themes(app.config.html_theme_path)
- def load_builtin_themes(self) -> None:
+ def _load_builtin_themes(self) -> None:
"""Load built-in themes."""
- themes = self.find_themes(path.join(package_dir, 'themes'))
+ themes = self._find_themes(path.join(package_dir, 'themes'))
for name, theme in themes.items():
- self.themes[name] = theme
+ self._themes[name] = theme
- def load_additional_themes(self, theme_paths: str) -> None:
+ def _load_additional_themes(self, theme_paths: str) -> None:
"""Load additional themes placed at specified directories."""
for theme_path in theme_paths:
- abs_theme_path = path.abspath(path.join(self.app.confdir, theme_path))
- themes = self.find_themes(abs_theme_path)
+ abs_theme_path = path.abspath(path.join(self._app.confdir, theme_path))
+ themes = self._find_themes(abs_theme_path)
for name, theme in themes.items():
- self.themes[name] = theme
+ self._themes[name] = theme
- def load_extra_theme(self, name: str) -> None:
+ def _load_extra_theme(self, name: str) -> None:
"""Try to load a theme with the specified name.
This uses the ``sphinx.html_themes`` entry point from package metadata.
@@ -189,10 +142,11 @@ def load_extra_theme(self, name: str) -> None:
except KeyError:
pass
else:
- self.app.registry.load_extension(self.app, entry_point.module)
- _config_post_init(None, self.app.config)
+ self._app.registry.load_extension(self._app, entry_point.module)
+ _config_post_init(self._app, self._app.config)
- def find_themes(self, theme_path: str) -> dict[str, str]:
+ @staticmethod
+ def _find_themes(theme_path: str) -> dict[str, str]:
"""Search themes from specified directory."""
themes: dict[str, str] = {}
if not path.isdir(theme_path):
@@ -201,24 +155,113 @@ def find_themes(self, theme_path: str) -> dict[str, str]:
for entry in os.listdir(theme_path):
pathname = path.join(theme_path, entry)
if path.isfile(pathname) and entry.lower().endswith('.zip'):
- if is_archived_theme(pathname):
+ if _is_archived_theme(pathname):
name = entry[:-4]
themes[name] = pathname
else:
logger.warning(__('file %r on theme path is not a valid '
'zipfile or contains no theme'), entry)
else:
- if path.isfile(path.join(pathname, THEMECONF)):
+ if path.isfile(path.join(pathname, _THEME_CONF)):
themes[entry] = pathname
return themes
def create(self, name: str) -> Theme:
"""Create an instance of theme."""
- if name not in self.themes:
- self.load_extra_theme(name)
+ if name not in self._themes:
+ self._load_extra_theme(name)
- if name not in self.themes:
+ if name not in self._themes:
raise ThemeError(__('no theme named %r found (missing theme.conf?)') % name)
- return Theme(name, self.themes[name], factory=self)
+ themes, theme_dirs, tmp_dirs = _load_theme_with_ancestors(self._themes, name)
+ return Theme(name, configs=themes, paths=theme_dirs, tmp_dirs=tmp_dirs)
+
+
+def _is_archived_theme(filename: str, /) -> bool:
+ """Check whether the specified file is an archived theme file or not."""
+ try:
+ with ZipFile(filename) as f:
+ return _THEME_CONF in f.namelist()
+ except Exception:
+ return False
+
+
+def _load_theme_with_ancestors(
+ theme_paths: dict[str, str],
+ name: str, /,
+) -> tuple[dict[str, configparser.RawConfigParser], list[str], list[str]]:
+ themes: dict[str, configparser.RawConfigParser] = {}
+ theme_dirs: list[str] = []
+ tmp_dirs: list[str] = []
+
+ # having 10+ theme ancestors is ludicrous
+ for _ in range(10):
+ inherit, theme_dir, tmp_dir, config = _load_theme(name, theme_paths[name])
+ theme_dirs.append(theme_dir)
+ if tmp_dir is not None:
+ tmp_dirs.append(tmp_dir)
+ themes[name] = config
+ if inherit == 'none':
+ break
+ if inherit in themes:
+ msg = __('The %r theme has circular inheritance') % name
+ raise ThemeError(msg)
+ if inherit not in theme_paths:
+ msg = __(
+ 'The %r theme inherits from %r, which is not a loaded theme. '
+ 'Loaded themes are: %s',
+ ) % (name, inherit, ', '.join(sorted(theme_paths)))
+ raise ThemeError(msg)
+ name = inherit
+ else:
+ msg = __('The %r theme has too many ancestors') % name
+ raise ThemeError(msg)
+
+ return themes, theme_dirs, tmp_dirs
+
+
+def _load_theme(
+ name: str, theme_path: str, /,
+) -> tuple[str, str, str | None, configparser.RawConfigParser]:
+ if path.isdir(theme_path):
+ # already a directory, do nothing
+ tmp_dir = None
+ theme_dir = theme_path
+ else:
+ # extract the theme to a temp directory
+ tmp_dir = tempfile.mkdtemp('sxt')
+ theme_dir = path.join(tmp_dir, name)
+ _extract_zip(theme_path, theme_dir)
+
+ config = _load_theme_conf(theme_dir)
+ try:
+ inherit = config.get('theme', 'inherit')
+ except (configparser.NoOptionError, configparser.NoSectionError):
+ msg = __('The %r theme must define the "theme.inherit" setting') % name
+ raise ThemeError(msg) from None
+ return inherit, theme_dir, tmp_dir, config
+
+
+def _extract_zip(filename: str, target_dir: str, /) -> None:
+ """Extract zip file to target directory."""
+ ensuredir(target_dir)
+
+ with ZipFile(filename) as archive:
+ for name in archive.namelist():
+ if name.endswith('/'):
+ continue
+ entry = path.join(target_dir, name)
+ ensuredir(path.dirname(entry))
+ with open(path.join(entry), 'wb') as fp:
+ fp.write(archive.read(name))
+
+
+def _load_theme_conf(theme_dir: os.PathLike[str] | str, /) -> configparser.RawConfigParser:
+ c = configparser.RawConfigParser()
+ config_file_path = path.join(theme_dir, _THEME_CONF)
+ if not os.path.isfile(config_file_path):
+ raise ThemeError(__('theme configuration file %r not found') % config_file_path)
+ c.read(config_file_path, encoding='utf-8')
+ return c
| diff --git a/tests/test_theming/test_theming.py b/tests/test_theming/test_theming.py
index 0544c710c5f..4bfb48ffbd8 100644
--- a/tests/test_theming/test_theming.py
+++ b/tests/test_theming/test_theming.py
@@ -5,7 +5,8 @@
import pytest
import sphinx.builders.html
-from sphinx.theming import Theme, ThemeError
+from sphinx.errors import ThemeError
+from sphinx.theming import _load_theme_conf
@pytest.mark.sphinx(
@@ -13,8 +14,6 @@
confoverrides={'html_theme': 'ziptheme',
'html_theme_options.testopt': 'foo'})
def test_theme_api(app, status, warning):
- cfg = app.config
-
themes = ['basic', 'default', 'scrolls', 'agogo', 'sphinxdoc', 'haiku',
'traditional', 'epub', 'nature', 'pyramid', 'bizstyle', 'classic', 'nonav',
'test-theme', 'ziptheme', 'staticfiles', 'parent', 'child', 'alabaster']
@@ -28,8 +27,6 @@ def test_theme_api(app, status, warning):
# test Theme instance API
theme = app.builder.theme
assert theme.name == 'ziptheme'
- themedir = theme.themedir
- assert theme.base.name == 'basic'
assert len(theme.get_theme_dirs()) == 2
# direct setting
@@ -46,20 +43,20 @@ def test_theme_api(app, status, warning):
options = theme.get_options({'nonexisting': 'foo'})
assert 'nonexisting' not in options
- options = theme.get_options(cfg.html_theme_options)
+ options = theme.get_options(app.config.html_theme_options)
assert options['testopt'] == 'foo'
assert options['nosidebar'] == 'false'
# cleanup temp directories
- theme.cleanup()
- assert not os.path.exists(themedir)
+ theme._cleanup()
+ assert not any(map(os.path.exists, theme._tmp_dirs))
def test_nonexistent_theme_conf(tmp_path):
# Check that error occurs with a non-existent theme.conf
# (https://github.com/sphinx-doc/sphinx/issues/11668)
with pytest.raises(ThemeError):
- Theme('dummy', str(tmp_path), None)
+ _load_theme_conf(tmp_path)
@pytest.mark.sphinx(testroot='double-inheriting-theme')
| [
{
"components": [
{
"doc": "Remove temporary directories.",
"lines": [
103,
107
],
"name": "Theme._cleanup",
"signature": "def _cleanup(self) -> None:",
"type": "function"
},
{
"doc": "Load built-in themes.",
"... | [
"tests/test_theming/test_theming.py::test_theme_api",
"tests/test_theming/test_theming.py::test_nonexistent_theme_conf",
"tests/test_theming/test_theming.py::test_double_inheriting_theme",
"tests/test_theming/test_theming.py::test_nested_zipped_theme",
"tests/test_theming/test_theming.py::test_staticfiles",... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Load themes through iteration rather than recursion
To prepare for allowing TOML-based theme definitions.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sphinx/theming.py]
(definition of Theme._cleanup:)
def _cleanup(self) -> None:
"""Remove temporary directories."""
(definition of HTMLThemeFactory._load_builtin_themes:)
def _load_builtin_themes(self) -> None:
"""Load built-in themes."""
(definition of HTMLThemeFactory._load_additional_themes:)
def _load_additional_themes(self, theme_paths: str) -> None:
"""Load additional themes placed at specified directories."""
(definition of HTMLThemeFactory._load_extra_theme:)
def _load_extra_theme(self, name: str) -> None:
"""Try to load a theme with the specified name.
This uses the ``sphinx.html_themes`` entry point from package metadata."""
(definition of HTMLThemeFactory._find_themes:)
def _find_themes(theme_path: str) -> dict[str, str]:
"""Search themes from specified directory."""
(definition of _is_archived_theme:)
def _is_archived_theme(filename: str, /) -> bool:
"""Check whether the specified file is an archived theme file or not."""
(definition of _load_theme_with_ancestors:)
def _load_theme_with_ancestors( theme_paths: dict[str, str], name: str, /, ) -> tuple[dict[str, configparser.RawConfigParser], list[str], list[str]]:
(definition of _load_theme:)
def _load_theme( name: str, theme_path: str, /, ) -> tuple[str, str, str | None, configparser.RawConfigParser]:
(definition of _extract_zip:)
def _extract_zip(filename: str, target_dir: str, /) -> None:
"""Extract zip file to target directory."""
(definition of _load_theme_conf:)
def _load_theme_conf(theme_dir: os.PathLike[str] | str, /) -> configparser.RawConfigParser:
[end of new definitions in sphinx/theming.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | aaecc9376d0662aeca5d3bd7c9d9fa36d6398478 | ||
deepset-ai__haystack-6758 | 6,758 | deepset-ai/haystack | null | 88191e74bf72345924ed703c65edb9fdf6bd8edd | 2024-01-17T14:05:22Z | diff --git a/haystack/components/converters/html.py b/haystack/components/converters/html.py
index 0586065c78..dea38dbd1a 100644
--- a/haystack/components/converters/html.py
+++ b/haystack/components/converters/html.py
@@ -3,7 +3,7 @@
from typing import Any, Dict, List, Optional, Union, Literal
from boilerpy3 import extractors
-from haystack import Document, component
+from haystack import Document, component, default_from_dict, default_to_dict
from haystack.dataclasses import ByteStream
from haystack.components.converters.utils import get_bytestream_from_source, normalize_metadata
@@ -49,6 +49,13 @@ def __init__(
"""
self.extractor_type = extractor_type
+ def to_dict(self) -> Dict[str, Any]:
+ return default_to_dict(self, extractor_type=self.extractor_type)
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "HTMLToDocument":
+ return default_from_dict(cls, data)
+
@component.output_types(documents=List[Document])
def run(
self,
| diff --git a/test/components/converters/test_html_to_document.py b/test/components/converters/test_html_to_document.py
index aa8df51197..519a1c053e 100644
--- a/test/components/converters/test_html_to_document.py
+++ b/test/components/converters/test_html_to_document.py
@@ -160,3 +160,12 @@ def test_mixed_sources_run(self, test_files_path):
assert len(docs) == 3
for doc in docs:
assert "Haystack" in doc.content
+
+ def test_serde(self):
+ """
+ Test if the component runs correctly gets serialized and deserialized.
+ """
+ converter = HTMLToDocument("ArticleExtractor")
+ serde_data = converter.to_dict()
+ new_converter = HTMLToDocument.from_dict(serde_data)
+ assert new_converter.extractor_type == converter.extractor_type
| [
{
"components": [
{
"doc": "",
"lines": [
52,
53
],
"name": "HTMLToDocument.to_dict",
"signature": "def to_dict(self) -> Dict[str, Any]:",
"type": "function"
},
{
"doc": "",
"lines": [
56,
5... | [
"test/components/converters/test_html_to_document.py::TestHTMLToDocument::test_serde"
] | [
"[",
"test/components/converters/test_html_to_document.py::TestHTMLToDocument::test_run",
"test/components/converters/test_html_to_document.py::TestHTMLToDocument::test_run_different_extractors",
"test/components/converters/test_html_to_document.py::TestHTMLToDocument::test_run_doc_metadata",
"test/componen... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
feat: Add serde methods to `HTMLToDocument`
### Related Issues
- fixes #6588
### How did you test it?
<!-- unit tests, integration tests, manual verification, instructions for manual tests -->
Unit.
### Checklist
- I have read the [contributors guidelines](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/deepset-ai/haystack/blob/main/code_of_conduct.txt)
- I have updated the related issue with new insights and changes
- I added unit tests and updated the docstrings
- I've used one of the [conventional commit types](https://www.conventionalcommits.org/en/v1.0.0/) for my PR title: `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, `test:`.
- I documented my code
- I ran [pre-commit hooks](https://github.com/deepset-ai/haystack/blob/main/CONTRIBUTING.md#installation) and fixed any issue
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in haystack/components/converters/html.py]
(definition of HTMLToDocument.to_dict:)
def to_dict(self) -> Dict[str, Any]:
(definition of HTMLToDocument.from_dict:)
def from_dict(cls, data: Dict[str, Any]) -> "HTMLToDocument":
[end of new definitions in haystack/components/converters/html.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Add `to_dict` and `from_dict` methods in `HTMLToDocument`
`HTMLToDocument` Component is missing serialization methods.
We need to add them so it can be properly serialized.
----------
--------------------
</issues> | f4d9c2bb917be0ffe132dffcc2ad4f1b0fcc5967 | |
google-deepmind__optax-718 | 718 | google-deepmind/optax | null | 95af789cb7f1d0279f44e143e3207e2e7b4d70d1 | 2024-01-17T10:39:55Z | diff --git a/docs/api/optimizers.rst b/docs/api/optimizers.rst
index faf536084..5bc93b1f7 100644
--- a/docs/api/optimizers.rst
+++ b/docs/api/optimizers.rst
@@ -22,6 +22,7 @@ Optimizers
noisy_sgd
novograd
optimistic_gradient_descent
+ polyak_sgd
radam
rmsprop
sgd
@@ -101,6 +102,10 @@ Optimistic GD
~~~~~~~~~~~~~
.. autofunction:: optimistic_gradient_descent
+Polyak step-size SGD
+~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: polyak_sgd
+
RAdam
~~~~~
.. autofunction:: radam
diff --git a/docs/api/transformations.rst b/docs/api/transformations.rst
index 58bfe962d..667e35848 100644
--- a/docs/api/transformations.rst
+++ b/docs/api/transformations.rst
@@ -55,6 +55,7 @@ Transformations
scale_by_optimistic_gradient
scale_by_param_block_norm
scale_by_param_block_rms
+ scale_by_polyak
scale_by_radam
scale_by_rms
ScaleByRmsState
@@ -208,6 +209,8 @@ Transformations and states
.. autofunction:: scale_by_radam
+.. autofunction:: scale_by_polyak
+
.. autofunction:: scale_by_rms
.. autoclass:: ScaleByRmsState
:members:
diff --git a/optax/__init__.py b/optax/__init__.py
index 9115c1dd7..41e2ff758 100644
--- a/optax/__init__.py
+++ b/optax/__init__.py
@@ -40,6 +40,7 @@
from optax._src.alias import noisy_sgd
from optax._src.alias import novograd
from optax._src.alias import optimistic_gradient_descent
+from optax._src.alias import polyak_sgd
from optax._src.alias import radam
from optax._src.alias import rmsprop
from optax._src.alias import rprop
@@ -115,6 +116,7 @@
from optax._src.transform import scale_by_optimistic_gradient
from optax._src.transform import scale_by_param_block_norm
from optax._src.transform import scale_by_param_block_rms
+from optax._src.transform import scale_by_polyak
from optax._src.transform import scale_by_radam
from optax._src.transform import scale_by_rms
from optax._src.transform import scale_by_rprop
@@ -308,6 +310,7 @@
"piecewise_interpolate_schedule",
"polynomial_schedule",
"power_iteration",
+ "polyak_sgd",
"radam",
"rmsprop",
"rprop",
@@ -326,6 +329,7 @@
"scale_by_novograd",
"scale_by_param_block_norm",
"scale_by_param_block_rms",
+ "scale_by_polyak",
"scale_by_radam",
"scale_by_rms",
"scale_by_rprop",
diff --git a/optax/_src/alias.py b/optax/_src/alias.py
index e9f97268f..aa9b05d3a 100644
--- a/optax/_src/alias.py
+++ b/optax/_src/alias.py
@@ -1649,7 +1649,7 @@ def rprop(
gradient descent. It responds only to the sign of the gradient by increasing
or decreasing the step size selected per parameter exponentially to speed up
convergence and avoid oscillations.
-
+
Examples:
>>> import optax
>>> import jax
@@ -1702,3 +1702,80 @@ def rprop(
),
transform.scale(-1.0),
)
+
+
+def polyak_sgd(
+ max_learning_rate: float = 1.,
+ scaling: base.ScalarOrSchedule = 1.,
+ f_min: float = 0.0,
+ eps: float = 0.0,
+) -> base.GradientTransformationExtraArgs:
+ r"""SGD with Polyak step-size.
+
+ This solver implements the SGD with Polyak step size of (Loizou et al. 2021).
+ It sets the step-size as
+
+ .. math::
+ s \min\left\{\frac{f(x) - f^\star}{\|\nabla f(x)\|^2 + \epsilon},
+ \gamma_{\max}\right\}\,,
+
+ where :math:`f` is the function from which a gradient is computed,
+ :math:`\gamma_{\max}` is a maximal acceptable learning rate set by
+ ``max_learning_rate``, :math:`\epsilon` is a constant preventing division by
+ zero set with ``eps``, :math:`s` scales the formula by ``scaling``, and
+ :math:`f^\star` is a guess of the minimum value of the function set with
+ ``f_min``.
+
+
+ Examples:
+ >>> import optax
+ >>> import jax
+ >>> import jax.numpy as jnp
+ >>> def f(x): return jnp.sum(x ** 2) # simple quadratic function
+ >>> solver = optax.polyak_sgd()
+ >>> params = jnp.array([1., 2., 3.])
+ >>> print('Objective function: ', f(params))
+ Objective function: 14.0
+ >>> opt_state = solver.init(params)
+ >>> for _ in range(5):
+ ... value, grad = jax.value_and_grad(f)(params)
+ ... params, opt_state = solver.update(grad, opt_state, params, value=value)
+ ... print('Objective function: ', f(params))
+ Objective function: 3.5
+ Objective function: 0.875
+ Objective function: 0.21875
+ Objective function: 0.0546875
+ Objective function: 0.013671875
+
+ .. warning::
+ This method requires knowledge of an approximate value of the of the
+ objective function minimum, passed through the ``f_min`` argument.
+ For models that interpolate the data, this can be set to 0 (default
+ value).
+ Failing to set an appropriate value for ``f_min`` can lead to
+ divergence or convergence to a suboptimal solution.
+
+ References:
+ Loizou et al. `Stochastic polyak step-size for SGD: An adaptive learning
+ rate for fast convergence <https://arxiv.org/abs/2002.10542>`_, 2021
+
+ Berrada et al., `Training neural networks for and by interpolation
+ <https://arxiv.org/pdf/1906.05661.pdf>`_, 2020
+
+ Args:
+ max_learning_rate: a maximum step size to use (defaults to 1).
+ scaling: A global scaling factor, either fixed or evolving along
+ iterations with a scheduler (defaults to 1).
+ f_min: a lower bound on the objective function (defaults to 0). Corresponds
+ to :math:`f^\star` in the formula above.
+ eps: a value to add in the denominator of the update (defaults to 0).
+
+ Returns:
+ A :class:`GradientTransformationExtraArgs`.
+ """
+ return combine.chain(
+ sgd(learning_rate=scaling),
+ transform.scale_by_polyak(
+ max_learning_rate=max_learning_rate, f_min=f_min, eps=eps
+ ),
+ )
diff --git a/optax/_src/transform.py b/optax/_src/transform.py
index 7fc81179d..c8f92422b 100644
--- a/optax/_src/transform.py
+++ b/optax/_src/transform.py
@@ -21,6 +21,7 @@
import jax
import jax.numpy as jnp
+from optax import tree_utils
from optax._src import base
from optax._src import numerics
from optax._src import utils
@@ -31,6 +32,12 @@
_abs_sq = numerics.abs_sq
+def _init_empty_state(params: base.Params) -> base.EmptyState:
+ """Init function for an empty state."""
+ del params
+ return base.EmptyState()
+
+
class TraceState(NamedTuple):
"""Holds an aggregation of past updates."""
trace: base.Params
@@ -563,10 +570,6 @@ def scale_by_param_block_norm(
A `GradientTransformation` object.
"""
- def init_fn(params):
- del params
- return base.EmptyState()
-
def update_fn(updates, state, params):
if params is None:
raise ValueError(base.NO_PARAMS_MSG)
@@ -575,7 +578,7 @@ def update_fn(updates, state, params):
updates, params)
return updates, state
- return base.GradientTransformation(init_fn, update_fn)
+ return base.GradientTransformation(_init_empty_state, update_fn)
def scale_by_param_block_rms(
@@ -593,10 +596,6 @@ def scale_by_param_block_rms(
A `GradientTransformation` object.
"""
- def init_fn(params):
- del params
- return base.EmptyState()
-
def update_fn(updates, state, params):
if params is None:
raise ValueError(base.NO_PARAMS_MSG)
@@ -605,7 +604,7 @@ def update_fn(updates, state, params):
updates, params)
return updates, state
- return base.GradientTransformation(init_fn, update_fn)
+ return base.GradientTransformation(_init_empty_state, update_fn)
class ScaleByAdaDeltaState(NamedTuple):
@@ -1364,7 +1363,7 @@ def scale_by_distance_over_gradients(
The authors recommend using model averaging with this optimizer.
References:
- ["DoG is SGD’s Best Friend: A Parameter-Free Dynamic Step Size
+ ["DoG is SGD's Best Friend: A Parameter-Free Dynamic Step Size
Schedule"](https://arxiv.org/pdf/2302.12022.pdf)
Args:
@@ -1420,3 +1419,46 @@ def _tx(g, d, g_sos):
return updates, state
return base.GradientTransformation(init_fn, update_fn)
+
+
+def scale_by_polyak(
+ f_min: float = 0.0,
+ max_learning_rate: float = 1.0,
+ eps: float = 0.0,
+) -> base.GradientTransformationExtraArgs:
+ """Scales the update by Polyak's step-size."""
+
+ def update_fn(
+ updates: base.Updates,
+ state: base.EmptyState,
+ params: Optional[base.Params] = None,
+ *,
+ value: float,
+ **extra_args,
+ ) -> tuple[base.Updates, base.EmptyState]:
+ """Scales the update by the Polyak step-size.
+
+ Args:
+ updates: the updates to be scaled.
+ state: the state of the transformation.
+ params: the parameters of the model.
+ value: the value of the loss function.
+ **extra_args: additional keyword arguments. They are ignored by this
+ transformation.
+ Returns:
+ The scaled updates and the state of the transformation.
+ """
+ del params, extra_args
+ grad_sq_norm = tree_utils.tree_l2_norm(updates, squared=True)
+ # avoid division by zero
+ step = jnp.where(
+ grad_sq_norm + eps <= jnp.finfo(float).eps,
+ jnp.array(0.0),
+ jnp.minimum(
+ (value - f_min) / (grad_sq_norm + eps), max_learning_rate
+ ),
+ )
+ updates = tree_utils.tree_scalar_mul(step, updates)
+ return updates, state
+
+ return base.GradientTransformationExtraArgs(_init_empty_state, update_fn)
| diff --git a/optax/_src/alias_test.py b/optax/_src/alias_test.py
index f80d9b644..c55ca3c2f 100644
--- a/optax/_src/alias_test.py
+++ b/optax/_src/alias_test.py
@@ -60,6 +60,7 @@
dict(opt_name='rprop', opt_kwargs=dict(learning_rate=1e-1)),
dict(opt_name='sm3', opt_kwargs=dict(learning_rate=1.0)),
dict(opt_name='yogi', opt_kwargs=dict(learning_rate=1e-1)),
+ dict(opt_name='polyak_sgd', opt_kwargs=dict(max_learning_rate=1.))
)
@@ -71,11 +72,10 @@ def _setup_parabola(dtype):
if jnp.iscomplexobj(dtype):
final_params *= 1 + 1j
- @jax.grad
- def get_updates(params):
+ def objective(params):
return jnp.sum(numerics.abs_sq(params - final_params))
- return initial_params, final_params, get_updates
+ return initial_params, final_params, objective
def _setup_rosenbrock(dtype):
@@ -89,12 +89,11 @@ def _setup_rosenbrock(dtype):
initial_params = jnp.array([0.0, 0.0], dtype=dtype)
final_params = jnp.array([a, a**2], dtype=dtype)
- @jax.grad
- def get_updates(params):
+ def objective(params):
return (numerics.abs_sq(a - params[0]) +
b * numerics.abs_sq(params[1] - params[0]**2))
- return initial_params, final_params, get_updates
+ return initial_params, final_params, objective
class AliasTest(chex.TestCase):
@@ -113,21 +112,26 @@ def test_optimization(self, opt_name, opt_kwargs, target, dtype):
'lion',
'rprop',
'adadelta',
+ 'polyak_sgd',
) and jnp.iscomplexobj(dtype):
raise absltest.SkipTest(
f'{opt_name} does not support complex parameters.'
)
opt = getattr(alias, opt_name)(**opt_kwargs)
- initial_params, final_params, get_updates = target(dtype)
+ initial_params, final_params, objective = target(dtype)
@jax.jit
def step(params, state):
- updates = get_updates(params)
+ value, updates = jax.value_and_grad(objective)(params)
# Complex gradients need to be conjugated before being added to parameters
# https://gist.github.com/wdphy16/118aef6fb5f82c49790d7678cf87da29
updates = jax.tree_util.tree_map(lambda x: x.conj(), updates)
- updates, state = opt.update(updates, state, params)
+ if opt_name == 'polyak_sgd':
+ update_kwargs = {'value': value}
+ else:
+ update_kwargs = {}
+ updates, state = opt.update(updates, state, params, **update_kwargs)
params = update.apply_updates(params, updates)
return params, state
@@ -146,13 +150,13 @@ def step(params, state):
def test_optimizers_can_be_wrapped_in_inject_hyperparams(
self, opt_name, opt_kwargs):
"""Checks that optimizers can be wrapped in inject_hyperparams."""
- # See also https://github.com/deepmind/optax/issues/412.
+ # See also https://github.com/google-deepmind/optax/issues/412.
opt_factory = getattr(alias, opt_name)
opt = opt_factory(**opt_kwargs)
if opt_name == 'adafactor':
# Adafactor wrapped in inject_hyperparams currently needs a static
# argument to be specified in order to be jittable. See issue
- # https://github.com/deepmind/optax/issues/412.
+ # https://github.com/google-deepmind/optax/issues/412.
opt_inject = _inject.inject_hyperparams(
opt_factory, static_args=('min_dim_size_to_factor',))(**opt_kwargs)
else:
@@ -162,11 +166,17 @@ def test_optimizers_can_be_wrapped_in_inject_hyperparams(
grads = [jnp.ones((2, 3)), jnp.negative(jnp.ones((2, 5, 2)))]
state = self.variant(opt.init)(params)
- updates, new_state = self.variant(opt.update)(grads, state, params)
+ if opt_name == 'polyak_sgd':
+ update_kwargs = {'value': jnp.array(0.)}
+ else:
+ update_kwargs = {}
+ updates, new_state = self.variant(opt.update)(
+ grads, state, params, **update_kwargs
+ )
state_inject = self.variant(opt_inject.init)(params)
updates_inject, new_state_inject = self.variant(opt_inject.update)(
- grads, state_inject, params)
+ grads, state_inject, params, **update_kwargs)
with self.subTest('Equality of updates.'):
chex.assert_trees_all_close(updates_inject, updates, rtol=1e-4)
diff --git a/optax/_src/transform_test.py b/optax/_src/transform_test.py
index feb05ce48..3b6cd4604 100644
--- a/optax/_src/transform_test.py
+++ b/optax/_src/transform_test.py
@@ -46,6 +46,7 @@ def setUp(self):
('adam', transform.scale_by_adam),
('adamax', transform.scale_by_adamax),
('lion', transform.scale_by_lion),
+ ('polyak', transform.scale_by_polyak),
('rmsprop', transform.scale_by_rms),
('stddev', transform.scale_by_stddev),
('trust_ratio', transform.scale_by_trust_ratio),
@@ -63,7 +64,13 @@ def test_scalers(self, scaler_constr):
state = init_fn(params)
chex.assert_tree_all_finite(state)
- updates, state = transform_fn(self.per_step_updates, state, params)
+ if scaler_constr.__name__ == 'scale_by_polyak':
+ extra_args = {'value': jnp.array(0.0)}
+ else:
+ extra_args = {}
+ updates, state = transform_fn(
+ self.per_step_updates, state, params, **extra_args
+ )
chex.assert_tree_all_finite((params, updates, state))
jax.tree_util.tree_map(
lambda *args: chex.assert_equal_shape(args), params, updates)
@@ -288,11 +295,32 @@ def f(params: jnp.ndarray) -> jnp.ndarray:
g = jax.grad(f)(initial_params)
og_true = 2 * g['x'] - getattr(og_state, 'trace')['x']
- og, og_state = og.update(g, og_state)
+ og, _ = og.update(g, og_state)
# Compare transformation output with manually computed optimistic gradient.
chex.assert_trees_all_close(og_true, og['x'])
+ def test_scale_by_polyak_l1_norm(self, tol=1e-10):
+ """Polyak step-size on L1 norm."""
+ # for this objective, the Polyak step-size has an exact model and should
+ # converge to the minimizer in one step
+ objective = lambda x: jnp.abs(x).sum()
+
+ init_params = jnp.array([1.0, -1.0])
+ polyak = transform.scale_by_polyak()
+ polyak_state = polyak.init(init_params)
+ # check that polyak state raises an error if it called without a value
+ with self.assertRaises(TypeError):
+ polyak.update(self.per_step_updates, polyak_state, init_params)
+
+ value, grad = jax.value_and_grad(objective)(init_params)
+ updates, _ = polyak.update(
+ grad, polyak_state, init_params, value=value
+ )
+ # check that objective at (init_params - updates) is smaller than tol
+ print(grad, value, updates)
+ self.assertLess(objective(init_params - updates), tol)
+
@chex.all_variants
def test_bias_correction_bf16(self):
bias_correction_fn = self.variant(transform.bias_correction)
| diff --git a/docs/api/optimizers.rst b/docs/api/optimizers.rst
index faf536084..5bc93b1f7 100644
--- a/docs/api/optimizers.rst
+++ b/docs/api/optimizers.rst
@@ -22,6 +22,7 @@ Optimizers
noisy_sgd
novograd
optimistic_gradient_descent
+ polyak_sgd
radam
rmsprop
sgd
@@ -101,6 +102,10 @@ Optimistic GD
~~~~~~~~~~~~~
.. autofunction:: optimistic_gradient_descent
+Polyak step-size SGD
+~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: polyak_sgd
+
RAdam
~~~~~
.. autofunction:: radam
diff --git a/docs/api/transformations.rst b/docs/api/transformations.rst
index 58bfe962d..667e35848 100644
--- a/docs/api/transformations.rst
+++ b/docs/api/transformations.rst
@@ -55,6 +55,7 @@ Transformations
scale_by_optimistic_gradient
scale_by_param_block_norm
scale_by_param_block_rms
+ scale_by_polyak
scale_by_radam
scale_by_rms
ScaleByRmsState
@@ -208,6 +209,8 @@ Transformations and states
.. autofunction:: scale_by_radam
+.. autofunction:: scale_by_polyak
+
.. autofunction:: scale_by_rms
.. autoclass:: ScaleByRmsState
:members:
| [
{
"components": [
{
"doc": "SGD with Polyak step-size.\n\nThis solver implements the SGD with Polyak step size of (Loizou et al. 2021).\nIt sets the step-size as\n\n.. math::\n s \\min\\left\\{\\frac{f(x) - f^\\star}{\\|\\nabla f(x)\\|^2 + \\epsilon},\n \\gamma_{\\max}\\right\\}\\,,\n\nwhere :... | [
"optax/_src/alias_test.py::AliasTest::test_explicit_dtype_None",
"optax/_src/alias_test.py::AliasTest::test_explicit_dtype_bfloat16",
"optax/_src/alias_test.py::AliasTest::test_explicit_dtype_complex64",
"optax/_src/alias_test.py::AliasTest::test_explicit_dtype_float32",
"optax/_src/alias_test.py::AliasTest... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Implementation of the Polyak SGD solver
Implementation of the Polyak SGD solver
Other changes done to accommodate this solver:
* In the tests, I exposed the objective function instead of just the gradients as this solver requires access to the objective function.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in optax/_src/alias.py]
(definition of polyak_sgd:)
def polyak_sgd( max_learning_rate: float = 1., scaling: base.ScalarOrSchedule = 1., f_min: float = 0.0, eps: float = 0.0, ) -> base.GradientTransformationExtraArgs:
"""SGD with Polyak step-size.
This solver implements the SGD with Polyak step size of (Loizou et al. 2021).
It sets the step-size as
.. math::
s \min\left\{\frac{f(x) - f^\star}{\|\nabla f(x)\|^2 + \epsilon},
\gamma_{\max}\right\}\,,
where :math:`f` is the function from which a gradient is computed,
:math:`\gamma_{\max}` is a maximal acceptable learning rate set by
``max_learning_rate``, :math:`\epsilon` is a constant preventing division by
zero set with ``eps``, :math:`s` scales the formula by ``scaling``, and
:math:`f^\star` is a guess of the minimum value of the function set with
``f_min``.
Examples:
>>> import optax
>>> import jax
>>> import jax.numpy as jnp
>>> def f(x): return jnp.sum(x ** 2) # simple quadratic function
>>> solver = optax.polyak_sgd()
>>> params = jnp.array([1., 2., 3.])
>>> print('Objective function: ', f(params))
Objective function: 14.0
>>> opt_state = solver.init(params)
>>> for _ in range(5):
... value, grad = jax.value_and_grad(f)(params)
... params, opt_state = solver.update(grad, opt_state, params, value=value)
... print('Objective function: ', f(params))
Objective function: 3.5
Objective function: 0.875
Objective function: 0.21875
Objective function: 0.0546875
Objective function: 0.013671875
.. warning::
This method requires knowledge of an approximate value of the of the
objective function minimum, passed through the ``f_min`` argument.
For models that interpolate the data, this can be set to 0 (default
value).
Failing to set an appropriate value for ``f_min`` can lead to
divergence or convergence to a suboptimal solution.
References:
Loizou et al. `Stochastic polyak step-size for SGD: An adaptive learning
rate for fast convergence <https://arxiv.org/abs/2002.10542>`_, 2021
Berrada et al., `Training neural networks for and by interpolation
<https://arxiv.org/pdf/1906.05661.pdf>`_, 2020
Args:
max_learning_rate: a maximum step size to use (defaults to 1).
scaling: A global scaling factor, either fixed or evolving along
iterations with a scheduler (defaults to 1).
f_min: a lower bound on the objective function (defaults to 0). Corresponds
to :math:`f^\star` in the formula above.
eps: a value to add in the denominator of the update (defaults to 0).
Returns:
A :class:`GradientTransformationExtraArgs`."""
[end of new definitions in optax/_src/alias.py]
[start of new definitions in optax/_src/transform.py]
(definition of _init_empty_state:)
def _init_empty_state(params: base.Params) -> base.EmptyState:
"""Init function for an empty state."""
(definition of scale_by_polyak:)
def scale_by_polyak( f_min: float = 0.0, max_learning_rate: float = 1.0, eps: float = 0.0, ) -> base.GradientTransformationExtraArgs:
"""Scales the update by Polyak's step-size."""
(definition of scale_by_polyak.update_fn:)
def update_fn( updates: base.Updates, state: base.EmptyState, params: Optional[base.Params] = None, *, value: float, **extra_args, ) -> tuple[base.Updates, base.EmptyState]:
"""Scales the update by the Polyak step-size.
Args:
updates: the updates to be scaled.
state: the state of the transformation.
params: the parameters of the model.
value: the value of the loss function.
**extra_args: additional keyword arguments. They are ignored by this
transformation.
Returns:
The scaled updates and the state of the transformation."""
[end of new definitions in optax/_src/transform.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 1e08bccf195ac54e7d9d766eb5e69345bf0e3230 | |
conan-io__conan-15453 | 15,453 | conan-io/conan | null | 98d68db7f3a7d6eec4dc463a24839af542fb70cc | 2024-01-14T16:46:58Z | diff --git a/conans/client/graph/compute_pid.py b/conans/client/graph/compute_pid.py
index 0948a03fe3a..a81497bb002 100644
--- a/conans/client/graph/compute_pid.py
+++ b/conans/client/graph/compute_pid.py
@@ -21,7 +21,7 @@ def compute_package_id(node, new_config):
python_requires = getattr(conanfile, "python_requires", None)
if python_requires:
- python_requires = python_requires.all_refs()
+ python_requires = python_requires.info_requires()
data = OrderedDict()
build_data = OrderedDict()
diff --git a/conans/client/graph/python_requires.py b/conans/client/graph/python_requires.py
index dc24ff7fb51..a861e53df44 100644
--- a/conans/client/graph/python_requires.py
+++ b/conans/client/graph/python_requires.py
@@ -31,6 +31,10 @@ def serialize(self):
def all_refs(self):
return [r.ref for r in self._pyrequires.values()]
+ def info_requires(self):
+ return {pyreq.ref: getattr(pyreq.conanfile, "package_id_python_mode", None)
+ for pyreq in self._pyrequires.values()}
+
def items(self):
return self._pyrequires.items()
diff --git a/conans/model/info.py b/conans/model/info.py
index 00743eb3884..a2b9ee85387 100644
--- a/conans/model/info.py
+++ b/conans/model/info.py
@@ -66,6 +66,7 @@ def __init__(self, ref, package_id, default_package_id_mode):
self._package_id = package_id
self.name = self.version = self.user = self.channel = self.package_id = None
self.recipe_revision = None
+ self.package_id_mode = default_package_id_mode
try:
func_package_id_mode = getattr(self, default_package_id_mode)
@@ -234,14 +235,15 @@ class PythonRequiresInfo:
def __init__(self, refs, default_package_id_mode):
self._default_package_id_mode = default_package_id_mode
if refs:
- self._refs = [RequirementInfo(r, None, default_package_id_mode=default_package_id_mode)
- for r in sorted(refs)]
+ self._refs = [RequirementInfo(r, None,
+ default_package_id_mode=mode or default_package_id_mode)
+ for r, mode in sorted(refs.items())]
else:
self._refs = None
def copy(self):
# For build_id() implementation
- refs = [r._ref for r in self._refs] if self._refs else None
+ refs = {r._ref: r.package_id_mode for r in self._refs} if self._refs else None
return PythonRequiresInfo(refs, self._default_package_id_mode)
def __bool__(self):
| diff --git a/conans/test/integration/package_id/package_id_modes_test.py b/conans/test/integration/package_id/package_id_modes_test.py
index 16b01a1284b..f8555058658 100644
--- a/conans/test/integration/package_id/package_id_modes_test.py
+++ b/conans/test/integration/package_id/package_id_modes_test.py
@@ -104,3 +104,37 @@ class Dep(ConanFile):
c.run("create dep --version=0.2")
c.run("create pkg")
c.assert_listed_binary({"pkg/0.1": ("56934f87c11792e356423e081c7cd490f3c1fbe0", "Build")})
+
+ def test_dep_python_require_defined(self):
+ c = TestClient()
+ dep = textwrap.dedent("""
+ from conan import ConanFile
+ class Dep(ConanFile):
+ name = "dep"
+ package_type = "python-require"
+ package_id_python_mode = "major_mode"
+ """)
+ c.save({"dep/conanfile.py": dep,
+ "pkg/conanfile.py": GenConanfile("pkg", "0.1").with_python_requires("dep/[*]")})
+ c.run("create dep --version=0.1")
+ c.run("create pkg")
+ c.assert_listed_binary({"pkg/0.1": ("331c17383dcdf37f79bc2b86fa55ac56afdc6fec", "Build")})
+
+ # using dep 0.2, still same, because dependency chose "major"
+ c.run("create dep --version=0.1.1")
+ c.run("create pkg")
+ c.assert_listed_binary({"pkg/0.1": ("331c17383dcdf37f79bc2b86fa55ac56afdc6fec", "Build")})
+
+ # using dep 0.2, still same, because dependency chose "major"
+ c.run("create dep --version=0.2")
+ c.run("create pkg")
+ c.assert_listed_binary({"pkg/0.1": ("331c17383dcdf37f79bc2b86fa55ac56afdc6fec", "Build")})
+ c.run("list *:*")
+ assert "dep/0.Y.Z" in c.out
+
+ # using dep 0.2, new package_id, because dependency chose "major"
+ c.run("create dep --version=1.0")
+ c.run("create pkg")
+ c.assert_listed_binary({"pkg/0.1": ("9b015e30b768df0217ffa2c270f60227c998e609", "Build")})
+ c.run("list *:*")
+ assert "dep/1.Y.Z" in c.out
| [
{
"components": [
{
"doc": "",
"lines": [
34,
36
],
"name": "PyRequires.info_requires",
"signature": "def info_requires(self):",
"type": "function"
}
],
"file": "conans/client/graph/python_requires.py"
}
] | [
"conans/test/integration/package_id/package_id_modes_test.py::TestDepDefinedMode::test_dep_python_require_defined"
] | [
"conans/test/integration/package_id/package_id_modes_test.py::test_basic_default_modes_unknown",
"conans/test/integration/package_id/package_id_modes_test.py::test_basic_default_modes_application",
"conans/test/integration/package_id/package_id_modes_test.py::TestDepDefinedMode::test_dep_defined",
"conans/tes... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feature/python id mode
Changelog: Feature: Recipe ``python_package_id_mode`` for ``python_requires`` recipes, to define per-recipe effect on consumers ``package_id``.
Docs: https://github.com/conan-io/docs/pull/3542
Close https://github.com/conan-io/conan/issues/15327
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/client/graph/python_requires.py]
(definition of PyRequires.info_requires:)
def info_requires(self):
[end of new definitions in conans/client/graph/python_requires.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
Textualize__textual-4012 | 4,012 | Textualize/textual | null | 225fa24bdf9fba4e2352962d531d29820da3e761 | 2024-01-12T17:28:09Z | diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc2657ad15..0da115172e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- Added `DOMNode.has_pseudo_classes` https://github.com/Textualize/textual/pull/3970
- Added `Widget.allow_focus` and `Widget.allow_focus_children` https://github.com/Textualize/textual/pull/3989
+- Added `Query.blur` and `Query.focus` https://github.com/Textualize/textual/pull/4012
+- Added `MessagePump.message_queue_size` https://github.com/Textualize/textual/pull/4012
+- Added `TabbedContent.active_pane` https://github.com/Textualize/textual/pull/4012
### Fixed
@@ -27,6 +30,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- `SelectionList` option IDs are usable as soon as the widget is instantiated https://github.com/Textualize/textual/issues/3903
- Fix issue with `Strip.crop` when crop window start aligned with strip end https://github.com/Textualize/textual/pull/3998
- Fixed Strip.crop_extend https://github.com/Textualize/textual/pull/4011
+- Fixed declaration after nested rule set causing a parse error https://github.com/Textualize/textual/pull/4012
- ID and class validation was too lenient https://github.com/Textualize/textual/issues/3954
- Fixed a crash if the `TextArea` language was set but tree-sitter lanuage binaries were not installed https://github.com/Textualize/textual/issues/4045
diff --git a/src/textual/css/query.py b/src/textual/css/query.py
index 7df103e7a3..bf585bd0c3 100644
--- a/src/textual/css/query.py
+++ b/src/textual/css/query.py
@@ -430,3 +430,28 @@ def refresh(
for node in self:
node.refresh(repaint=repaint, layout=layout)
return self
+
+ def focus(self) -> DOMQuery[QueryType]:
+ """Focus the first matching node that permits focus.
+
+ Returns:
+ Query for chaining.
+ """
+ for node in self:
+ if node.allow_focus():
+ node.focus()
+ break
+ return self
+
+ def blur(self) -> DOMQuery[QueryType]:
+ """Blur the first matching node that is focused.
+
+ Returns:
+ Query for chaining.
+ """
+ focused = self._node.screen.focused
+ if focused is not None:
+ nodes: list[Widget] = list(self)
+ if focused in nodes:
+ self._node.screen._reset_focus(focused, avoiding=nodes)
+ return self
diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py
index 7cf1556613..0d0df40002 100644
--- a/src/textual/css/tokenize.py
+++ b/src/textual/css/tokenize.py
@@ -210,7 +210,7 @@ def __call__(self, code: str, read_from: CSSLocation) -> Iterable[Token]:
nest_level += 1
elif name == "declaration_set_end":
nest_level -= 1
- expect = expect_root_nested if nest_level else expect_root_scope
+ expect = expect_declaration if nest_level else expect_root_scope
yield token
continue
expect = get_state(name, expect)
diff --git a/src/textual/css/tokenizer.py b/src/textual/css/tokenizer.py
index 44e5716e3e..dc68a9e575 100644
--- a/src/textual/css/tokenizer.py
+++ b/src/textual/css/tokenizer.py
@@ -248,11 +248,14 @@ def get_token(self, expect: Expect) -> Token:
line = self.lines[line_no]
match = expect.match(line, col_no)
if match is None:
+ error_line = line[col_no:].rstrip()
+ error_message = (
+ f"{expect.description} (found {error_line.split(';')[0]!r})."
+ )
+ if not error_line.endswith(";"):
+ error_message += "; Did you forget a semicolon at the end of a line?"
raise TokenError(
- self.read_from,
- self.code,
- (line_no + 1, col_no + 1),
- f"{expect.description} (found {line[col_no:].rstrip()!r}).; Did you forget a semicolon at the end of a line?",
+ self.read_from, self.code, (line_no + 1, col_no + 1), error_message
)
iter_groups = iter(match.groups())
diff --git a/src/textual/message.py b/src/textual/message.py
index 931c5aa21b..becdb374e5 100644
--- a/src/textual/message.py
+++ b/src/textual/message.py
@@ -14,8 +14,8 @@
from .case import camel_to_snake
if TYPE_CHECKING:
+ from .dom import DOMNode
from .message_pump import MessagePump
- from .widget import Widget
@rich.repr.auto
@@ -77,7 +77,7 @@ def __init_subclass__(
cls.handler_name = f"on_{namespace}_{name}" if namespace else f"on_{name}"
@property
- def control(self) -> Widget | None:
+ def control(self) -> DOMNode | None:
"""The widget associated with this message, or None by default."""
return None
diff --git a/src/textual/message_pump.py b/src/textual/message_pump.py
index 808422cdce..28095cf079 100644
--- a/src/textual/message_pump.py
+++ b/src/textual/message_pump.py
@@ -188,6 +188,11 @@ def has_parent(self) -> bool:
"""Does this object have a parent?"""
return self._parent is not None
+ @property
+ def message_queue_size(self) -> int:
+ """The current size of the message queue."""
+ return self._message_queue.qsize()
+
@property
def app(self) -> "App[object]":
"""
diff --git a/src/textual/signal.py b/src/textual/signal.py
new file mode 100644
index 0000000000..cbf7a8e1dc
--- /dev/null
+++ b/src/textual/signal.py
@@ -0,0 +1,88 @@
+"""
+Signals are a simple pub-sub mechanism.
+
+DOMNodes can subscribe to a signal, which will invoke a callback when the signal is published.
+
+This is experimental for now, for internal use. It may be part of the public API in a future release.
+
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from weakref import WeakKeyDictionary
+
+import rich.repr
+
+from textual import log
+
+if TYPE_CHECKING:
+ from ._types import IgnoreReturnCallbackType
+ from .dom import DOMNode
+
+
+class SignalError(Exception):
+ """Base class for a signal."""
+
+
+@rich.repr.auto(angular=True)
+class Signal:
+ """A signal that a widget may subscribe to, in order to invoke callbacks when an associated event occurs."""
+
+ def __init__(self, owner: DOMNode, name: str) -> None:
+ """Initialize a signal.
+
+ Args:
+ owner: The owner of this signal.
+ name: An identifier for debugging purposes.
+ """
+ self._owner = owner
+ self._name = name
+ self._subscriptions: WeakKeyDictionary[
+ DOMNode, list[IgnoreReturnCallbackType]
+ ] = WeakKeyDictionary()
+
+ def __rich_repr__(self) -> rich.repr.Result:
+ yield "owner", self._owner
+ yield "name", self._name
+ yield "subscriptions", list(self._subscriptions.keys())
+
+ def subscribe(self, node: DOMNode, callback: IgnoreReturnCallbackType) -> None:
+ """Subscribe a node to this signal.
+
+ When the signal is published, the callback will be invoked.
+
+ Args:
+ node: Node to subscribe.
+ callback: A callback function which takes no arguments, and returns anything (return type ignored).
+ """
+ if not node.is_running:
+ raise SignalError(
+ f"Node must be running to subscribe to a signal (has {node} been mounted)?"
+ )
+ callbacks = self._subscriptions.setdefault(node, [])
+ if callback not in callbacks:
+ callbacks.append(callback)
+
+ def unsubscribe(self, node: DOMNode) -> None:
+ """Unsubscribe a node from this signal.
+
+ Args:
+ node: Node to unsubscribe,
+ """
+ self._subscriptions.pop(node, None)
+
+ def publish(self) -> None:
+ """Publish the signal (invoke subscribed callbacks)."""
+
+ for node, callbacks in list(self._subscriptions.items()):
+ if not node.is_running:
+ # Removed nodes that are no longer running
+ self._subscriptions.pop(node)
+ else:
+ # Call callbacks
+ for callback in callbacks:
+ try:
+ callback()
+ except Exception as error:
+ log.error(f"error publishing signal to {node} ignored; {error}")
diff --git a/src/textual/widget.py b/src/textual/widget.py
index cbff95e1c3..494e3f19ba 100644
--- a/src/textual/widget.py
+++ b/src/textual/widget.py
@@ -2806,10 +2806,10 @@ def _get_scrollable_region(self, region: Region) -> Region:
scrollbar_size_horizontal = styles.scrollbar_size_horizontal
scrollbar_size_vertical = styles.scrollbar_size_vertical
- show_vertical_scrollbar: bool = (
+ show_vertical_scrollbar: bool = bool(
show_vertical_scrollbar and scrollbar_size_vertical
)
- show_horizontal_scrollbar: bool = (
+ show_horizontal_scrollbar: bool = bool(
show_horizontal_scrollbar and scrollbar_size_horizontal
)
@@ -2843,10 +2843,10 @@ def _arrange_scrollbars(self, region: Region) -> Iterable[tuple[Widget, Region]]
scrollbar_size_horizontal = self.scrollbar_size_horizontal
scrollbar_size_vertical = self.scrollbar_size_vertical
- show_vertical_scrollbar: bool = (
+ show_vertical_scrollbar: bool = bool(
show_vertical_scrollbar and scrollbar_size_vertical
)
- show_horizontal_scrollbar: bool = (
+ show_horizontal_scrollbar: bool = bool(
show_horizontal_scrollbar and scrollbar_size_horizontal
)
diff --git a/src/textual/widgets/_tabbed_content.py b/src/textual/widgets/_tabbed_content.py
index 47fa6865c0..e79fbe1b1d 100644
--- a/src/textual/widgets/_tabbed_content.py
+++ b/src/textual/widgets/_tabbed_content.py
@@ -321,6 +321,11 @@ def __init__(
self._initial = initial
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
+ @property
+ def active_pane(self) -> TabPane | None:
+ """The currently active pane, or `None` if no pane is active."""
+ return self.get_pane(self.active)
+
def validate_active(self, active: str) -> str:
"""It doesn't make sense for `active` to be an empty string.
| diff --git a/tests/snapshot_tests/snapshot_apps/nested_specificity.py b/tests/snapshot_tests/snapshot_apps/nested_specificity.py
index 0705abfabc..da67cc8dcd 100644
--- a/tests/snapshot_tests/snapshot_apps/nested_specificity.py
+++ b/tests/snapshot_tests/snapshot_apps/nested_specificity.py
@@ -31,14 +31,15 @@ class NestedCSS(BaseTester):
DEFAULT_CSS = """
NestedCSS {
width: 1fr;
- height: 1fr;
- background: green 10%;
- border: blank;
+ height: 1fr;
&:focus {
background: green 20%;
border: round green;
}
+
+ background: green 10%;
+ border: blank;
}
"""
diff --git a/tests/test_message_pump.py b/tests/test_message_pump.py
index c6f9d921ca..a25bd53a79 100644
--- a/tests/test_message_pump.py
+++ b/tests/test_message_pump.py
@@ -3,6 +3,7 @@
from textual.app import App, ComposeResult
from textual.errors import DuplicateKeyHandlers
from textual.events import Key
+from textual.message import Message
from textual.widget import Widget
from textual.widgets import Input
@@ -70,6 +71,25 @@ def on_input_changed(self, event: Input.Changed) -> None:
self.input_changed_events.append(event)
+async def test_message_queue_size():
+ """Test message queue size property."""
+ app = App()
+ assert app.message_queue_size == 0
+
+ class TestMessage(Message):
+ pass
+
+ async with app.run_test() as pilot:
+ assert app.message_queue_size == 0
+ app.post_message(TestMessage())
+ assert app.message_queue_size == 1
+ app.post_message(TestMessage())
+ assert app.message_queue_size == 2
+ # A pause will process all the messages
+ await pilot.pause()
+ assert app.message_queue_size == 0
+
+
async def test_prevent() -> None:
app = PreventTestApp()
diff --git a/tests/test_query.py b/tests/test_query.py
index d09599cdf2..a80cf99639 100644
--- a/tests/test_query.py
+++ b/tests/test_query.py
@@ -11,7 +11,7 @@
WrongType,
)
from textual.widget import Widget
-from textual.widgets import Label
+from textual.widgets import Input, Label
def test_query():
@@ -313,3 +313,33 @@ def compose(self):
async with app.run_test() as pilot:
app.query(MyWidget).refresh(repaint=args[0], layout=args[1])
assert refreshes[-1] == args
+
+
+async def test_query_focus_blur():
+ class FocusApp(App):
+ AUTO_FOCUS = None
+
+ def compose(self) -> ComposeResult:
+ yield Input(id="foo")
+ yield Input(id="bar")
+ yield Input(id="baz")
+
+ app = FocusApp()
+ async with app.run_test() as pilot:
+ # Nothing focused
+ assert app.focused is None
+ # Focus first input
+ app.query(Input).focus()
+ await pilot.pause()
+ assert app.focused.id == "foo"
+ # Blur inputs
+ app.query(Input).blur()
+ await pilot.pause()
+ assert app.focused is None
+ # Focus another
+ app.query("#bar").focus()
+ await pilot.pause()
+ assert app.focused.id == "bar"
+ # Focus non existing
+ app.query("#egg").focus()
+ assert app.focused.id == "bar"
diff --git a/tests/test_signal.py b/tests/test_signal.py
new file mode 100644
index 0000000000..7833ae70f4
--- /dev/null
+++ b/tests/test_signal.py
@@ -0,0 +1,75 @@
+import pytest
+
+from textual.app import App, ComposeResult
+from textual.signal import Signal, SignalError
+from textual.widgets import Label
+
+
+async def test_signal():
+ """Test signal subscribe"""
+ called = 0
+
+ class TestLabel(Label):
+ def on_mount(self) -> None:
+ def signal_result():
+ nonlocal called
+ called += 1
+
+ assert isinstance(self.app, TestApp)
+ self.app.test_signal.subscribe(self, signal_result)
+
+ class TestApp(App):
+ BINDINGS = [("space", "signal")]
+
+ def __init__(self) -> None:
+ self.test_signal = Signal(self, "coffee ready")
+ super().__init__()
+
+ def compose(self) -> ComposeResult:
+ yield TestLabel()
+
+ def action_signal(self) -> None:
+ self.test_signal.publish()
+
+ app = TestApp()
+ async with app.run_test() as pilot:
+ # Check default called is 0
+ assert called == 0
+ # Action should publish signal
+ await pilot.press("space")
+ assert called == 1
+ # Check a second time
+ await pilot.press("space")
+ assert called == 2
+ # Removed the owner object
+ await app.query_one(TestLabel).remove()
+ # Check nothing is called
+ await pilot.press("space")
+ assert called == 2
+ # Add a new test label
+ await app.mount(TestLabel())
+ # Check callback again
+ await pilot.press("space")
+ assert called == 3
+ # Unsubscribe
+ app.test_signal.unsubscribe(app.query_one(TestLabel))
+ # Check nothing to update
+ await pilot.press("space")
+ assert called == 3
+
+
+def test_signal_errors():
+ """Check exceptions raised by Signal class."""
+ app = App()
+ test_signal = Signal(app, "test")
+ label = Label()
+ # Check subscribing a non-running widget is an error
+ with pytest.raises(SignalError):
+ test_signal.subscribe(label, lambda: None)
+
+
+def test_repr():
+ """Check the repr doesn't break."""
+ app = App()
+ test_signal = Signal(app, "test")
+ assert isinstance(repr(test_signal), str)
diff --git a/tests/test_tabbed_content.py b/tests/test_tabbed_content.py
index 5b67cc508a..28765e8fc3 100644
--- a/tests/test_tabbed_content.py
+++ b/tests/test_tabbed_content.py
@@ -24,6 +24,7 @@ def compose(self) -> ComposeResult:
tabbed_content = app.query_one(TabbedContent)
# Check first tab
assert tabbed_content.active == "foo"
+ assert tabbed_content.active_pane.id == "foo"
await pilot.pause()
assert app.query_one("#foo-label").region
assert not app.query_one("#bar-label").region
@@ -32,6 +33,7 @@ def compose(self) -> ComposeResult:
# Click second tab
await pilot.click(f"Tab#{ContentTab.add_prefix('bar')}")
assert tabbed_content.active == "bar"
+ assert tabbed_content.active_pane.id == "bar"
await pilot.pause()
assert not app.query_one("#foo-label").region
assert app.query_one("#bar-label").region
@@ -40,6 +42,7 @@ def compose(self) -> ComposeResult:
# Click third tab
await pilot.click(f"Tab#{ContentTab.add_prefix('baz')}")
assert tabbed_content.active == "baz"
+ assert tabbed_content.active_pane.id == "baz"
await pilot.pause()
assert not app.query_one("#foo-label").region
assert not app.query_one("#bar-label").region
| diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc2657ad15..0da115172e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- Added `DOMNode.has_pseudo_classes` https://github.com/Textualize/textual/pull/3970
- Added `Widget.allow_focus` and `Widget.allow_focus_children` https://github.com/Textualize/textual/pull/3989
+- Added `Query.blur` and `Query.focus` https://github.com/Textualize/textual/pull/4012
+- Added `MessagePump.message_queue_size` https://github.com/Textualize/textual/pull/4012
+- Added `TabbedContent.active_pane` https://github.com/Textualize/textual/pull/4012
### Fixed
@@ -27,6 +30,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- `SelectionList` option IDs are usable as soon as the widget is instantiated https://github.com/Textualize/textual/issues/3903
- Fix issue with `Strip.crop` when crop window start aligned with strip end https://github.com/Textualize/textual/pull/3998
- Fixed Strip.crop_extend https://github.com/Textualize/textual/pull/4011
+- Fixed declaration after nested rule set causing a parse error https://github.com/Textualize/textual/pull/4012
- ID and class validation was too lenient https://github.com/Textualize/textual/issues/3954
- Fixed a crash if the `TextArea` language was set but tree-sitter lanuage binaries were not installed https://github.com/Textualize/textual/issues/4045
| [
{
"components": [
{
"doc": "Focus the first matching node that permits focus.\n\nReturns:\n Query for chaining.",
"lines": [
434,
444
],
"name": "DOMQuery.focus",
"signature": "def focus(self) -> DOMQuery[QueryType]:",
"type": "functio... | [
"tests/test_query.py::test_query",
"tests/test_query.py::test_query_classes",
"tests/test_query.py::test_invalid_query",
"tests/test_signal.py::test_signal_errors",
"tests/test_signal.py::test_repr"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Signal
This is mainly for a new Signal class, and also some fixes / new method
- Adds a Signal class, which will be part of a new "pub sub" system.
- Adds Query.blur and Query.focus
- Fixes rule appearing after nested CSS
- Adds a message_queue_size property
- Adds TabbedContent.active_pane property
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/textual/css/query.py]
(definition of DOMQuery.focus:)
def focus(self) -> DOMQuery[QueryType]:
"""Focus the first matching node that permits focus.
Returns:
Query for chaining."""
(definition of DOMQuery.blur:)
def blur(self) -> DOMQuery[QueryType]:
"""Blur the first matching node that is focused.
Returns:
Query for chaining."""
[end of new definitions in src/textual/css/query.py]
[start of new definitions in src/textual/message_pump.py]
(definition of MessagePump.message_queue_size:)
def message_queue_size(self) -> int:
"""The current size of the message queue."""
[end of new definitions in src/textual/message_pump.py]
[start of new definitions in src/textual/signal.py]
(definition of SignalError:)
class SignalError(Exception):
"""Base class for a signal."""
(definition of Signal:)
class Signal:
"""A signal that a widget may subscribe to, in order to invoke callbacks when an associated event occurs."""
(definition of Signal.__init__:)
def __init__(self, owner: DOMNode, name: str) -> None:
"""Initialize a signal.
Args:
owner: The owner of this signal.
name: An identifier for debugging purposes."""
(definition of Signal.__rich_repr__:)
def __rich_repr__(self) -> rich.repr.Result:
(definition of Signal.subscribe:)
def subscribe(self, node: DOMNode, callback: IgnoreReturnCallbackType) -> None:
"""Subscribe a node to this signal.
When the signal is published, the callback will be invoked.
Args:
node: Node to subscribe.
callback: A callback function which takes no arguments, and returns anything (return type ignored)."""
(definition of Signal.unsubscribe:)
def unsubscribe(self, node: DOMNode) -> None:
"""Unsubscribe a node from this signal.
Args:
node: Node to unsubscribe,"""
(definition of Signal.publish:)
def publish(self) -> None:
"""Publish the signal (invoke subscribed callbacks)."""
[end of new definitions in src/textual/signal.py]
[start of new definitions in src/textual/widgets/_tabbed_content.py]
(definition of TabbedContent.active_pane:)
def active_pane(self) -> TabPane | None:
"""The currently active pane, or `None` if no pane is active."""
[end of new definitions in src/textual/widgets/_tabbed_content.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 86e93536b991014e0ea4bf993068202b446bb698 | |
joke2k__faker-1973 | 1,973 | joke2k/faker | null | 9371a08fe8f7cfe18296fbf363f33e3978898626 | 2024-01-12T13:09:40Z | diff --git a/faker/providers/bank/uk_UA/__init__.py b/faker/providers/bank/uk_UA/__init__.py
new file mode 100644
index 0000000000..3b53dd45f4
--- /dev/null
+++ b/faker/providers/bank/uk_UA/__init__.py
@@ -0,0 +1,11 @@
+from .. import Provider as BankProvider
+
+
+class Provider(BankProvider):
+ """Implement bank provider for ``uk_UA`` locale.
+ Source for rules for bban format:
+ https://bank.gov.ua/en/iban
+ """
+
+ bban_format = "#" * 27
+ country_code = "UA"
| diff --git a/tests/providers/test_bank.py b/tests/providers/test_bank.py
index 3826b82eb3..fe37cb5a1c 100644
--- a/tests/providers/test_bank.py
+++ b/tests/providers/test_bank.py
@@ -22,6 +22,7 @@
from faker.providers.bank.pt_PT import Provider as PtPtBankProvider
from faker.providers.bank.th_TH import Provider as ThThBankProvider
from faker.providers.bank.tr_TR import Provider as TrTrBankProvider
+from faker.providers.bank.uk_UA import Provider as UkUaBankProvider
def is_valid_iban(iban):
@@ -136,6 +137,21 @@ def test_iban(self, faker, num_samples):
assert re.fullmatch(r"\d{2}\d{24}", iban[2:])
+class TestUkUa:
+ """Test uk_UA bank provider"""
+
+ def test_bban(self, faker, num_samples):
+ for _ in range(num_samples):
+ assert re.fullmatch(r"\d{27}", faker.bban())
+
+ def test_iban(self, faker, num_samples):
+ for _ in range(num_samples):
+ iban = faker.iban()
+ assert is_valid_iban(iban)
+ assert iban[:2] == UkUaBankProvider.country_code
+ assert re.fullmatch(r"\d{2}\d{27}", iban[2:])
+
+
class TestEnGb:
"""Test en_GB bank provider"""
| [
{
"components": [
{
"doc": "Implement bank provider for ``uk_UA`` locale.\nSource for rules for bban format:\nhttps://bank.gov.ua/en/iban",
"lines": [
4,
11
],
"name": "Provider",
"signature": "class Provider(BankProvider):",
"type": "cla... | [
"tests/providers/test_bank.py::TestAzAz::test_bban",
"tests/providers/test_bank.py::TestAzAz::test_iban",
"tests/providers/test_bank.py::TestAzAz::test_bank",
"tests/providers/test_bank.py::TestCsCz::test_bban",
"tests/providers/test_bank.py::TestCsCz::test_iban",
"tests/providers/test_bank.py::TestNoNo::... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
add bank uk_UA
add provider.bank for uk_UA localization
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/bank/uk_UA/__init__.py]
(definition of Provider:)
class Provider(BankProvider):
"""Implement bank provider for ``uk_UA`` locale.
Source for rules for bban format:
https://bank.gov.ua/en/iban"""
[end of new definitions in faker/providers/bank/uk_UA/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
conan-io__conan-15447 | 15,447 | conan-io/conan | null | 98d68db7f3a7d6eec4dc463a24839af542fb70cc | 2024-01-12T09:21:41Z | diff --git a/conan/tools/cmake/presets.py b/conan/tools/cmake/presets.py
index 27792af73b9..c6fd71bd851 100644
--- a/conan/tools/cmake/presets.py
+++ b/conan/tools/cmake/presets.py
@@ -14,9 +14,11 @@
def write_cmake_presets(conanfile, toolchain_file, generator, cache_variables,
- user_presets_path=None, preset_prefix=None, buildenv=None, runenv=None):
+ user_presets_path=None, preset_prefix=None, buildenv=None, runenv=None,
+ cmake_executable=None):
preset_path, preset_data = _CMakePresets.generate(conanfile, toolchain_file, generator,
- cache_variables, preset_prefix, buildenv, runenv)
+ cache_variables, preset_prefix, buildenv, runenv,
+ cmake_executable)
_IncludingPresets.generate(conanfile, preset_path, user_presets_path, preset_prefix, preset_data)
@@ -24,7 +26,8 @@ class _CMakePresets:
""" Conan generated main CMakePresets.json inside the generators_folder
"""
@staticmethod
- def generate(conanfile, toolchain_file, generator, cache_variables, preset_prefix, buildenv, runenv):
+ def generate(conanfile, toolchain_file, generator, cache_variables, preset_prefix, buildenv, runenv,
+ cmake_executable):
cache_variables = cache_variables or {}
if platform.system() == "Windows" and generator == "MinGW Makefiles":
if "CMAKE_SH" not in cache_variables:
@@ -61,12 +64,13 @@ def generate(conanfile, toolchain_file, generator, cache_variables, preset_prefi
_CMakePresets._insert_preset(data, "testPresets", test_preset)
configure_preset = _CMakePresets._configure_preset(conanfile, generator, cache_variables,
toolchain_file, multiconfig,
- preset_prefix, buildenv)
+ preset_prefix, buildenv,
+ cmake_executable)
# Conan generated presets should have only 1 configurePreset, no more, overwrite it
data["configurePresets"] = [configure_preset]
else:
data = _CMakePresets._contents(conanfile, toolchain_file, cache_variables, generator,
- preset_prefix, buildenv, runenv)
+ preset_prefix, buildenv, runenv, cmake_executable)
preset_content = json.dumps(data, indent=4)
save(preset_path, preset_content)
@@ -85,14 +89,14 @@ def _insert_preset(data, preset_type, preset):
@staticmethod
def _contents(conanfile, toolchain_file, cache_variables, generator, preset_prefix, buildenv,
- runenv):
+ runenv, cmake_executable):
"""
Contents for the CMakePresets.json
It uses schema version 3 unless it is forced to 2
"""
multiconfig = is_multi_configuration(generator)
conf = _CMakePresets._configure_preset(conanfile, generator, cache_variables, toolchain_file,
- multiconfig, preset_prefix, buildenv)
+ multiconfig, preset_prefix, buildenv, cmake_executable)
build = _CMakePresets._build_preset_fields(conanfile, multiconfig, preset_prefix)
test = _CMakePresets._test_preset_fields(conanfile, multiconfig, preset_prefix, runenv)
ret = {"version": 3,
@@ -106,7 +110,7 @@ def _contents(conanfile, toolchain_file, cache_variables, generator, preset_pref
@staticmethod
def _configure_preset(conanfile, generator, cache_variables, toolchain_file, multiconfig,
- preset_prefix, buildenv):
+ preset_prefix, buildenv, cmake_executable):
build_type = conanfile.settings.get_safe("build_type")
name = _CMakePresets._configure_preset_name(conanfile, multiconfig)
if preset_prefix:
@@ -124,6 +128,9 @@ def _configure_preset(conanfile, generator, cache_variables, toolchain_file, mul
if buildenv:
ret["environment"] = buildenv
+ if cmake_executable:
+ ret["cmakeExecutable"] = cmake_executable
+
if is_msvc(conanfile):
# We can force the generator Visual even if it is Ninja, to define the toolset
toolset = GenericSystemBlock.get_toolset("Visual", conanfile)
diff --git a/conan/tools/cmake/toolchain/toolchain.py b/conan/tools/cmake/toolchain/toolchain.py
index f60259c4525..52e446a6f21 100644
--- a/conan/tools/cmake/toolchain/toolchain.py
+++ b/conan/tools/cmake/toolchain/toolchain.py
@@ -184,6 +184,18 @@ def content(self):
content = relativize_generated_file(content, self._conanfile, "${CMAKE_CURRENT_LIST_DIR}")
return content
+ def _find_cmake_exe(self):
+ for req in self._conanfile.dependencies.direct_build.values():
+ if req.ref.name == "cmake":
+ for bindir in req.cpp_info.bindirs:
+ cmake_path = os.path.join(bindir, "cmake")
+ cmake_exe_path = os.path.join(bindir, "cmake.exe")
+
+ if os.path.exists(cmake_path):
+ return cmake_path
+ elif os.path.exists(cmake_exe_path):
+ return cmake_exe_path
+
def generate(self):
"""
This method will save the generated files to the conanfile.generators_folder
@@ -215,7 +227,7 @@ def generate(self):
else:
cache_variables[name] = value
- buildenv, runenv = None, None
+ buildenv, runenv, cmake_executable = None, None, None
if self._conanfile.conf.get("tools.cmake.cmaketoolchain:presets_environment", default="",
check_type=str, choices=("disabled", "")) != "disabled":
@@ -228,8 +240,11 @@ def generate(self):
runenv = {name: value for name, value in
run_env.items(variable_reference="$penv{{{name}}}")}
+ cmake_executable = self._conanfile.conf.get("tools.cmake:cmake_program", None) or self._find_cmake_exe()
+
write_cmake_presets(self._conanfile, toolchain, self.generator, cache_variables,
- self.user_presets_path, self.presets_prefix, buildenv, runenv)
+ self.user_presets_path, self.presets_prefix, buildenv, runenv,
+ cmake_executable)
def _get_generator(self, recipe_generator):
# Returns the name of the generator to be used by CMake
| diff --git a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
index 604aaf01d75..3940befd7c9 100644
--- a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
+++ b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
@@ -1222,3 +1222,60 @@ def test_presets_njobs():
c.run('install . -g CMakeToolchain -c tools.build:jobs=42')
presets = json.loads(c.load("CMakePresets.json"))
assert presets["buildPresets"][0]["jobs"] == 42
+
+
+def test_add_cmakeexe_to_presets():
+ c = TestClient()
+
+ tool = textwrap.dedent(r"""
+ import os
+ from conan import ConanFile
+ from conan.tools.files import chdir, save
+ class Tool(ConanFile):
+ name = "cmake"
+ version = "3.27"
+ settings = "os", "compiler", "arch", "build_type"
+ def package(self):
+ with chdir(self, self.package_folder):
+ save(self, "bin/{}", "")
+ """)
+
+ profile = textwrap.dedent("""
+ include(default)
+ [platform_tool_requires]
+ cmake/3.27
+ """)
+
+ consumer = textwrap.dedent("""
+ [tool_requires]
+ cmake/3.27
+ [layout]
+ cmake_layout
+ """)
+
+ cmake_exe = "cmake.exe" if platform.system() == "Windows" else "cmake"
+
+ c.save({"tool.py": tool.format(cmake_exe),
+ "conanfile.txt": consumer,
+ "myprofile": profile})
+ c.run("create tool.py")
+ c.run("install . -g CMakeToolchain -g CMakeDeps")
+
+ presets_path = os.path.join("build", "Release", "generators", "CMakePresets.json") \
+ if platform.system() != "Windows" else os.path.join("build", "generators", "CMakePresets.json")
+ presets = json.loads(c.load(presets_path))
+
+ assert cmake_exe == os.path.basename(presets["configurePresets"][0].get("cmakeExecutable"))
+
+ # if we set "tools.cmake:cmake_program" that will have preference
+ c.run("install . -g CMakeToolchain -g CMakeDeps -c tools.cmake:cmake_program='/other/path/cmake'")
+ presets = json.loads(c.load(presets_path))
+
+ assert '/other/path/cmake' == presets["configurePresets"][0].get("cmakeExecutable")
+
+ # if we have a platform_tool_requires it will not be set because it is filtered before
+ # so it will not be in direct_build dependencies
+ c.run("install . -g CMakeToolchain -g CMakeDeps -pr:h=./myprofile")
+
+ presets = json.loads(c.load(presets_path))
+ assert presets["configurePresets"][0].get("cmakeExecutable") is None
| [
{
"components": [
{
"doc": "",
"lines": [
187,
197
],
"name": "CMakeToolchain._find_cmake_exe",
"signature": "def _find_cmake_exe(self):",
"type": "function"
}
],
"file": "conan/tools/cmake/toolchain/toolchain.py"
}
] | [
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_add_cmakeexe_to_presets"
] | [
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build_linux_to_macos",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build_user_toolchain",
"conans/test/integration/... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Consider adding cmakeExecutable to presets
Changelog: Feature: Add cmakeExecutable to configure preset.
Docs: https://github.com/conan-io/docs/pull/3548
Closes: https://github.com/conan-io/conan/issues/15427
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/cmake/toolchain/toolchain.py]
(definition of CMakeToolchain._find_cmake_exe:)
def _find_cmake_exe(self):
[end of new definitions in conan/tools/cmake/toolchain/toolchain.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
huggingface__huggingface_hub-1967 | 1,967 | huggingface/huggingface_hub | null | 6b202f198a94609d4a907fd26c65307d275a2d3d | 2024-01-11T14:02:26Z | diff --git a/src/huggingface_hub/__init__.py b/src/huggingface_hub/__init__.py
index 3041e6110f..f4aac31a80 100644
--- a/src/huggingface_hub/__init__.py
+++ b/src/huggingface_hub/__init__.py
@@ -245,6 +245,7 @@
"HfFileSystem",
"HfFileSystemFile",
"HfFileSystemResolvedPath",
+ "HfFileSystemStreamFile",
],
"hub_mixin": [
"ModelHubMixin",
@@ -592,6 +593,7 @@ def __dir__():
HfFileSystem, # noqa: F401
HfFileSystemFile, # noqa: F401
HfFileSystemResolvedPath, # noqa: F401
+ HfFileSystemStreamFile, # noqa: F401
)
from .hub_mixin import (
ModelHubMixin, # noqa: F401
diff --git a/src/huggingface_hub/hf_file_system.py b/src/huggingface_hub/hf_file_system.py
index 2407daf9dd..a78ab0fd80 100644
--- a/src/huggingface_hub/hf_file_system.py
+++ b/src/huggingface_hub/hf_file_system.py
@@ -10,6 +10,7 @@
from urllib.parse import quote, unquote
import fsspec
+from requests import Response
from ._commit_api import CommitOperationCopy, CommitOperationDelete
from .constants import DEFAULT_REVISION, ENDPOINT, REPO_TYPE_MODEL, REPO_TYPES_MAPPING, REPO_TYPES_URL_PREFIXES
@@ -216,11 +217,15 @@ def _open(
path: str,
mode: str = "rb",
revision: Optional[str] = None,
+ block_size: Optional[int] = None,
**kwargs,
) -> "HfFileSystemFile":
if "a" in mode:
raise NotImplementedError("Appending to remote files is not yet supported.")
- return HfFileSystemFile(self, path, mode=mode, revision=revision, **kwargs)
+ if block_size == 0:
+ return HfFileSystemStreamFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
+ else:
+ return HfFileSystemFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
def _rm(self, path: str, revision: Optional[str] = None, **kwargs) -> None:
resolved_path = self.resolve_path(path, revision=revision)
@@ -649,6 +654,102 @@ def _upload_chunk(self, final: bool = False) -> None:
)
+class HfFileSystemStreamFile(fsspec.spec.AbstractBufferedFile):
+ def __init__(
+ self,
+ fs: HfFileSystem,
+ path: str,
+ mode: str = "rb",
+ revision: Optional[str] = None,
+ block_size: int = 0,
+ cache_type: str = "none",
+ **kwargs,
+ ):
+ if block_size != 0:
+ raise ValueError(f"HfFileSystemStreamFile only supports block_size=0 but got {block_size}")
+ if cache_type != "none":
+ raise ValueError(f"HfFileSystemStreamFile only supports cache_type='none' but got {cache_type}")
+ if "w" in mode:
+ raise ValueError(f"HfFileSystemStreamFile only supports reading but got mode='{mode}'")
+ try:
+ self.resolved_path = fs.resolve_path(path, revision=revision)
+ except FileNotFoundError as e:
+ if "w" in kwargs.get("mode", ""):
+ raise FileNotFoundError(
+ f"{e}.\nMake sure the repository and revision exist before writing data."
+ ) from e
+ # avoid an unecessary .info() call to instantiate .details
+ self.details = {"name": self.resolved_path.unresolve(), "size": None}
+ super().__init__(
+ fs, self.resolved_path.unresolve(), mode=mode, block_size=block_size, cache_type=cache_type, **kwargs
+ )
+ self.response: Optional[Response] = None
+ self.fs: HfFileSystem
+
+ def seek(self, loc: int, whence: int = 0):
+ if loc == 0 and whence == 1:
+ return
+ if loc == self.loc and whence == 0:
+ return
+ raise ValueError("Cannot seek streaming HF file")
+
+ def read(self, length: int = -1):
+ read_args = (length,) if length >= 0 else ()
+ if self.response is None or self.response.raw.isclosed():
+ url = hf_hub_url(
+ repo_id=self.resolved_path.repo_id,
+ revision=self.resolved_path.revision,
+ filename=self.resolved_path.path_in_repo,
+ repo_type=self.resolved_path.repo_type,
+ endpoint=self.fs.endpoint,
+ )
+ self.response = http_backoff(
+ "GET",
+ url,
+ headers=self.fs._api._build_hf_headers(),
+ retry_on_status_codes=(502, 503, 504),
+ stream=True,
+ )
+ hf_raise_for_status(self.response)
+ try:
+ out = self.response.raw.read(*read_args)
+ except Exception:
+ self.response.close()
+
+ # Retry by recreating the connection
+ url = hf_hub_url(
+ repo_id=self.resolved_path.repo_id,
+ revision=self.resolved_path.revision,
+ filename=self.resolved_path.path_in_repo,
+ repo_type=self.resolved_path.repo_type,
+ endpoint=self.fs.endpoint,
+ )
+ self.response = http_backoff(
+ "GET",
+ url,
+ headers={"Range": "bytes=%d-" % self.loc, **self.fs._api._build_hf_headers()},
+ retry_on_status_codes=(502, 503, 504),
+ stream=True,
+ )
+ hf_raise_for_status(self.response)
+ try:
+ out = self.response.raw.read(*read_args)
+ except Exception:
+ self.response.close()
+ raise
+ self.loc += len(out)
+ return out
+
+ def __del__(self):
+ if not hasattr(self, "resolved_path"):
+ # Means that the constructor failed. Nothing to do.
+ return
+ return super().__del__()
+
+ def __reduce__(self):
+ return reopen, (self.fs, self.path, self.mode, self.blocksize, self.cache.name)
+
+
def safe_revision(revision: str) -> str:
return revision if SPECIAL_REFS_REVISION_REGEX.match(revision) else safe_quote(revision)
@@ -666,3 +767,7 @@ def _raise_file_not_found(path: str, err: Optional[Exception]) -> NoReturn:
elif isinstance(err, HFValidationError):
msg = f"{path} (invalid repository id)"
raise FileNotFoundError(msg) from err
+
+
+def reopen(fs: HfFileSystem, path: str, mode: str, block_size: int, cache_type: str):
+ return fs.open(path, mode=mode, block_size=block_size, cache_type=cache_type)
| diff --git a/tests/test_hf_file_system.py b/tests/test_hf_file_system.py
index 00e389a24b..02cf913515 100644
--- a/tests/test_hf_file_system.py
+++ b/tests/test_hf_file_system.py
@@ -1,4 +1,5 @@
import datetime
+import io
import unittest
from typing import Optional
from unittest.mock import patch
@@ -6,7 +7,7 @@
import fsspec
import pytest
-from huggingface_hub.hf_file_system import HfFileSystem
+from huggingface_hub.hf_file_system import HfFileSystem, HfFileSystemFile, HfFileSystemStreamFile
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
from .testing_constants import ENDPOINT_STAGING, TOKEN
@@ -152,8 +153,24 @@ def test_remove_directory(self):
def test_read_file(self):
with self.hffs.open(self.hf_path + "/data/text_data.txt", "r") as f:
+ self.assertIsInstance(f, io.TextIOWrapper)
+ self.assertIsInstance(f.buffer, HfFileSystemFile)
self.assertEqual(f.read(), "dummy text data")
+ def test_stream_file(self):
+ with self.hffs.open(self.hf_path + "/data/binary_data.bin", block_size=0) as f:
+ self.assertIsInstance(f, HfFileSystemStreamFile)
+ self.assertEqual(f.read(), b"dummy binary data")
+
+ def test_stream_file_retry(self):
+ with self.hffs.open(self.hf_path + "/data/binary_data.bin", block_size=0) as f:
+ self.assertIsInstance(f, HfFileSystemStreamFile)
+ self.assertEqual(f.read(6), b"dummy ")
+ # Simulate that streaming fails mid-way
+ f.response.raw.read = None
+ self.assertEqual(f.read(6), b"binary")
+ self.assertIsNotNone(f.response.raw.read) # a new connection has been created
+
def test_read_file_with_revision(self):
with self.hffs.open(self.hf_path + "/data/binary_data_for_pr.bin", "rb", revision="refs/pr/1") as f:
self.assertEqual(f.read(), b"dummy binary data on pr")
| [
{
"components": [
{
"doc": "",
"lines": [
657,
750
],
"name": "HfFileSystemStreamFile",
"signature": "class HfFileSystemStreamFile(fsspec.spec.AbstractBufferedFile):",
"type": "class"
},
{
"doc": "",
"lines": [... | [
"tests/test_hf_file_system.py::HfFileSystemTests::test_copy_file",
"tests/test_hf_file_system.py::HfFileSystemTests::test_file_type",
"tests/test_hf_file_system.py::HfFileSystemTests::test_find_data_file_no_revision",
"tests/test_hf_file_system.py::HfFileSystemTests::test_find_root_directory_no_revision",
"... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add HfFileSystemStreamFile
This allows faster file streaming. This is useful for streaming WebDatasets for example
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/huggingface_hub/hf_file_system.py]
(definition of HfFileSystemStreamFile:)
class HfFileSystemStreamFile(fsspec.spec.AbstractBufferedFile):
(definition of HfFileSystemStreamFile.__init__:)
def __init__( self, fs: HfFileSystem, path: str, mode: str = "rb", revision: Optional[str] = None, block_size: int = 0, cache_type: str = "none", **kwargs, ):
(definition of HfFileSystemStreamFile.seek:)
def seek(self, loc: int, whence: int = 0):
(definition of HfFileSystemStreamFile.read:)
def read(self, length: int = -1):
(definition of HfFileSystemStreamFile.__del__:)
def __del__(self):
(definition of HfFileSystemStreamFile.__reduce__:)
def __reduce__(self):
(definition of reopen:)
def reopen(fs: HfFileSystem, path: str, mode: str, block_size: int, cache_type: str):
[end of new definitions in src/huggingface_hub/hf_file_system.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4058e1f97ebe256b2f3006d4bc31be275c66df6b | ||
joke2k__faker-1971 | 1,971 | joke2k/faker | null | 40ed6691b3c0feb230bbdfeb0a47f066f51e7a56 | 2024-01-10T07:38:34Z | diff --git a/faker/providers/currency/fa_IR/__init__.py b/faker/providers/currency/fa_IR/__init__.py
new file mode 100644
index 0000000000..747313be32
--- /dev/null
+++ b/faker/providers/currency/fa_IR/__init__.py
@@ -0,0 +1,8 @@
+from .. import Provider as CurrencyProvider
+
+
+class Provider(CurrencyProvider):
+ price_formats = ["###,###,000","#,###,000,000", "%,###,###,###,###", "%,###,###,###,000,000"]
+
+ def pricetag(self) -> str:
+ return self.numerify(self.random_element(self.price_formats)) + "\uFDFC"
| diff --git a/tests/providers/test_currency.py b/tests/providers/test_currency.py
index 751117e7e0..862a508141 100644
--- a/tests/providers/test_currency.py
+++ b/tests/providers/test_currency.py
@@ -247,6 +247,23 @@ def test_pricetag(self, faker, num_samples):
assert isinstance(pricetag, str)
+class TestFaIr:
+ """Test fa_IR currency provider"""
+
+ num_samples = 100
+
+ @classmethod
+ def setup_class(cls):
+ from faker.providers.currency.fa_IR import Provider as FaIrCurrencyProvider
+
+ cls.provider = FaIrCurrencyProvider
+
+ def test_pricetag(self, faker, num_samples):
+ for _ in range(num_samples):
+ pricetag = faker.pricetag()
+ assert isinstance(pricetag, str)
+
+
class TestFrCa:
"""Test fr_CA currency provider"""
| [
{
"components": [
{
"doc": "",
"lines": [
4,
8
],
"name": "Provider",
"signature": "class Provider(CurrencyProvider):",
"type": "class"
},
{
"doc": "",
"lines": [
7,
8
],
"na... | [
"tests/providers/test_currency.py::TestFaIr::test_pricetag"
] | [
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency",
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency_code",
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency_name",
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency_code_has_s... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add fa_IR localization for currency provider
### Add ``fa_IR`` localization in ``currency`` provider
``IRR`` is Iran main currency and looks something like ``1,000,000,000 IRR``,
This PR will add better localize faker for Iran currency.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/currency/fa_IR/__init__.py]
(definition of Provider:)
class Provider(CurrencyProvider):
(definition of Provider.pricetag:)
def pricetag(self) -> str:
[end of new definitions in faker/providers/currency/fa_IR/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.