language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/prefetch_related/tests.py | {
"start": 62341,
"end": 68031
} | class ____(TestCase):
databases = {"default", "other"}
def test_using_is_honored_m2m(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related("authors")
with self.assertNumQueries(2, using="other"):
books = "".join(
"%s (%s)\n"
% (book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1
)
self.assertEqual(
books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n",
)
# Reverse
qs2 = A.prefetch_related("books")
with self.assertNumQueries(2, using="other"):
authors = "".join(
"%s: %s\n"
% (author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2
)
self.assertEqual(
authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n",
)
def test_using_is_honored_fkey(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using="other"):
books = ", ".join(
a.first_book.title for a in A.prefetch_related("first_book")
)
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using="other"):
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related("first_time_authors")
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using("other")
A = AuthorWithAge.objects.using("other")
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name="Jane", first_book=book1, age=50)
A.create(name="Tom", first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using="other"):
authors = ", ".join(a.author.name for a in A.prefetch_related("author"))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using="other"):
ages = ", ".join(
str(a.authorwithage.age) for a in A.prefetch_related("authorwithage")
)
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using("other")
A = Author.objects.using("other")
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using="other"):
prefetch = Prefetch("first_time_authors", queryset=Author.objects.all())
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
# Explicit using on the same db.
with self.assertNumQueries(2, using="other"):
prefetch = Prefetch(
"first_time_authors", queryset=Author.objects.using("other")
)
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(
books,
"Poems (Charlotte Bronte)\nSense and Sensibility (Jane Austen)\n",
)
# Explicit using on a different db.
with (
self.assertNumQueries(1, using="default"),
self.assertNumQueries(1, using="other"),
):
prefetch = Prefetch(
"first_time_authors", queryset=Author.objects.using("default")
)
books = "".join(
"%s (%s)\n"
% (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch)
)
self.assertEqual(books, "Poems ()\n" "Sense and Sensibility ()\n")
| MultiDbTests |
python | allegroai__clearml | clearml/automation/optuna/optuna.py | {
"start": 4760,
"end": 12261
} | class ____(SearchStrategy):
def __init__(
self,
base_task_id: str,
hyper_parameters: Sequence[Parameter],
objective_metric: Objective,
execution_queue: str,
num_concurrent_workers: int,
max_iteration_per_job: Optional[int],
total_max_jobs: Optional[int],
pool_period_min: float = 2.0,
min_iteration_per_job: Optional[int] = None,
time_limit_per_job: Optional[float] = None,
compute_time_limit: Optional[float] = None,
optuna_sampler: Optional[Any] = None,
optuna_pruner: Optional[Any] = None,
continue_previous_study: Optional[optuna.Study] = None,
**optuna_kwargs: Any
) -> None:
"""
Initialize an Optuna search strategy optimizer
Optuna performs robust and efficient hyperparameter optimization at scale by combining.
Specific hyperparameter pruning strategy can be selected via `sampler` and `pruner` arguments
:param str base_task_id: Task ID (str)
:param list hyper_parameters: list of Parameter objects to optimize over
:param Objective objective_metric: Objective metric to maximize / minimize
:param str execution_queue: execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: Limit number of concurrent running Tasks (machines)
:param int max_iteration_per_job: number of iteration per job
'iterations' are the reported iterations for the specified objective,
not the maximum reported iteration of the Task.
:param int total_max_jobs: total maximum job for the optimization process.
Must be provided in order to calculate the total budget for the optimization process.
The total budget is measured by "iterations" (see above)
and will be set to `max_iteration_per_job * total_max_jobs`
This means more than total_max_jobs could be created, as long as the cumulative iterations
(summed over all created jobs) will not exceed `max_iteration_per_job * total_max_jobs`
:param float pool_period_min: time in minutes between two consecutive pools
:param int min_iteration_per_job: The minimum number of iterations (of the Objective metric) per single job,
before early stopping the Job. (Optional)
:param float time_limit_per_job: Optional, maximum execution time per single job in minutes,
when time limit is exceeded job is aborted
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param optuna_kwargs: arguments passed directly to the Optuna object
"""
super(OptimizerOptuna, self).__init__(
base_task_id=base_task_id,
hyper_parameters=hyper_parameters,
objective_metric=objective_metric,
execution_queue=execution_queue,
num_concurrent_workers=num_concurrent_workers,
pool_period_min=pool_period_min,
time_limit_per_job=time_limit_per_job,
compute_time_limit=compute_time_limit,
max_iteration_per_job=max_iteration_per_job,
min_iteration_per_job=min_iteration_per_job,
total_max_jobs=total_max_jobs,
)
self._optuna_sampler = optuna_sampler
self._optuna_pruner = optuna_pruner
verified_optuna_kwargs = []
self._optuna_kwargs = dict((k, v) for k, v in optuna_kwargs.items() if k in verified_optuna_kwargs)
self._param_iterator = None
self._objective = None
self._study = continue_previous_study if continue_previous_study else None
self.parameter_override_history = []
def start(self) -> None:
"""
Start the Optimizer controller function loop()
If the calling process is stopped, the controller will stop as well.
.. important::
This function returns only after optimization is completed or :meth:`stop` was called.
"""
if self._objective_metric.len != 1:
self._study = optuna.create_study(
directions=[
"minimize" if sign_ < 0 else "maximize" for sign_ in self._objective_metric.get_objective_sign()
],
load_if_exists=False,
sampler=self._optuna_sampler,
pruner=self._optuna_pruner,
study_name=self._optimizer_task.id if self._optimizer_task else None,
)
else:
self._study = optuna.create_study(
direction="minimize" if self._objective_metric.get_objective_sign()[0] < 0 else "maximize",
load_if_exists=False,
sampler=self._optuna_sampler,
pruner=self._optuna_pruner,
study_name=self._optimizer_task.id if self._optimizer_task else None,
)
config_space = self._convert_hyper_parameters_to_optuna()
self._objective = OptunaObjective(
base_task_id=self._base_task_id,
queue_name=self._execution_queue,
optimizer=self,
max_iteration_per_job=self.max_iteration_per_job,
min_iteration_per_job=self.min_iteration_per_job,
sleep_interval=int(self.pool_period_minutes * 60),
config_space=config_space,
)
self._study.optimize(
self._objective.objective,
n_trials=self.total_max_jobs,
n_jobs=self._num_concurrent_workers,
)
def stop(self) -> None:
"""
Stop the current running optimization loop,
Called from a different thread than the :meth:`start`.
"""
if self._study:
try:
self._study.stop()
except Exception as ex:
print(ex)
self._stop_event.set()
def _convert_hyper_parameters_to_optuna(self) -> dict:
cs = {}
for p in self._hyper_parameters:
if isinstance(p, LogUniformParameterRange):
hp_type = "suggest_float"
hp_params = dict(
low=p.base**p.min_value,
high=p.base**p.max_value,
log=True,
step=None,
)
elif isinstance(p, UniformParameterRange):
if p.include_max and p.step_size:
hp_type = "suggest_discrete_uniform"
hp_params = dict(low=p.min_value, high=p.max_value, q=p.step_size)
else:
hp_type = "suggest_float"
hp_params = dict(low=p.min_value, high=p.max_value, log=False, step=p.step_size)
elif isinstance(p, UniformIntegerParameterRange):
hp_type = "suggest_int"
hp_params = dict(
low=p.min_value,
high=p.max_value if p.include_max else p.max_value - p.step_size,
log=False,
step=p.step_size,
)
elif isinstance(p, DiscreteParameterRange):
hp_type = "suggest_categorical"
hp_params = dict(choices=p.values)
else:
raise ValueError("HyperParameter type {} not supported yet with OptimizerBOHB".format(type(p)))
cs[p.name] = (hp_type, hp_params)
return cs
| OptimizerOptuna |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 110344,
"end": 113198
} | class ____(Request):
"""
Set the model ready flag to True. If the model is an output model of a task then try to publish the task.
:param model: Model id
:type model: str
:param force_publish_task: Publish the associated task (if exists) even if it
is not in the 'stopped' state. Optional, the default value is False.
:type force_publish_task: bool
:param publish_task: Indicates that the associated task (if exists) should be
published. Optional, the default value is True.
:type publish_task: bool
"""
_service = "models"
_action = "set_ready"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"force_publish_task": {
"description": "Publish the associated task (if exists) even if it is not in the 'stopped' state. Optional, the default value is False.",
"type": "boolean",
},
"model": {"description": "Model id", "type": "string"},
"publish_task": {
"description": "Indicates that the associated task (if exists) should be published. Optional, the default value is True.",
"type": "boolean",
},
},
"required": ["model"],
"type": "object",
}
def __init__(
self, model: str, force_publish_task: Optional[bool] = None, publish_task: Optional[bool] = None, **kwargs: Any
) -> None:
super(SetReadyRequest, self).__init__(**kwargs)
self.model = model
self.force_publish_task = force_publish_task
self.publish_task = publish_task
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("force_publish_task")
def force_publish_task(self) -> Optional[bool]:
return self._property_force_publish_task
@force_publish_task.setter
def force_publish_task(self, value: Optional[bool]) -> None:
if value is None:
self._property_force_publish_task = None
return
self.assert_isinstance(value, "force_publish_task", (bool,))
self._property_force_publish_task = value
@schema_property("publish_task")
def publish_task(self) -> Optional[bool]:
return self._property_publish_task
@publish_task.setter
def publish_task(self, value: Optional[bool]) -> None:
if value is None:
self._property_publish_task = None
return
self.assert_isinstance(value, "publish_task", (bool,))
self._property_publish_task = value
| SetReadyRequest |
python | PyCQA__pylint | tests/functional/g/generic_class_syntax.py | {
"start": 127,
"end": 262
} | class ____(Generic[_T]):
last_update: Optional[int] = None
def __init__(self, data: _T) -> None:
self.data = data
| Entity |
python | astropy__astropy | astropy/cosmology/_src/core.py | {
"start": 2944,
"end": 3550
} | class ____:
"""Descriptor for the `Cosmology.name` field."""
default: str | None = None
def __get__(
self, instance: Union["Cosmology", None], owner: type["Cosmology"] | None
) -> str:
# Called from the class. `dataclass` uses this to create ``__init__``.
if instance is None:
return self.default
# Called from the instance
return instance._name
def __set__(self, instance: "Cosmology", value: str | None) -> None:
object.__setattr__(instance, "_name", (None if value is None else str(value)))
@dataclass_decorator
| _NameField |
python | readthedocs__readthedocs.org | readthedocs/projects/tests/mockers.py | {
"start": 327,
"end": 9868
} | class ____:
def __init__(self, project, version, build, requestsmock):
self.project = project
self.version = version
self.build = build
self.requestsmock = requestsmock
self.patches = {}
self.mocks = {}
def start(self):
self._mock_api()
self._mock_environment()
self._mock_git_repository()
self._mock_artifact_builders()
self._mock_storage()
# Save the mock instances to be able to check them later from inside
# each test case.
for k, p in self.patches.items():
self.mocks[k] = p.start()
def stop(self):
for k, m in self.patches.items():
m.stop()
def add_file_in_repo_checkout(self, path, content):
"""
A quick way to emulate that a file is in the repo.
Does not change git data.
"""
destination = os.path.join(self.project_repository_path, path)
open(destination, "w").write(content)
return destination
def _mock_artifact_builders(self):
# TODO: save the mock instances to be able to check them later
# self.patches['builder.localmedia.move'] = mock.patch(
# 'readthedocs.doc_builder.backends.sphinx.LocalMediaBuilder.move',
# )
# TODO: would be good to patch just `.run` but doing that, we are
# raising a `BuildAppError('No TeX files were found')`
# currently on the `.build` method
#
# self.patches['builder.pdf.run'] = mock.patch(
# 'readthedocs.doc_builder.backends.sphinx.PdfBuilder.run',
# )
# self.patches['builder.pdf.run'] = mock.patch(
# 'readthedocs.doc_builder.backends.sphinx.PdfBuilder.build',
# )
self.patches["builder.pdf.PdfBuilder.pdf_file_name"] = mock.patch(
"readthedocs.doc_builder.backends.sphinx.PdfBuilder.pdf_file_name",
"project-slug.pdf",
)
self.patches["builder.pdf.LatexBuildCommand.run"] = mock.patch(
"readthedocs.doc_builder.backends.sphinx.LatexBuildCommand.run",
return_value=mock.MagicMock(output="stdout", successful=True),
)
# self.patches['builder.pdf.LatexBuildCommand.output'] = mock.patch(
# 'readthedocs.doc_builder.backends.sphinx.LatexBuildCommand.output',
# )
self.patches["builder.pdf.glob"] = mock.patch(
"readthedocs.doc_builder.backends.sphinx.glob",
return_value=["output.file"],
)
self.patches["builder.pdf.os.path.getmtime"] = mock.patch(
"readthedocs.doc_builder.backends.sphinx.os.path.getmtime",
return_value=1,
)
# NOTE: this is a problem, because it does not execute
# `run_command_class` which does other extra stuffs, like appending the
# commands to `environment.commands` which is used later
self.patches["environment.run_command_class"] = mock.patch(
"readthedocs.projects.tasks.builds.LocalBuildEnvironment.run_command_class",
return_value=mock.MagicMock(output="stdout", successful=True),
)
# TODO: find a way to not mock this one and mock `open()` used inside
# it instead to make the mock more granularly and be able to execute
# `get_final_doctype` normally.
self.patches["builder.html.mkdocs.MkdocsHTML.get_final_doctype"] = mock.patch(
"readthedocs.doc_builder.backends.mkdocs.MkdocsHTML.get_final_doctype",
return_value=MKDOCS,
)
# NOTE: another approach would be to make these files are in the tmpdir
# used for testing (see ``apply_fs`` util function)
self.patches["builder.html.sphinx.HtmlBuilder.show_conf"] = mock.patch(
"readthedocs.doc_builder.backends.sphinx.HtmlBuilder.show_conf",
)
def _mock_git_repository(self):
self.patches["git.Backend.run"] = mock.patch(
"readthedocs.vcs_support.backends.git.Backend.run",
return_value=(0, "stdout", "stderr"),
)
# TODO: improve this
self._counter = 0
# The tmp project repository should be at a unique location, but we need
# to hook into test setup and teardown such that we can clean up nicely.
# This probably means that the tmp dir should be handed to the mocker from
# outside.
self.project_repository_path = "/tmp/readthedocs-tests/git-repository"
shutil.rmtree(self.project_repository_path, ignore_errors=True)
os.makedirs(self.project_repository_path)
self.patches["models.Project.checkout_path"] = mock.patch(
"readthedocs.projects.models.Project.checkout_path",
return_value=self.project_repository_path,
)
self.patches["git.Backend.make_clean_working_dir"] = mock.patch(
"readthedocs.vcs_support.backends.git.Backend.make_clean_working_dir",
)
# Make a the backend to return 3 submodules when asked
self.patches["git.Backend.submodules"] = mock.patch(
"readthedocs.vcs_support.backends.git.Backend.submodules",
new_callable=mock.PropertyMock,
return_value=[
"one",
"two",
"three",
],
)
self.patches["git.Backend.has_ssh_key_with_write_access"] = mock.patch(
"readthedocs.vcs_support.backends.git.Backend.has_ssh_key_with_write_access",
return_value=False,
)
def _mock_environment(self):
# NOTE: by mocking `.run` we are not calling `.run_command_class`,
# where some magic happens (passing environment variables, for
# example). So, there are some things we cannot check with this mock
#
# It would be good to find a way to mock `BuildCommand.run` instead
self.patches["environment.run"] = mock.patch(
"readthedocs.projects.tasks.builds.LocalBuildEnvironment.run",
return_value=mock.MagicMock(successful=True),
)
# self.patches['environment.run'] = mock.patch(
# 'readthedocs.doc_builder.environments.BuildCommand.run',
# return_value=mock.MagicMock(successful=True)
# )
def _mock_storage(self):
self.patches["get_build_media_storage_class"] = mock.patch("readthedocs.projects.tasks.storage._get_build_media_storage_class")
def _mock_api(self):
headers = {"Content-Type": "application/json"}
self.requestsmock.get(
f"{settings.SLUMBER_API_HOST}/api/v2/version/{self.version.pk}/",
json=lambda requests, context: VersionAdminSerializer(self.version).data,
headers=headers,
)
self.requestsmock.patch(
f"{settings.SLUMBER_API_HOST}/api/v2/version/{self.version.pk}/",
status_code=201,
)
self.requestsmock.get(
f"{settings.SLUMBER_API_HOST}/api/v2/build/{self.build.pk}/",
json=lambda request, context: {
"id": self.build.pk,
"state": BUILD_STATE_TRIGGERED,
"commit": self.build.commit,
"task_executed_at": self.build.task_executed_at,
},
headers=headers,
)
self.requestsmock.post(
f"{settings.SLUMBER_API_HOST}/api/v2/command/",
status_code=201,
)
self.requestsmock.patch(
f"{settings.SLUMBER_API_HOST}/api/v2/build/{self.build.pk}/",
status_code=201,
)
self.requestsmock.post(
f"{settings.SLUMBER_API_HOST}/api/v2/build/{self.build.pk}/reset/",
status_code=201,
)
self.requestsmock.get(
f"{settings.SLUMBER_API_HOST}/api/v2/build/concurrent/?project__slug={self.project.slug}",
json=lambda request, context: {
"limit_reached": False,
"max_concurrent": settings.RTD_MAX_CONCURRENT_BUILDS,
"concurrent": 0,
},
headers=headers,
)
self.requestsmock.get(
f"{settings.SLUMBER_API_HOST}/api/v2/project/{self.project.pk}/active_versions/",
json=lambda request, context: {
"versions": [
{
"id": self.version.pk,
"slug": self.version.slug,
},
]
},
headers=headers,
)
self.requestsmock.post(
f"{settings.SLUMBER_API_HOST}/api/v2/build/{self.build.pk}/credentials/storage/",
status_code=201,
json={
"s3": {
"access_key_id": "some-access-key",
"secret_access_key": "some-secret-key",
"session_token": "some-session-token",
"bucket_name": "some-bucket-name",
"region_name": "us-east-1",
}
},
headers=headers,
)
self.requestsmock.patch(
f"{settings.SLUMBER_API_HOST}/api/v2/project/{self.project.pk}/",
status_code=201,
)
self.requestsmock.post(
f"{settings.SLUMBER_API_HOST}/api/v2/revoke/",
status_code=204,
)
self.requestsmock.post(
f"{settings.SLUMBER_API_HOST}/api/v2/notifications/",
status_code=204,
)
| BuildEnvironmentMocker |
python | wandb__wandb | wandb/vendor/pygments/lexers/textfmts.py | {
"start": 7000,
"end": 10852
} | class ____(RegexLexer):
"""
Lexer for `Todo.txt <http://todotxt.com/>`_ todo list format.
.. versionadded:: 2.0
"""
name = 'Todotxt'
aliases = ['todotxt']
# *.todotxt is not a standard extension for Todo.txt files; including it
# makes testing easier, and also makes autodetecting file type easier.
filenames = ['todo.txt', '*.todotxt']
mimetypes = ['text/x-todo']
# Aliases mapping standard token types of Todo.txt format concepts
CompleteTaskText = Operator # Chosen to de-emphasize complete tasks
IncompleteTaskText = Text # Incomplete tasks should look like plain text
# Priority should have most emphasis to indicate importance of tasks
Priority = Generic.Heading
# Dates should have next most emphasis because time is important
Date = Generic.Subheading
# Project and context should have equal weight, and be in different colors
Project = Generic.Error
Context = String
# If tag functionality is added, it should have the same weight as Project
# and Context, and a different color. Generic.Traceback would work well.
# Regex patterns for building up rules; dates, priorities, projects, and
# contexts are all atomic
# TODO: Make date regex more ISO 8601 compliant
date_regex = r'\d{4,}-\d{2}-\d{2}'
priority_regex = r'\([A-Z]\)'
project_regex = r'\+\S+'
context_regex = r'@\S+'
# Compound regex expressions
complete_one_date_regex = r'(x )(' + date_regex + r')'
complete_two_date_regex = (complete_one_date_regex + r'( )(' +
date_regex + r')')
priority_date_regex = r'(' + priority_regex + r')( )(' + date_regex + r')'
tokens = {
# Should parse starting at beginning of line; each line is a task
'root': [
# Complete task entry points: two total:
# 1. Complete task with two dates
(complete_two_date_regex, bygroups(CompleteTaskText, Date,
CompleteTaskText, Date),
'complete'),
# 2. Complete task with one date
(complete_one_date_regex, bygroups(CompleteTaskText, Date),
'complete'),
# Incomplete task entry points: six total:
# 1. Priority plus date
(priority_date_regex, bygroups(Priority, IncompleteTaskText, Date),
'incomplete'),
# 2. Priority only
(priority_regex, Priority, 'incomplete'),
# 3. Leading date
(date_regex, Date, 'incomplete'),
# 4. Leading context
(context_regex, Context, 'incomplete'),
# 5. Leading project
(project_regex, Project, 'incomplete'),
# 6. Non-whitespace catch-all
('\S+', IncompleteTaskText, 'incomplete'),
],
# Parse a complete task
'complete': [
# Newline indicates end of task, should return to root
(r'\s*\n', CompleteTaskText, '#pop'),
# Tokenize contexts and projects
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
('\S+', CompleteTaskText),
# Tokenize whitespace not containing a newline
('\s+', CompleteTaskText),
],
# Parse an incomplete task
'incomplete': [
# Newline indicates end of task, should return to root
(r'\s*\n', IncompleteTaskText, '#pop'),
# Tokenize contexts and projects
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
('\S+', IncompleteTaskText),
# Tokenize whitespace not containing a newline
('\s+', IncompleteTaskText),
],
}
| TodotxtLexer |
python | ray-project__ray | rllib/env/tests/test_multi_agent_env.py | {
"start": 16615,
"end": 30362
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_basic_mock(self):
env = BasicMultiAgent(4)
obs, info = env.reset()
check(obs, {0: 0, 1: 0, 2: 0, 3: 0})
for _ in range(24):
obs, rew, done, truncated, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
check(obs, {0: 0, 1: 0, 2: 0, 3: 0})
check(rew, {0: 1, 1: 1, 2: 1, 3: 1})
check(done, {0: False, 1: False, 2: False, 3: False, "__all__": False})
obs, rew, done, truncated, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
check(done, {0: True, 1: True, 2: True, 3: True, "__all__": True})
def test_round_robin_mock(self):
env = RoundRobinMultiAgent(2)
obs, info = env.reset()
check(obs, {0: 0})
for _ in range(5):
obs, rew, done, truncated, info = env.step({0: 0})
check(obs, {1: 0})
check(done["__all__"], False)
obs, rew, done, truncated, info = env.step({1: 0})
check(obs, {0: 0})
check(done["__all__"], False)
obs, rew, done, truncated, info = env.step({0: 0})
check(done["__all__"], True)
def test_no_reset_until_poll(self):
env = MultiAgentEnvWrapper(lambda v: BasicMultiAgent(2), [], 1)
self.assertFalse(env.get_sub_environments()[0].resetted)
env.poll()
self.assertTrue(env.get_sub_environments()[0].resetted)
def test_vectorize_basic(self):
env = MultiAgentEnvWrapper(lambda v: BasicMultiAgent(2), [], 2)
obs, rew, terminateds, truncateds, _, _ = env.poll()
check(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
check(rew, {0: {}, 1: {}})
check(terminateds, {0: {"__all__": False}, 1: {"__all__": False}})
check(truncateds, terminateds)
for _ in range(24):
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, terminateds, truncateds, _, _ = env.poll()
check(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
check(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
check(
terminateds,
{
0: {0: False, 1: False, "__all__": False},
1: {0: False, 1: False, "__all__": False},
},
)
check(truncateds, terminateds)
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, terminateds, truncateds, _, _ = env.poll()
check(
terminateds,
{
0: {0: True, 1: True, "__all__": True},
1: {0: True, 1: True, "__all__": True},
},
)
check(truncateds, terminateds)
# Reset processing
self.assertRaises(
ValueError, lambda: env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
)
init_obs, init_infos = env.try_reset(0)
check(init_obs, {0: {0: 0, 1: 0}})
check(init_infos, {0: {0: {}, 1: {}}})
init_obs, init_infos = env.try_reset(1)
check(init_obs, {1: {0: 0, 1: 0}})
check(init_infos, {1: {0: {}, 1: {}}})
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, terminateds, truncateds, _, _ = env.poll()
check(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
check(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
check(
terminateds,
{
0: {0: False, 1: False, "__all__": False},
1: {0: False, 1: False, "__all__": False},
},
)
check(truncateds, terminateds)
def test_vectorize_round_robin(self):
env = MultiAgentEnvWrapper(lambda v: RoundRobinMultiAgent(2), [], 2)
obs, rew, terminateds, truncateds, _, _ = env.poll()
check(obs, {0: {0: 0}, 1: {0: 0}})
check(rew, {0: {}, 1: {}})
check(truncateds, {0: {"__all__": False}, 1: {"__all__": False}})
env.send_actions({0: {0: 0}, 1: {0: 0}})
obs, rew, terminateds, truncateds, _, _ = env.poll()
check(obs, {0: {1: 0}, 1: {1: 0}})
check(
truncateds,
{0: {"__all__": False, 1: False}, 1: {"__all__": False, 1: False}},
)
env.send_actions({0: {1: 0}, 1: {1: 0}})
obs, rew, terminateds, truncateds, _, _ = env.poll()
check(obs, {0: {0: 0}, 1: {0: 0}})
check(
truncateds,
{0: {"__all__": False, 0: False}, 1: {"__all__": False, 0: False}},
)
def test_multi_agent_sample(self):
def policy_mapping_fn(agent_id, episode, worker, **kwargs):
return "p{}".format(agent_id % 2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
default_policy_class=MockPolicy,
config=AlgorithmConfig()
.env_runners(rollout_fragment_length=50, num_env_runners=0)
.multi_agent(
policies={"p0", "p1"},
policy_mapping_fn=policy_mapping_fn,
),
)
batch = ev.sample()
check(batch.count, 50)
check(batch.policy_batches["p0"].count, 150)
check(batch.policy_batches["p1"].count, 100)
check(batch.policy_batches["p0"]["t"].tolist(), list(range(25)) * 6)
def test_multi_agent_sample_sync_remote(self):
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
default_policy_class=MockPolicy,
# This signature will raise a soft-deprecation warning due
# to the new signature we are using (agent_id, episode, **kwargs),
# but should not break this test.
config=AlgorithmConfig()
.env_runners(
rollout_fragment_length=50,
num_env_runners=0,
num_envs_per_env_runner=4,
remote_worker_envs=True,
remote_env_batch_wait_ms=99999999,
)
.multi_agent(
policies={"p0", "p1"},
policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: (
"p{}".format(agent_id % 2)
),
),
)
batch = ev.sample()
check(batch.count, 200)
def test_multi_agent_sample_async_remote(self):
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
default_policy_class=MockPolicy,
config=AlgorithmConfig()
.env_runners(
rollout_fragment_length=50,
num_env_runners=0,
num_envs_per_env_runner=4,
remote_worker_envs=True,
)
.multi_agent(
policies={"p0", "p1"},
policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: (
"p{}".format(agent_id % 2)
),
),
)
batch = ev.sample()
check(batch.count, 200)
def test_sample_from_early_done_env(self):
ev = RolloutWorker(
env_creator=lambda _: EarlyDoneMultiAgent(),
default_policy_class=MockPolicy,
config=AlgorithmConfig()
.env_runners(
rollout_fragment_length=1,
num_env_runners=0,
batch_mode="complete_episodes",
)
.multi_agent(
policies={"p0", "p1"},
policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: (
"p{}".format(agent_id % 2)
),
),
)
# This used to raise an Error due to the EarlyDoneMultiAgent
# terminating at e.g. agent0 w/o publishing the observation for
# agent1 anymore. This limitation is fixed and an env may
# terminate at any time (as well as return rewards for any agent
# at any time, even when that agent doesn't have an obs returned
# in the same call to `step()`).
ma_batch = ev.sample()
# Make sure that agents took the correct (alternating timesteps)
# path. Except for the last timestep, where both agents got
# terminated.
ag0_ts = ma_batch.policy_batches["p0"]["t"]
ag1_ts = ma_batch.policy_batches["p1"]["t"]
self.assertTrue(np.all(np.abs(ag0_ts[:-1] - ag1_ts[:-1]) == 1.0))
self.assertTrue(ag0_ts[-1] == ag1_ts[-1])
def test_multi_agent_with_flex_agents(self):
register_env("flex_agents_multi_agent", lambda _: FlexAgentsMultiAgent())
config = (
PPOConfig()
.api_stack(
enable_env_runner_and_connector_v2=False,
enable_rl_module_and_learner=False,
)
.environment("flex_agents_multi_agent")
.env_runners(num_env_runners=0)
.training(train_batch_size=50, minibatch_size=50, num_epochs=1)
)
algo = config.build()
for i in range(10):
result = algo.train()
print(
"Iteration {}, reward {}, timesteps {}".format(
i,
result[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN],
result[NUM_ENV_STEPS_SAMPLED_LIFETIME],
)
)
algo.stop()
def test_multi_agent_with_sometimes_zero_agents_observing(self):
register_env(
"sometimes_zero_agents", lambda _: SometimesZeroAgentsMultiAgent(num=4)
)
config = (
PPOConfig()
.api_stack(
enable_rl_module_and_learner=False,
enable_env_runner_and_connector_v2=False,
)
.environment("sometimes_zero_agents")
.env_runners(num_env_runners=0)
)
algo = config.build()
for i in range(4):
result = algo.train()
print(
"Iteration {}, reward {}, timesteps {}".format(
i,
result[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN],
result[NUM_ENV_STEPS_SAMPLED_LIFETIME],
)
)
algo.stop()
def test_multi_agent_sample_round_robin(self):
    """Round-robin agent scheduling yields interleaved, offset trajectories."""
    worker = RolloutWorker(
        env_creator=lambda _: RoundRobinMultiAgent(5, increment_obs=True),
        default_policy_class=MockPolicy,
        config=AlgorithmConfig()
        .env_runners(
            rollout_fragment_length=50,
            num_env_runners=0,
        )
        .multi_agent(
            policies={"p0"},
            policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: "p0",
        ),
    )
    batch = worker.sample()
    check(batch.count, 50)
    # Because agents are introduced into the env round-robin, some env steps
    # don't produce complete transitions, so the policy batch holds fewer
    # rows than the 50 sampled env steps.
    p0_batch = batch.policy_batches["p0"]
    check(p0_batch.count, 42)
    check(
        p0_batch["obs"][:10],
        one_hot(np.array([0, 1, 2, 3, 4] * 2), 10),
    )
    check(
        p0_batch["new_obs"][:10],
        one_hot(np.array([1, 2, 3, 4, 5] * 2), 10),
    )
    check(
        p0_batch["rewards"].tolist()[:10],
        [100, 100, 100, 100, 0] * 2,
    )
    check(
        p0_batch["terminateds"].tolist()[:10],
        [False, False, False, False, True] * 2,
    )
    check(
        p0_batch["truncateds"].tolist()[:10],
        [False, False, False, False, True] * 2,
    )
    check(
        p0_batch["t"].tolist()[:10],
        [4, 9, 14, 19, 24, 5, 10, 15, 20, 25],
    )
def test_custom_rnn_state_values(self):
    """Nested (dict) RNN state structs survive collection in a rollout."""
    state_template = {"some": {"here": np.array([1.0, 2.0, 3.0])}}

    class StatefulPolicy(RandomPolicy):
        # Emits `state_template` (broadcast along the batch dim) as its
        # state-out on every call; actions are all-zeros int32.
        def compute_actions(
            self,
            obs_batch,
            state_batches=None,
            prev_action_batch=None,
            prev_reward_batch=None,
            episodes=None,
            explore=True,
            timestep=None,
            **kwargs,
        ):
            batch_dims = (len(obs_batch),)
            states = tree.map_structure(
                lambda x: np.ones(batch_dims + x.shape) * x, state_template
            )
            return np.zeros(batch_dims, dtype=np.int32), [states], {}

        def get_initial_state(self):
            # Start from an empty dict so the very first state-in is empty.
            return [{}]

        def is_recurrent(self):
            return True

    worker = RolloutWorker(
        env_creator=lambda _: gym.make("CartPole-v1"),
        default_policy_class=StatefulPolicy,
        config=(
            AlgorithmConfig().env_runners(
                rollout_fragment_length=5,
                num_env_runners=0,
            )
            # Force `state_in_0` to be repeated every ts in the collected batch
            # (even though we don't even have a model that would care about this).
            .training(model={"max_seq_len": 1})
        ),
    )
    batch = convert_ma_batch_to_sample_batch(worker.sample())
    check(batch.count, 5)
    # First state-in is the initial (empty) state; afterwards the policy's
    # constant state-out is fed back in at every timestep.
    check(batch["state_in_0"][0], {})
    check(batch["state_out_0"][0], state_template)
    for ts in range(1, 5):
        check(batch["state_in_0"][ts], state_template)
        check(batch["state_out_0"][ts], state_template)
if __name__ == "__main__":
    import sys

    import pytest

    # Allow running this test module directly as a script; exit with
    # pytest's return code so CI sees failures.
    sys.exit(pytest.main(["-v", __file__]))
| TestMultiAgentEnv |
python | walkccc__LeetCode | solutions/3141. Maximum Hamming Distances/3141.py | {
"start": 0,
"end": 464
} | class ____:
def maxHammingDistances(self, nums: list[int], m: int) -> list[int]:
    """For each element, return the largest Hamming distance between it and
    any element of `nums`, via a max-plus "sum over subsets"-style DP over
    the m-bit hypercube (O(m * 2^m))."""
    n_masks = 1 << m
    # best[v] := max over nums of the number of bits flipped so far to
    # reach vertex v; vertices not yet reached stay at -inf.
    best = [-math.inf] * n_masks
    for value in nums:
        best[value] = 0
    # Process one bit per round: each vertex keeps its value (bit unchanged)
    # or inherits from its neighbor across that bit, paying one flip.
    for bit in range(m):
        flip = 1 << bit
        best = [
            max(best[mask], best[mask ^ flip] + 1) for mask in range(n_masks)
        ]
    return [best[value] for value in nums]
| Solution |
python | getsentry__sentry | tests/snuba/rules/conditions/test_event_frequency.py | {
"start": 51849,
"end": 52095
class ____(
    ErrorEventMixin,
    EventUniqueUserFrequencyConditionTestCase,
):
    """Run the unique-user-frequency condition test suite against error events.

    All behavior comes from the two bases; no overrides are needed here.
    """

    pass
@freeze_time(
(timezone.now() - timedelta(days=2)).replace(hour=12, minute=40, second=0, microsecond=0)
)
| ErrorIssueUniqueUserFrequencyConditionTestCase |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/log.py | {
"start": 942,
"end": 1321
class ____(BaseModel):
    """An individual log message."""

    # Not every message has a timestamp.
    timestamp: Annotated[
        datetime | None,
        # Schema level, say this is always a datetime if it exists
        WithJsonSchema({"type": "string", "format": "date-time"}),
    ] = None
    # Main event/message text; required on every entry.
    event: str

    # Retain any additional structured fields present on the record
    # instead of rejecting them.
    model_config = ConfigDict(extra="allow")
| StructuredLogMessage |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 64085,
"end": 66912
class ____(SensitivitySpecificityBase):
    """Computes best precision where recall is >= specified value.

    This metric creates four local variables, `true_positives`, `true_negatives`,
    `false_positives` and `false_negatives` that are used to compute the
    precision at the given recall. The threshold for the given recall
    value is computed and used to evaluate the corresponding precision.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold predictions,
    and computing the fraction of them for which `class_id` is indeed a correct
    label.

    Args:
      recall: A scalar value in range `[0, 1]`.
      num_thresholds: (Optional) Defaults to 200. The number of thresholds to
        use for matching the given recall.
      class_id: (Optional) Integer class ID for which we want binary metrics.
        This must be in the half-open interval `[0, num_classes)`, where
        `num_classes` is the last dimension of predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.PrecisionAtRecall(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[2, 2, 2, 1, 1])
    >>> m.result().numpy()
    0.33333333

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)])
    ```
    """

    def __init__(self,
                 recall,
                 num_thresholds=200,
                 class_id=None,
                 name=None,
                 dtype=None):
        # Validate eagerly: `recall` is a probability-like scalar target.
        if recall < 0 or recall > 1:
            raise ValueError('`recall` must be in the range [0, 1].')
        self.recall = recall
        self.num_thresholds = num_thresholds
        # The base class allocates the per-threshold TP/TN/FP/FN variables.
        super(PrecisionAtRecall, self).__init__(
            value=recall,
            num_thresholds=num_thresholds,
            class_id=class_id,
            name=name,
            dtype=dtype)

    def result(self):
        # Per-threshold recall and precision; div_no_nan returns 0 where the
        # denominator is 0 (no positives seen at that threshold).
        recalls = math_ops.div_no_nan(
            self.true_positives, self.true_positives + self.false_negatives)
        precisions = math_ops.div_no_nan(
            self.true_positives, self.true_positives + self.false_positives)
        # Best precision among thresholds whose recall >= self.recall.
        return self._find_max_under_constraint(
            recalls, precisions, math_ops.greater_equal)

    def get_config(self):
        # Serialize constructor args so the metric can be re-instantiated.
        config = {'num_thresholds': self.num_thresholds, 'recall': self.recall}
        base_config = super(PrecisionAtRecall, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| PrecisionAtRecall |
python | scipy__scipy | scipy/signal/_filter_design.py | {
"start": 1339,
"end": 204382
class ____(UserWarning):
    """Warning about badly conditioned filter coefficients."""
    pass
# Module-local alias: intentionally shadows the builtin `abs` in this file.
abs = np.absolute
def _is_int_type(x):
"""
Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will
pass, while ``5.0`` and ``array([5])`` will fail.
"""
if np.ndim(x) != 0:
# Older versions of NumPy did not raise for np.array([1]).__index__()
# This is safe to remove when support for those versions is dropped
return False
try:
operator.index(x)
except TypeError:
return False
else:
return True
def _real_dtype_for_complex(dtyp, *, xp):
if xp.isdtype(dtyp, 'real floating'):
return dtyp
if dtyp == xp.complex64:
return xp.float32
elif dtyp == xp.complex128:
return xp.float64
else:
raise ValueError(f"Unknown dtype {dtyp}.")
# https://github.com/numpy/numpy/blob/v2.2.0/numpy/_core/function_base.py#L195-L302
def _logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, *, xp):
if not isinstance(base, float | int) and xp.asarray(base).ndim > 0:
# If base is non-scalar, broadcast it with the others, since it
# may influence how axis is interpreted.
start, stop, base = map(xp.asarray, (start, stop, base))
ndmax = xp.broadcast_arrays(start, stop, base).ndim
start, stop, base = (
xpx.atleast_nd(a, ndim=ndmax)
for a in (start, stop, base)
)
base = xp.expand_dims(base)
try:
result_dt = xp.result_type(start, stop, base)
except ValueError:
# all of start, stop and base are python scalars
result_dt = xp_default_dtype(xp)
y = xp.linspace(start, stop, num=num, endpoint=endpoint, dtype=result_dt)
yp = xp.pow(base, y)
if dtype is None:
return yp
return xp.astype(yp, dtype, copy=False)
def findfreqs(num, den, N, kind='ba'):
    """
    Find array of frequencies for computing the response of an analog filter.

    Parameters
    ----------
    num, den : array_like, 1-D
        The polynomial coefficients of the numerator and denominator of the
        transfer function of the filter or LTI system, where the coefficients
        are ordered from highest to lowest degree. Or, the roots of the
        transfer function numerator and denominator (i.e., zeroes and poles).
    N : int
        The length of the array to be computed.
    kind : str {'ba', 'zp'}, optional
        Specifies whether the numerator and denominator are specified by their
        polynomial coefficients ('ba'), or their roots ('zp').

    Returns
    -------
    w : (N,) ndarray
        A 1-D array of frequencies, logarithmically spaced.

    Examples
    --------
    Find a set of nine frequencies that span the "interesting part" of the
    frequency response for the filter with the transfer function

        H(s) = s / (s^2 + 8s + 25)

    >>> from scipy import signal
    >>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
    array([ 1.00000000e-02,   3.16227766e-02,   1.00000000e-01,
            3.16227766e-01,   1.00000000e+00,   3.16227766e+00,
            1.00000000e+01,   3.16227766e+01,   1.00000000e+02])
    """
    xp = array_namespace(num, den)
    num, den = map(xp.asarray, (num, den))

    if kind == 'ba':
        # Coefficient form: poles/zeros are the roots of den/num.
        ep = xpx.atleast_nd(_pu.polyroots(den, xp=xp), ndim=1, xp=xp)
        tz = xpx.atleast_nd(_pu.polyroots(num, xp=xp), ndim=1, xp=xp)
    elif kind == 'zp':
        # Root form: `den` already holds the poles, `num` the zeros.
        ep = xpx.atleast_nd(den, ndim=1, xp=xp)
        tz = xpx.atleast_nd(num, ndim=1, xp=xp)
    else:
        raise ValueError("input must be one of {'ba', 'zp'}")

    ep = xp_float_to_complex(ep, xp=xp)
    tz = xp_float_to_complex(tz, xp=xp)

    if ep.shape[0] == 0:
        # No poles at all: use a far-left placeholder pole so the range
        # computation below still has something to work with.
        ep = xp.asarray([-1000], dtype=ep.dtype)

    # Keep upper-half-plane poles plus finite upper-half-plane zeros; their
    # locations determine the "interesting" frequency band.
    ez = xp.concat((
        ep[xp.imag(ep) >= 0],
        tz[(xp.abs(tz) < 1e5) & (xp.imag(tz) >= 0)]
    ))

    # 1.0 for (near-)zero roots, 0.0 otherwise; nudges roots at the origin
    # away from log10(0) below.
    integ = xp.astype(xp.abs(ez) < 1e-10, ez.dtype)  # XXX True->1, False->0
    hfreq = xp.round(
        xp.log10(xp.max(3*xp.abs(xp.real(ez) + integ) + 1.5*xp.imag(ez))) + 0.5
    )
    # the fudge factor is for backwards compatibility: round(-1.5) can be -1 or -2
    # depending on the floating-point jitter in -1.5
    fudge = 1e-14 if is_jax(xp) else 0
    lfreq = xp.round(
        xp.log10(0.1*xp.min(xp.abs(xp.real(ez + integ)) + 2*xp.imag(ez))) - 0.5 - fudge
    )

    w = _logspace(lfreq, hfreq, N, xp=xp)
    return w
def freqs(b, a, worN=200, plot=None):
    """
    Compute frequency response of analog filter.

    Given the M-order numerator `b` and N-order denominator `a` of an analog
    filter, compute its frequency response::

             b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
     H(w) = ----------------------------------------------
             a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]

    Parameters
    ----------
    b : array_like
        Numerator of a linear filter.
    a : array_like
        Denominator of a linear filter.
    worN : {None, int, array_like}, optional
        If None, then compute at 200 frequencies around the interesting parts
        of the response curve (determined by pole-zero locations). If a single
        integer, then compute at that many frequencies. Otherwise, compute the
        response at the angular frequencies (e.g., rad/s) given in `worN`.
    plot : callable, optional
        A callable that takes two arguments. If given, the return parameters
        `w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqs`.

    Returns
    -------
    w : ndarray
        The angular frequencies at which `h` was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.

    Notes
    -----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, this plots the real part of the complex transfer
    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.

    Examples
    --------
    >>> from scipy.signal import freqs, iirfilter
    >>> import numpy as np

    >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')

    >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))

    >>> import matplotlib.pyplot as plt
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude response [dB]')
    >>> plt.grid(True)
    >>> plt.show()
    """
    xp = array_namespace(b, a, worN)
    if worN is None:
        # For backwards compatibility
        w = findfreqs(b, a, 200)
    elif _is_int_type(worN):
        w = findfreqs(b, a, worN)
    else:
        # Explicit frequency grid supplied by the caller.
        w = xpx.atleast_nd(xp.asarray(worN), ndim=1, xp=xp)

    # Evaluate H(s) = B(s) / A(s) on the imaginary axis s = jw.
    s = 1j * w
    h = _pu.polyval(b, s, xp=xp) / _pu.polyval(a, s, xp=xp)
    if plot is not None:
        plot(w, h)
    return w, h
def freqs_zpk(z, p, k, worN=200):
    """
    Compute frequency response of analog filter.

    Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
    frequency response::

                (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
     H(w) = k * ----------------------------------------
                (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])

    Parameters
    ----------
    z : array_like
        Zeroes of a linear filter
    p : array_like
        Poles of a linear filter
    k : scalar
        Gain of a linear filter
    worN : {None, int, array_like}, optional
        If None, then compute at 200 frequencies around the interesting parts
        of the response curve (determined by pole-zero locations). If a single
        integer, then compute at that many frequencies. Otherwise, compute the
        response at the angular frequencies (e.g., rad/s) given in `worN`.

    Returns
    -------
    w : ndarray
        The angular frequencies at which `h` was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqs : Compute the frequency response of an analog filter in TF form
    freqz : Compute the frequency response of a digital filter in TF form
    freqz_zpk : Compute the frequency response of a digital filter in ZPK form

    Notes
    -----
    .. versionadded:: 0.19.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import freqs_zpk, iirfilter

    >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
    ...                     output='zpk')

    >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))

    >>> import matplotlib.pyplot as plt
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude response [dB]')
    >>> plt.grid(True)
    >>> plt.show()
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))

    # NB: k is documented to be a scalar; for backwards compat we keep allowing it
    # to be a size-1 array, but it does not influence the namespace calculation.
    k = xp.asarray(k, dtype=xp_default_dtype(xp))
    if xp_size(k) > 1:
        raise ValueError('k must be a single scalar gain')

    if worN is None:
        # For backwards compatibility
        w = findfreqs(z, p, 200, kind='zp')
    elif _is_int_type(worN):
        w = findfreqs(z, p, worN, kind='zp')
    else:
        w = worN

    w = xpx.atleast_nd(xp.asarray(w), ndim=1, xp=xp)
    # Evaluate H(s) = k * prod(s - z_i) / prod(s - p_j) on s = jw.
    s = 1j * w
    num = _pu.npp_polyvalfromroots(s, z, xp=xp)
    den = _pu.npp_polyvalfromroots(s, p, xp=xp)
    h = k * num/den
    return w, h
def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi,
          include_nyquist=False):
    """
    Compute the frequency response of a digital filter.

    Given the M-order numerator `b` and N-order denominator `a` of a digital
    filter, compute its frequency response::

                 jw                 -jw              -jwM
        jw    B(e  )    b[0] + b[1]e    + ... + b[M]e
     H(e  ) = ------ = -----------------------------------
                 jw                 -jw              -jwN
              A(e  )    a[0] + a[1]e    + ... + a[N]e

    Parameters
    ----------
    b : array_like
        Numerator of a linear filter. If `b` has dimension greater than 1,
        it is assumed that the coefficients are stored in the first dimension,
        and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
        array must be compatible for broadcasting.
    a : array_like
        Denominator of a linear filter. If `b` has dimension greater than 1,
        it is assumed that the coefficients are stored in the first dimension,
        and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
        array must be compatible for broadcasting.
    worN : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512). This is a convenient alternative to::

            np.linspace(0, fs if whole else fs/2, N, endpoint=include_nyquist)

        Using a number that is fast for FFT computations can result in
        faster computations (see Notes).

        If an array_like, compute the response at the frequencies given.
        These are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs. Ignored if worN is array_like.
    plot : callable
        A callable that takes two arguments. If given, the return parameters
        `w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqz`.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0
    include_nyquist : bool, optional
        If `whole` is False and `worN` is an integer, setting `include_nyquist`
        to True will include the last frequency (Nyquist frequency) and is
        otherwise ignored.

        .. versionadded:: 1.5.0

    Returns
    -------
    w : ndarray
        The frequencies at which `h` was computed, in the same units as `fs`.
        By default, `w` is normalized to the range [0, pi) (radians/sample).
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    freqz_zpk
    freqz_sos

    Notes
    -----
    Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable
    for `plot` produces unexpected results, as this plots the real part of the
    complex transfer function, not the magnitude.
    Try ``lambda w, h: plot(w, np.abs(h))``.

    A direct computation via (R)FFT is used to compute the frequency response
    when the following conditions are met:

    1. An integer value is given for `worN`.
    2. `worN` is fast to compute via FFT (i.e.,
       `next_fast_len(worN) <scipy.fft.next_fast_len>` equals `worN`).
    3. The denominator coefficients are a single value (``a.shape[0] == 1``).
    4. `worN` is at least as long as the numerator coefficients
       (``worN >= b.shape[0]``).
    5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``.

    For long FIR filters, the FFT approach can have lower error and be much
    faster than the equivalent direct polynomial calculation.

    Examples
    --------
    >>> from scipy import signal
    >>> import numpy as np
    >>> taps, f_c = 80, 1.0  # number of taps and cut-off frequency
    >>> b = signal.firwin(taps, f_c, window=('kaiser', 8), fs=2*np.pi)
    >>> w, h = signal.freqz(b)

    >>> import matplotlib.pyplot as plt
    >>> fig, ax1 = plt.subplots(tight_layout=True)
    >>> ax1.set_title(f"Frequency Response of {taps} tap FIR Filter" +
    ...               f"($f_c={f_c}$ rad/sample)")

    >>> ax1.axvline(f_c, color='black', linestyle=':', linewidth=0.8)
    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'C0')
    >>> ax1.set_ylabel("Amplitude in dB", color='C0')
    >>> ax1.set(xlabel="Frequency in rad/sample", xlim=(0, np.pi))

    >>> ax2 = ax1.twinx()
    >>> phase = np.unwrap(np.angle(h))
    >>> ax2.plot(w, phase, 'C1')
    >>> ax2.set_ylabel('Phase [rad]', color='C1')
    >>> ax2.grid(True)
    >>> ax2.axis('tight')
    >>> plt.show()

    Broadcasting Examples

    Suppose we have two FIR filters whose coefficients are stored in the
    rows of an array with shape (2, 25). For this demonstration, we'll
    use random data:

    >>> rng = np.random.default_rng()
    >>> b = rng.random((2, 25))

    To compute the frequency response for these two filters with one call
    to `freqz`, we must pass in ``b.T``, because `freqz` expects the first
    axis to hold the coefficients. We must then extend the shape with a
    trivial dimension of length 1 to allow broadcasting with the array
    of frequencies.  That is, we pass in ``b.T[..., np.newaxis]``, which has
    shape (25, 2, 1):

    >>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024)
    >>> w.shape
    (1024,)
    >>> h.shape
    (2, 1024)

    Now, suppose we have two transfer functions, with the same numerator
    coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators
    are stored in the first dimension of the 2-D array  `a`::

        a = [   1      1  ]
            [ -0.25, -0.5 ]

    >>> b = np.array([0.5, 0.5])
    >>> a = np.array([[1, 1], [-0.25, -0.5]])

    Only `a` is more than 1-D. To make it compatible for
    broadcasting with the frequencies, we extend it with a trivial dimension
    in the call to `freqz`:

    >>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024)
    >>> w.shape
    (1024,)
    >>> h.shape
    (2, 1024)
    """
    xp = array_namespace(b, a)
    b, a = map(xp.asarray, (b, a))
    # Promote integer denominators so the division below is floating-point.
    if xp.isdtype(a.dtype, 'integral'):
        a = xp.astype(a, xp_default_dtype(xp))
    res_dtype = xp.result_type(b, a)
    real_dtype = _real_dtype_for_complex(res_dtype, xp=xp)
    b = xpx.atleast_nd(b, ndim=1, xp=xp)
    a = xpx.atleast_nd(a, ndim=1, xp=xp)

    fs = _validate_fs(fs, allow_none=False)

    if worN is None:
        # For backwards compatibility
        worN = 512

    h = None
    if _is_int_type(worN):
        N = operator.index(worN)
        del worN
        if N < 0:
            raise ValueError(f'worN must be nonnegative, got {N}')
        lastpoint = 2 * pi if whole else pi
        # if include_nyquist is true and whole is false, w should
        # include end point
        w = xp.linspace(0, lastpoint, N,
                        endpoint=include_nyquist and not whole, dtype=real_dtype)

        # FFT length that would land exactly on the requested grid.
        n_fft = N if whole else 2 * (N - 1) if include_nyquist else 2 * N
        # Fast path: scalar denominator and enough FFT points to hold the
        # numerator -> evaluate B(z) on the unit circle via an (R)FFT.
        if (xp_size(a) == 1 and (b.ndim == 1 or (b.shape[-1] == 1))
                and n_fft >= b.shape[0]
                and n_fft > 0):  # TODO: review threshold acc. to benchmark?
            if (xp.isdtype(b.dtype, "real floating") and
                    xp.isdtype(a.dtype, "real floating")):
                fft_func = sp_fft.rfft
            else:
                fft_func = sp_fft.fft
            h = fft_func(b, n=n_fft, axis=0)
            h = h[:min(N, h.shape[0]), ...]
            h /= a
            if fft_func is sp_fft.rfft and whole:
                # exclude DC and maybe Nyquist (no need to use axis_reverse
                # here because we can build reversal with the truncation)
                stop = None if n_fft % 2 == 1 else -1
                h_flipped = xp.flip(h[1:stop, ...], axis=0)
                h = xp.concat((h, xp.conj(h_flipped)))
            if b.ndim > 1:
                # Last axis of h has length 1, so drop it.
                h = h[..., 0]
                # Move the first axis of h to the end.
                h = xp.moveaxis(h, 0, -1)
    else:
        if isinstance(worN, complex):
            # backwards compat
            worN = worN.real
        w = xpx.atleast_nd(xp.asarray(worN, dtype=res_dtype), ndim=1, xp=xp)
        if xp.isdtype(w.dtype, 'integral'):
            w = xp.astype(w, xp_default_dtype(xp))
        del worN
        # Convert user units to radians/sample.
        w = 2 * pi * w / fs

    if h is None:  # still need to compute using freqs w
        zm1 = xp.exp(-1j * w)
        h = (_pu.npp_polyval(zm1, b, tensor=False, xp=xp) /
             _pu.npp_polyval(zm1, a, tensor=False, xp=xp))

    # Back to the caller's frequency units.
    w = w * (fs / (2 * pi))

    if plot is not None:
        plot(w, h)

    return w, h
def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi):
    r"""
    Compute the frequency response of a digital filter in ZPK form.

    Given the Zeros, Poles and Gain of a digital filter, compute its frequency
    response:

    :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`

    where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
    the `poles`.

    Parameters
    ----------
    z : array_like
        Zeroes of a linear filter
    p : array_like
        Poles of a linear filter
    k : scalar
        Gain of a linear filter
    worN : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512).

        If an array_like, compute the response at the frequencies given.
        These are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs. Ignored if w is array_like.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0

    Returns
    -------
    w : ndarray
        The frequencies at which `h` was computed, in the same units as `fs`.
        By default, `w` is normalized to the range [0, pi) (radians/sample).
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    freqs : Compute the frequency response of an analog filter in TF form
    freqs_zpk : Compute the frequency response of an analog filter in ZPK form
    freqz : Compute the frequency response of a digital filter in TF form

    Notes
    -----
    .. versionadded:: 0.19.0

    Examples
    --------
    Design a 4th-order digital Butterworth filter with cut-off of 100 Hz in a
    system with sample rate of 1000 Hz, and plot the frequency response:

    >>> import numpy as np
    >>> from scipy import signal
    >>> z, p, k = signal.butter(4, 100, output='zpk', fs=1000)
    >>> w, h = signal.freqz_zpk(z, p, k, fs=1000)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(1, 1, 1)
    >>> ax1.set_title('Digital filter frequency response')

    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
    >>> ax1.set_ylabel('Amplitude [dB]', color='b')
    >>> ax1.set_xlabel('Frequency [Hz]')
    >>> ax1.grid(True)

    >>> ax2 = ax1.twinx()
    >>> phase = np.unwrap(np.angle(h))
    >>> ax2.plot(w, phase, 'g')
    >>> ax2.set_ylabel('Phase [rad]', color='g')

    >>> plt.axis('tight')
    >>> plt.show()
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))
    z = xpx.atleast_nd(z, ndim=1, xp=xp)
    p = xpx.atleast_nd(p, ndim=1, xp=xp)

    # Result precision follows the inputs' precision (single vs double).
    res_dtype = xp.result_type(z, p)
    res_dtype = xp.float64 if res_dtype in (xp.float64, xp.complex128) else xp.float32

    fs = _validate_fs(fs, allow_none=False)

    if whole:
        lastpoint = 2 * pi
    else:
        lastpoint = pi

    if worN is None:
        # For backwards compatibility
        w = xp.linspace(0, lastpoint, 512, endpoint=False, dtype=res_dtype)
    elif _is_int_type(worN):
        w = xp.linspace(0, lastpoint, worN, endpoint=False, dtype=res_dtype)
    else:
        w = xp.asarray(worN)
        if xp.isdtype(w.dtype, 'integral'):
            w = xp.astype(w, xp_default_dtype(xp))
        w = xpx.atleast_nd(w, ndim=1, xp=xp)
        # Caller-supplied frequencies are in `fs` units; convert to radians.
        w = 2 * pi * w / fs

    # Evaluate H on the unit circle z = exp(jw) from the roots directly.
    zm1 = xp.exp(1j * w)
    func = _pu.npp_polyvalfromroots
    h = xp.asarray(k, dtype=res_dtype) * func(zm1, z, xp=xp) / func(zm1, p, xp=xp)

    # Back to the caller's frequency units.
    w = w*(fs/(2*pi))

    return w, h
def group_delay(system, w=512, whole=False, fs=2*pi):
    r"""Compute the group delay of a digital filter.

    The group delay measures by how many samples amplitude envelopes of
    various spectral components of a signal are delayed by a filter.
    It is formally defined as the derivative of continuous (unwrapped) phase::

               d        jw
       D(w) = - -- arg H(e)
               dw

    Parameters
    ----------
    system : tuple of array_like (b, a)
        Numerator and denominator coefficients of a filter transfer function.
    w : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512).

        If an array_like, compute the delay at the frequencies given. These
        are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs. Ignored if w is array_like.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0

    Returns
    -------
    w : ndarray
        The frequencies at which group delay was computed, in the same units
        as `fs`. By default, `w` is normalized to the range [0, pi)
        (radians/sample).
    gd : ndarray
        The group delay.

    See Also
    --------
    freqz : Frequency response of a digital filter

    Notes
    -----
    The similar function in MATLAB is called `grpdelay`.

    If the transfer function :math:`H(z)` has zeros or poles on the unit
    circle, the group delay at corresponding frequencies is undefined.
    When such a case arises the warning is raised and the group delay
    is set to 0 at those frequencies.

    For the details of numerical computation of the group delay refer to [1]_ or [2]_.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
           3rd edition", p. 830.
    .. [2] Julius O. Smith III, "Numerical Computation of Group Delay",
           in "Introduction to Digital Filters with Audio Applications",
           online book, 2007,
           https://ccrma.stanford.edu/~jos/fp/Numerical_Computation_Group_Delay.html

    Examples
    --------
    >>> from scipy import signal
    >>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
    >>> w, gd = signal.group_delay((b, a))

    >>> import matplotlib.pyplot as plt
    >>> plt.title('Digital filter group delay')
    >>> plt.plot(w, gd)
    >>> plt.ylabel('Group delay [samples]')
    >>> plt.xlabel('Frequency [rad/sample]')
    >>> plt.show()
    """
    # Namespace only determines the type of the returned arrays; the actual
    # computation below is done in NumPy.
    xp = array_namespace(*system, w)
    b, a = map(np.atleast_1d, system)

    if w is None:
        # For backwards compatibility
        w = 512

    fs = _validate_fs(fs, allow_none=False)

    if _is_int_type(w):
        if whole:
            w = np.linspace(0, 2 * pi, w, endpoint=False)
        else:
            w = np.linspace(0, pi, w, endpoint=False)
    else:
        w = np.atleast_1d(w)
        # Convert user units to radians/sample.
        w = 2*pi*w/fs

    # Numerical method from the references in the docstring: with
    # c = b * conj(reversed(a)) and cr = ramped c, the delay is
    # Re{ CR(z)/C(z) } - (len(a) - 1) evaluated on the unit circle.
    c = np.convolve(b, np.conjugate(a[::-1]))
    cr = c * np.arange(c.size)
    z = np.exp(-1j * w)
    num = np.polyval(cr[::-1], z)
    den = np.polyval(c[::-1], z)
    gd = np.real(num / den) - a.size + 1
    singular = ~np.isfinite(gd)
    near_singular = np.absolute(den) < 10 * EPSILON

    if np.any(singular):
        # Undefined group delay (pole/zero on the unit circle): set to 0
        # and warn instead of returning NaN/inf.
        gd[singular] = 0
        warnings.warn(
            "The group delay is singular at frequencies "
            f"[{', '.join(f'{ws:.3f}' for ws in w[singular])}], setting to 0",
            stacklevel=2
        )

    elif np.any(near_singular):
        warnings.warn(
            "The filter's denominator is extremely small at frequencies "
            f"[{', '.join(f'{ws:.3f}' for ws in w[near_singular])}], "
            "around which a singularity may be present",
            stacklevel=2
        )

    # Back to the caller's frequency units and array namespace.
    w = w * (fs / (2 * xp.pi))

    return xp.asarray(w), xp.asarray(gd)
def _validate_sos(sos, xp=None):
    """Coerce `sos` to a 2-D ``(n_sections, 6)`` array and sanity-check it.

    Returns the validated array and the number of second-order sections.
    """
    if xp is None:
        xp = np  # backcompat, cf sosfilt, sosfiltfilt
    sos = xpx.atleast_nd(xp.asarray(sos), ndim=2, xp=xp)
    if sos.ndim != 2:
        raise ValueError('sos array must be 2D')
    n_sections, n_coeffs = sos.shape
    if n_coeffs != 6:
        raise ValueError('sos array must be shape (n_sections, 6)')
    # Every section's denominator must be normalized: a0 == 1.
    if not xp.all(sos[:, 3] == 1):
        raise ValueError('sos[:, 3] should be all ones')
    return sos, n_sections
def freqz_sos(sos, worN=512, whole=False, fs=2*pi):
    r"""
    Compute the frequency response of a digital filter in SOS format.

    Given `sos`, an array with shape (n, 6) of second order sections of
    a digital filter, compute the frequency response of the system function::

               B0(z)   B1(z)         B{n-1}(z)
        H(z) = ----- * ----- * ... * ---------
               A0(z)   A1(z)         A{n-1}(z)

    for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
    denominator of the transfer function of the k-th second order section.

    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. Each row corresponds to a second-order
        section, with the first three columns providing the numerator
        coefficients and the last three providing the denominator
        coefficients.
    worN : {None, int, array_like}, optional
        If a single integer, then compute at that many frequencies (default is
        N=512).  Using a number that is fast for FFT computations can result
        in faster computations (see Notes of `freqz`).

        If an array_like, compute the response at the frequencies given (must
        be 1-D). These are in the same units as `fs`.
    whole : bool, optional
        Normally, frequencies are computed from 0 to the Nyquist frequency,
        fs/2 (upper-half of unit-circle). If `whole` is True, compute
        frequencies from 0 to fs.
    fs : float, optional
        The sampling frequency of the digital system. Defaults to 2*pi
        radians/sample (so w is from 0 to pi).

        .. versionadded:: 1.2.0

    Returns
    -------
    w : ndarray
        The frequencies at which `h` was computed, in the same units as `fs`.
        By default, `w` is normalized to the range [0, pi) (radians/sample).
    h : ndarray
        The frequency response, as complex numbers.

    See Also
    --------
    freqz, sosfilt, sosfreqz

    Notes
    -----
    This function used to be called ``sosfreqz`` in older versions (≥ 0.19.0)

    .. versionadded:: 1.15.0

    Examples
    --------
    Design a 15th-order bandpass filter in SOS format.

    >>> from scipy import signal
    >>> import numpy as np
    >>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
    ...                    output='sos')

    Compute the frequency response at 1500 points from DC to Nyquist.

    >>> w, h = signal.freqz_sos(sos, worN=1500)

    Plot the response.

    >>> import matplotlib.pyplot as plt
    >>> plt.subplot(2, 1, 1)
    >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5))
    >>> plt.plot(w/np.pi, db)
    >>> plt.ylim(-75, 5)
    >>> plt.grid(True)
    >>> plt.yticks([0, -20, -40, -60])
    >>> plt.ylabel('Gain [dB]')
    >>> plt.title('Frequency Response')
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(w/np.pi, np.angle(h))
    >>> plt.grid(True)
    >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
    ...            [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$'])
    >>> plt.ylabel('Phase [rad]')
    >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
    >>> plt.show()

    If the same filter is implemented as a single transfer function,
    numerical error corrupts the frequency response:

    >>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
    ...                    output='ba')
    >>> w, h = signal.freqz(b, a, worN=1500)
    >>> plt.subplot(2, 1, 1)
    >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5))
    >>> plt.plot(w/np.pi, db)
    >>> plt.ylim(-75, 5)
    >>> plt.grid(True)
    >>> plt.yticks([0, -20, -40, -60])
    >>> plt.ylabel('Gain [dB]')
    >>> plt.title('Frequency Response')
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(w/np.pi, np.angle(h))
    >>> plt.grid(True)
    >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
    ...            [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$'])
    >>> plt.ylabel('Phase [rad]')
    >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
    >>> plt.show()
    """
    xp = array_namespace(sos)

    fs = _validate_fs(fs, allow_none=False)

    sos, n_sections = _validate_sos(sos, xp)
    if n_sections == 0:
        raise ValueError('Cannot compute frequencies with no sections')
    # The overall response is the product of the per-section responses,
    # each evaluated with `freqz` on the same frequency grid.
    h = 1.
    for j in range(sos.shape[0]):
        row = sos[j, :]
        w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs)
        h *= rowh
    return w, h
def sosfreqz(*args, **kwargs):
    """
    Compute the frequency response of a digital filter in SOS format (legacy).
    .. legacy:: function
        This function is an alias, provided for backward compatibility.
        New code should use the function :func:`scipy.signal.freqz_sos`.
        This function became obsolete from version 1.15.0.
    """
    # Pure pass-through alias: all positional and keyword arguments are
    # forwarded unchanged to `freqz_sos`, so both names share one signature.
    return freqz_sos(*args, **kwargs)
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1-D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> from scipy.signal._filter_design import _cplxreal
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = np.atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1-D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return np.array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = np.diff(np.concatenate(([0], same_real, [0])))
run_starts = np.nonzero(diffs > 0)[0]
run_stops = np.nonzero(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
    """
    Sort a vector into complex-conjugate pairs followed by real values.
    Conjugate pairs are ordered by increasing real part, then by increasing
    imaginary magnitude; within each pair the member with negative imaginary
    part comes first, and the pair is forced to be an exact conjugate pair by
    averaging. Values whose imaginary part is below ``tol`` times their
    magnitude are treated as purely real and placed, sorted, after the pairs.
    Parameters
    ----------
    z : array_like
        1-D input array to be sorted.
    tol : float, optional
        Relative tolerance for realness / conjugate-equality tests.
        Defaults to ``100 * spacing(1)`` of `z`'s dtype (2e-14 for float64).
    Returns
    -------
    y : ndarray
        Complex conjugate pairs followed by real numbers.
    Raises
    ------
    ValueError
        If any complex element has no matching conjugate.
    See Also
    --------
    _cplxreal
    Examples
    --------
    >>> from scipy.signal._filter_design import _cplxpair
    >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
    >>> z = _cplxpair(a)
    >>> print(z)
    [ 1.-1.j  1.+1.j  2.-1.j  2.+1.j  2.-1.j  2.+1.j  2.-2.j  2.+2.j  1.+0.j
      3.+0.j  4.+0.j]
    """
    z = np.atleast_1d(z)
    # A purely real (or empty) input needs no pairing, only ordering.
    if z.size == 0 or np.isrealobj(z):
        return np.sort(z)
    if z.ndim != 1:
        raise ValueError('z must be 1-D')

    zc, zr = _cplxreal(z, tol)
    # Interleave each representative with its conjugate, conjugate first
    # (negative imaginary part leads within each pair).
    pairs = np.dstack((zc.conj(), zc)).flatten()
    return np.append(pairs, zr)
def tf2zpk(b, a):
    r"""Return zero, pole, gain (z, p, k) representation from a numerator,
    denominator representation of a linear filter.
    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.
    Notes
    -----
    If some values of `b` are too close to 0, they are removed. In that case,
    a BadCoefficients warning is emitted.
    The `b` and `a` arrays are interpreted as coefficients for positive,
    descending powers of the transfer function variable, i.e. the "positive
    powers" form common in controls engineering:
    .. math::
        H(s) = \frac
        {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
        {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
    When `M` and `N` are equal (true for all filters generated by the
    bilinear transform) this coincides with the "negative powers"
    discrete-time form preferred in DSP; otherwise convert coefficients to
    the positive-powers form first.
    Examples
    --------
    Find the zeroes, poles and gain of
    a filter with the transfer function
    .. math::
        H(s) = \frac{3s^2}{s^2 + 5s + 13}
    >>> from scipy.signal import tf2zpk
    >>> tf2zpk([3, 0, 0], [1, 5, 13])
    (   array([ 0.        ,  0.        ]),
        array([ -2.5+2.59807621j ,  -2.5-2.59807621j]),
        3.0)
    """
    xp = array_namespace(b, a)
    # normalize strips near-zero leading numerator terms (may warn) and
    # makes the denominator monic.
    b, a = normalize(b, a)
    a, b = xp_promote(a, b, xp=xp, force_floating=True)
    # The overall gain is the leading numerator coefficient; divide it out
    # so the root finder sees a monic numerator.
    gain = b[0]
    monic_b = b / b[0]
    zeros = _pu.polyroots(monic_b, xp=xp)
    poles = _pu.polyroots(a, xp=xp)
    return zeros, poles, gain
def zpk2tf(z, p, k):
    r"""
    Return polynomial transfer function representation from zeros and poles
    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.
    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.
    Examples
    --------
    Find the polynomial representation of a transfer function H(s)
    using its 'zpk' (Zero-Pole-Gain) representation.
    .. math::
        H(z) = 5 \frac
        { (s - 2)(s - 6) }
        { (s - 1)(s - 8) }
    >>> from scipy.signal import zpk2tf
    >>> z   = [2,   6]
    >>> p   = [1,   8]
    >>> k   = 5
    >>> zpk2tf(z, p, k)
    (   array([  5., -40.,  60.]), array([ 1., -9.,  8.]))
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))
    # Promote the gain to a floating dtype consistent with z and p so that
    # integer inputs do not produce integer (truncating) arithmetic below.
    k = xp.asarray(k, dtype=xp.result_type(xp.real(z), xp.real(p), k))
    if xp.isdtype(k.dtype, "integral"):
        k = xp.astype(k, xp.float64)
    z = xpx.atleast_nd(z, ndim=1, xp=xp)
    k = xpx.atleast_nd(k, ndim=1, xp=xp)
    if z.ndim > 1:
        # 2-D zeros: one numerator row per zero set (SIMO systems).
        temp = _pu.poly(z[0, ...], xp=xp)
        b = xp.empty((z.shape[0], z.shape[1] + 1), dtype=temp.dtype)
        if k.shape[0] == 1:
            # Broadcast a scalar gain across all rows.
            k = [k[0]] * z.shape[0]
        for i in range(z.shape[0]):
            # Multiply by the gain as-is: casting it to an integer dtype here
            # would silently truncate non-integer gains (bug fix).
            b[i, ...] = xp.multiply(k[i], _pu.poly(z[i, ...], xp=xp))
    else:
        # Use xp.multiply to work around torch type promotion
        # non-compliance for operations between 0d and higher
        # dimensional arrays.
        b = xp.multiply(k, _pu.poly(z, xp=xp))
    a = _pu.poly(p, xp=xp)
    a = xpx.atleast_nd(xp.asarray(a), ndim=1, xp=xp)
    return b, a
def tf2sos(b, a, pairing=None, *, analog=False):
    r"""
    Return second-order sections from transfer function representation
    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        See `zpk2sos` for information and restrictions on `pairing` and
        `analog` arguments.
    analog : bool, optional
        If True, system is analog, otherwise discrete.
        .. versionadded:: 1.8.0
    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.
    See Also
    --------
    zpk2sos, sosfilt
    Notes
    -----
    It is generally discouraged to convert from TF to SOS format, since doing
    so usually will not improve numerical precision errors. Instead, consider
    designing filters in ZPK format and converting directly to SOS. TF is
    converted to SOS by first converting to ZPK format, then converting
    ZPK to SOS.
    .. versionadded:: 0.16.0
    Examples
    --------
    Find the 'sos' (second-order sections) of the transfer function H(s)
    using its polynomial representation.
    .. math::
        H(s) = \frac{s^2 - 3.5s - 2}{s^4 + 3s^3 - 15s^2 - 19s + 30}
    >>> from scipy.signal import tf2sos
    >>> tf2sos([1, -3.5, -2], [1, 3, -15, -19, 30], analog=True)
    array([[  0. ,   0. ,   1. ,   1. ,   2. , -15. ],
           [  1. ,  -3.5,  -2. ,   1. ,   1. ,  -2. ]])
    """
    # Convert TF -> ZPK first, then let zpk2sos do the pole/zero pairing.
    z, p, k = tf2zpk(b, a)
    return zpk2sos(z, p, k, pairing=pairing, analog=analog)
def sos2tf(sos):
    r"""
    Return a single transfer function from a series of second-order sections
    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.
    Returns
    -------
    b : ndarray
        Numerator polynomial coefficients.
    a : ndarray
        Denominator polynomial coefficients.
    Notes
    -----
    .. versionadded:: 0.16.0
    Examples
    --------
    Find the polynomial representation of an elliptic filter
    using its 'sos' (second-order sections) format.
    >>> from scipy.signal import sos2tf
    >>> from scipy import signal
    >>> sos = signal.ellip(1, 0.001, 50, 0.1, output='sos')
    >>> sos2tf(sos)
    (   array([0.91256522, 0.91256522, 0.        ]),
        array([1.        , 0.82513043, 0.        ]))
    """
    xp = array_namespace(sos)
    sos = xp.asarray(sos)
    # Integer coefficients are promoted to the namespace's default float.
    out_dtype = sos.dtype
    if xp.isdtype(out_dtype, 'integral'):
        out_dtype = xp_default_dtype(xp)
    # Multiply all section numerators (columns 0:3) and denominators
    # (columns 3:6) together, starting from the unit polynomial.
    num = xp.asarray([1], dtype=out_dtype)
    den = xp.asarray([1], dtype=out_dtype)
    for row in range(sos.shape[0]):
        num = _pu.polymul(num, sos[row, :3], xp=xp)
        den = _pu.polymul(den, sos[row, 3:], xp=xp)
    return num, den
def sos2zpk(sos):
    """
    Return zeros, poles, and gain of a series of second-order sections
    Parameters
    ----------
    sos : array_like
        Array of second-order filter coefficients, must have shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.
    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.
    Notes
    -----
    The number of zeros and poles returned will be ``n_sections * 2``
    even if some of these are (effectively) zero.
    .. versionadded:: 0.16.0
    """
    xp = array_namespace(sos)
    sos = xp.asarray(sos)
    n_sections = sos.shape[0]
    # Two zeros and two poles per section; slots left at 0 when a section
    # contributes fewer actual roots.
    z = xp.zeros(n_sections * 2, dtype=xp.complex128)
    p = xp.zeros(n_sections * 2, dtype=xp.complex128)
    k = 1.
    for idx in range(n_sections):
        z_sec, p_sec, k_sec = tf2zpk(sos[idx, :3], sos[idx, 3:])
        z = xpx.at(z, slice(2 * idx, 2 * idx + z_sec.shape[0])).set(z_sec)
        p = xpx.at(p, slice(2 * idx, 2 * idx + p_sec.shape[0])).set(p_sec)
        # The overall gain is the product of the per-section gains.
        k *= k_sec
    return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex', 'any')
order = np.argsort(np.abs(fro - to))
if which == 'any':
return order[0]
else:
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.nonzero(mask)[0][0]]
def _single_zpksos(z, p, k):
    """Build one 6-coefficient SOS row from up to two zeros and poles.
    Coefficients are right-aligned: ``[b0 b1 b2 a0 a1 a2]`` with missing
    leading terms left as zero.
    """
    b, a = zpk2tf(z, p, k)
    row = np.zeros(6)
    row[3 - len(b):3] = b
    row[6 - len(a):6] = a
    return row
def zpk2sos(z, p, k, pairing=None, *, analog=False):
    """Return second-order sections from zeros, poles, and gain of a system
    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.
    pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        If analog is False and pairing is None, pairing is set to 'nearest';
        if analog is True, pairing must be 'minimal', and is set to that if
        it is None.
    analog : bool, optional
        If True, system is analog, otherwise discrete.
        .. versionadded:: 1.8.0
    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.
    See Also
    --------
    sosfilt
    Notes
    -----
    The algorithm used to convert ZPK to SOS format is designed to
    minimize errors due to numerical precision issues. The pairing
    algorithm attempts to minimize the peak gain of each biquadratic
    section. This is done by pairing poles with the nearest zeros, starting
    with the poles closest to the unit circle for discrete-time systems, and
    poles closest to the imaginary axis for continuous-time systems.
    ``pairing='minimal'`` outputs may not be suitable for `sosfilt`,
    and ``analog=True`` outputs will never be suitable for `sosfilt`.
    *Algorithms*
    The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``,
    and ``pairing='minimal'`` algorithms are mostly shared. The
    ``'nearest'`` algorithm attempts to minimize the peak gain, while
    ``'keep_odd'`` minimizes peak gain under the constraint that
    odd-order systems should retain one section as first order.
    ``'minimal'`` is similar to ``'keep_odd'``, but no additional
    poles or zeros are introduced
    The algorithm steps are as follows:
    As a pre-processing step for ``pairing='nearest'``,
    ``pairing='keep_odd'``, add poles or zeros to the origin as
    necessary to obtain the same number of poles and zeros for
    pairing. If ``pairing == 'nearest'`` and there are an odd number
    of poles, add an additional pole and a zero at the origin.
    The following steps are then iterated over until no more poles or
    zeros remain:
    1. Take the (next remaining) pole (complex or real) closest to the
       unit circle (or imaginary axis, for ``analog=True``) to
       begin a new filter section.
    2. If the pole is real and there are no other remaining real poles [#]_,
       add the closest real zero to the section and leave it as a first
       order section. Note that after this step we are guaranteed to be
       left with an even number of real poles, complex poles, real zeros,
       and complex zeros for subsequent pairing iterations.
    3. Else:
        1. If the pole is complex and the zero is the only remaining real
           zero*, then pair the pole with the *next* closest zero
           (guaranteed to be complex). This is necessary to ensure that
           there will be a real zero remaining to eventually create a
           first-order section (thus keeping the odd order).
        2. Else pair the pole with the closest remaining zero (complex or
           real).
        3. Proceed to complete the second-order section by adding another
           pole and zero to the current pole and zero in the section:
            1. If the current pole and zero are both complex, add their
               conjugates.
            2. Else if the pole is complex and the zero is real, add the
               conjugate pole and the next closest real zero.
            3. Else if the pole is real and the zero is complex, add the
               conjugate zero and the real pole closest to those zeros.
            4. Else (we must have a real pole and real zero) add the next
               real pole closest to the unit circle, and then add the real
               zero closest to that pole.
    .. [#] This conditional can only be met for specific odd-order inputs
           with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods.
    .. versionadded:: 0.16.0
    Examples
    --------
    Design a 6th order low-pass elliptic digital filter for a system with a
    sampling rate of 8000 Hz that has a pass-band corner frequency of
    1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
    the attenuation in the stop-band should be at least 90 dB.
    In the following call to `ellip`, we could use ``output='sos'``,
    but for this example, we'll use ``output='zpk'``, and then convert
    to SOS format with `zpk2sos`:
    >>> from scipy import signal
    >>> import numpy as np
    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
    Now convert to SOS format.
    >>> sos = signal.zpk2sos(z, p, k)
    The coefficients of the numerators of the sections:
    >>> sos[:, :3]
    array([[0.0014152 , 0.00248677, 0.0014152 ],
           [1.        , 0.72976874, 1.        ],
           [1.        , 0.17607852, 1.        ]])
    The symmetry in the coefficients occurs because all the zeros are on the
    unit circle.
    The coefficients of the denominators of the sections:
    >>> sos[:, 3:]
    array([[ 1.        , -1.32544025,  0.46989976],
           [ 1.        , -1.26118294,  0.62625924],
           [ 1.        , -1.2570723 ,  0.8619958 ]])
    The next example shows the effect of the `pairing` option.  We have a
    system with three poles and three zeros, so the SOS array will have
    shape (2, 6). The means there is, in effect, an extra pole and an extra
    zero at the origin in the SOS representation.
    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
    With ``pairing='nearest'`` (the default), we obtain
    >>> signal.zpk2sos(z1, p1, 1)
    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])
    The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles
    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
    {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
    have been assigned to different sections.
    With ``pairing='keep_odd'``, we obtain:
    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])
    The extra pole and zero at the origin are in the same section.
    The first section is, in effect, a first-order section.
    With ``pairing='minimal'``, the first-order section doesn't have
    the extra pole and zero at the origin:
    >>> signal.zpk2sos(z1, p1, 1, pairing='minimal')
    array([[ 0.  ,  1.  ,  1.  ,  0.  ,  1.  , -0.75],
           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])
    """
    # TODO in the near future:
    # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
    # 2. Make `decimate` use `sosfilt` instead of `lfilter`.
    # 3. Make sosfilt automatically simplify sections to first order
    #    when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
    # 4. Further optimizations of the section ordering / pole-zero pairing.
    # See the wiki for other potential issues.
    xp = array_namespace(z, p)
    # The pairing algorithm below mutates arrays element-wise, so it runs
    # entirely in NumPy; convert inputs now and convert back on exit.
    # convert to numpy, convert back on exit XXX
    z, p = map(np.asarray, (z, p))
    k = np.asarray(k)
    if pairing is None:
        # Default pairing depends on the system kind (see docstring).
        pairing = 'minimal' if analog else 'nearest'
    valid_pairings = ['nearest', 'keep_odd', 'minimal']
    if pairing not in valid_pairings:
        raise ValueError(f'pairing must be one of {valid_pairings}, not {pairing}')
    if analog and pairing != 'minimal':
        raise ValueError('for analog zpk2sos conversion, '
                         'pairing must be "minimal"')
    if len(z) == len(p) == 0:
        # Degenerate system: pure gain, emitted as a single trivial section.
        if not analog:
            return xp.asarray(np.asarray([[k, 0., 0., 1., 0., 0.]]))
        else:
            return xp.asarray(np.asarray([[0., 0., k, 0., 0., 1.]]))
    if pairing != 'minimal':
        # ensure we have the same number of poles and zeros, and make copies
        p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
        z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
        n_sections = (max(len(p), len(z)) + 1) // 2
        if len(p) % 2 == 1 and pairing == 'nearest':
            # 'nearest' pads an odd-order system with an extra pole/zero
            # pair at the origin so everything pairs up into biquads.
            p = np.concatenate((p, [0.]))
            z = np.concatenate((z, [0.]))
        assert len(p) == len(z)
    else:
        if len(p) < len(z):
            raise ValueError('for analog zpk2sos conversion, '
                             'must have len(p)>=len(z)')
        n_sections = (len(p) + 1) // 2
    # Ensure we have complex conjugate pairs
    # (note that _cplxreal only gives us one element of each complex pair):
    z = np.concatenate(_cplxreal(z))
    p = np.concatenate(_cplxreal(p))
    if not np.isreal(k):
        raise ValueError('k must be real')
    k = k.real
    if not analog:
        # digital: "worst" is the closest to the unit circle
        def idx_worst(p):
            return np.argmin(np.abs(1 - np.abs(p)))
    else:
        # analog: "worst" is the closest to the imaginary axis
        def idx_worst(p):
            return np.argmin(np.abs(np.real(p)))
    sos = np.zeros((n_sections, 6))
    # Construct the system, reversing order so the "worst" are last
    for si in range(n_sections-1, -1, -1):
        # Select the next "worst" pole
        p1_idx = idx_worst(p)
        p1 = p[p1_idx]
        p = np.delete(p, p1_idx)
        # Pair that pole with a zero
        if np.isreal(p1) and np.isreal(p).sum() == 0:
            # Special case (1): last remaining real pole
            if pairing != 'minimal':
                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
                z1 = z[z1_idx]
                z = np.delete(z, z1_idx)
                sos[si] = _single_zpksos([z1, 0], [p1, 0], 1)
            elif len(z) > 0:
                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
                z1 = z[z1_idx]
                z = np.delete(z, z1_idx)
                sos[si] = _single_zpksos([z1], [p1], 1)
            else:
                sos[si] = _single_zpksos([], [p1], 1)
        elif (len(p) + 1 == len(z)
              and not np.isreal(p1)
              and np.isreal(p).sum() == 1
              and np.isreal(z).sum() == 1):
            # Special case (2): there's one real pole and one real zero
            # left, and an equal number of poles and zeros to pair up.
            # We *must* pair with a complex zero
            z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1)
        else:
            if np.isreal(p1):
                # Pair the real pole with the next-"worst" remaining real
                # pole as the section's second pole.
                prealidx = np.flatnonzero(np.isreal(p))
                p2_idx = prealidx[idx_worst(p[prealidx])]
                p2 = p[p2_idx]
                p = np.delete(p, p2_idx)
            else:
                p2 = p1.conj()
            # find closest zero
            if len(z) > 0:
                z1_idx = _nearest_real_complex_idx(z, p1, 'any')
                z1 = z[z1_idx]
                z = np.delete(z, z1_idx)
                if not np.isreal(z1):
                    sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1)
                else:
                    if len(z) > 0:
                        z2_idx = _nearest_real_complex_idx(z, p1, 'real')
                        z2 = z[z2_idx]
                        assert np.isreal(z2)
                        z = np.delete(z, z2_idx)
                        sos[si] = _single_zpksos([z1, z2], [p1, p2], 1)
                    else:
                        sos[si] = _single_zpksos([z1], [p1, p2], 1)
            else:
                # no more zeros
                sos[si] = _single_zpksos([], [p1, p2], 1)
    assert len(p) == len(z) == 0  # we've consumed all poles and zeros
    del p, z
    # put gain in first sos
    sos[0][:3] *= k
    return xp.asarray(sos)
def _align_nums(nums, xp):
    """Pad a collection of numerator polynomials to a common length.
    Given numerator coefficient arrays ``[[a_1, ..., a_n], ..., [b_1, ...,
    b_m]]`` of differing lengths, shorter rows are left-padded with zeros so
    that all rows have equal width. Such alignment is needed by functions
    like `tf2ss` when handling SIMO transfer functions.
    Parameters
    ----------
    nums: array_like
        Numerator or list of numerators. Not necessarily with same length.
    Returns
    -------
    nums: array
        If `nums` was a list of numerators, a 2-D array with shorter rows
        zero-padded on the left; otherwise ``xp.asarray(nums)``.
    """
    try:
        # asarray raises ValueError for ragged input, e.g. nums = [5, [1, 2, 3]];
        # the non-numeric check deliberately raises into the same handler.
        arr = xp.asarray(nums)
        if not xp.isdtype(arr.dtype, "numeric"):
            raise ValueError("dtype of numerator is non-numeric")
        return arr
    except ValueError:
        rows = [xpx.atleast_nd(xp.asarray(num), ndim=1) for num in nums]
        width = max(xp_size(row) for row in rows)
        # Zero-filled output; each row is written right-aligned below.
        padded = xp.zeros((len(rows), width))
        for i, row in enumerate(rows):
            padded[i, -row.size:] = row
        return padded
def normalize(b, a):
    """Normalize numerator/denominator of a continuous-time transfer function.
    If values of `b` are too close to 0, they are removed. In that case, a
    BadCoefficients warning is emitted.
    Parameters
    ----------
    b: array_like
        Numerator of the transfer function. Can be a 2-D array to normalize
        multiple transfer functions.
    a: array_like
        Denominator of the transfer function. At most 1-D.
    Returns
    -------
    num: array
        The numerator of the normalized transfer function. At least a 1-D
        array. A 2-D array if the input `num` is a 2-D array.
    den: 1-D array
        The denominator of the normalized transfer function.
    Notes
    -----
    Coefficients for both the numerator and denominator should be specified in
    descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
    ``[1, 3, 5]``).
    Examples
    --------
    >>> from scipy.signal import normalize
    Normalize the coefficients of the transfer function
    ``(3*s^2 - 2*s + 5) / (2*s^2 + 3*s + 1)``:
    >>> b = [3, -2, 5]
    >>> a = [2, 3, 1]
    >>> normalize(b, a)
    (array([ 1.5, -1. ,  2.5]), array([1. , 1.5, 0.5]))
    A warning is generated if, for example, the first coefficient of
    `b` is 0. In the following example, the result is as expected:
    >>> import warnings
    >>> with warnings.catch_warnings(record=True) as w:
    ...     num, den = normalize([0, 3, 6], [2, -5, 4])
    >>> num
    array([1.5, 3. ])
    >>> den
    array([ 1. , -2.5,  2. ])
    >>> print(w[0].message)
    Badly conditioned filter coefficients (numerator): the results may be meaningless
    """
    try:
        xp = array_namespace(b, a)
    except TypeError:
        # object arrays, test_ltisys.py::TestSS2TF::test_simo_round_trip
        xp = np_compat
    den = xp.asarray(a)
    den = xpx.atleast_nd(den, ndim=1, xp=xp)
    num = xp.asarray(b)
    # _align_nums pads multiple numerators (SIMO) to a common width.
    num = xpx.atleast_nd(_align_nums(num, xp), ndim=2, xp=xp)
    if den.ndim != 1:
        raise ValueError("Denominator polynomial must be rank-1 array.")
    if num.ndim > 2:
        raise ValueError("Numerator polynomial must be rank-1 or"
                         " rank-2 array.")
    if xp.all(den == 0):
        # Fixed typo in the message: "on" -> "one".
        raise ValueError("Denominator must have at least one nonzero element.")
    # Trim leading zeros in denominator, leave at least one.
    den = _pu._trim_zeros(den, 'f')
    # Normalize transfer function
    num, den = num / den[0], den / den[0]
    # Count numerator columns that are all zero
    leading_zeros = 0
    for j in range(num.shape[-1]):
        col = num[:, j]
        # 1e-14 absolute threshold: coefficients this small are treated as
        # numerical noise rather than genuine leading terms.
        if xp.all(xp.abs(col) <= 1e-14):
            leading_zeros += 1
        else:
            break
    # Trim leading zeros of numerator
    if leading_zeros > 0:
        warnings.warn("Badly conditioned filter coefficients (numerator): the "
                      "results may be meaningless",
                      BadCoefficients, stacklevel=2)
        # Make sure at least one column remains
        if leading_zeros == num.shape[1]:
            leading_zeros -= 1
        num = num[:, leading_zeros:]
    # Squeeze first dimension if singular
    if num.shape[0] == 1:
        num = num[0, :]
    return num, den
def lp2lp(b, a, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a different frequency.
    Return an analog low-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency, in
    transfer function ('ba') representation.
    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired cutoff, as angular frequency (e.g. rad/s).
        Defaults to no change.
    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed low-pass filter.
    a : array_like
        Denominator polynomial coefficients of the transformed low-pass filter.
    See Also
    --------
    lp2hp, lp2bp, lp2bs, bilinear
    lp2lp_zpk
    Notes
    -----
    This is derived from the s-plane substitution
    .. math:: s \rightarrow \frac{s}{\omega_0}
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> lp = signal.lti([1.0], [1.0, 1.0])
    >>> lp2 = signal.lti(*signal.lp2lp(lp.num, lp.den, 2))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_lp2, p_lp2 = lp2.bode(w)
    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_lp2, label='Transformed Lowpass')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.legend()
    """
    xp = array_namespace(a, b)
    a, b = map(xp.asarray, (a, b))
    a, b = xp_promote(a, b, force_floating=True, xp=xp)
    a = xpx.atleast_nd(a, ndim=1, xp=xp)
    b = xpx.atleast_nd(b, ndim=1, xp=xp)
    try:
        wo = float(wo)
    except TypeError:
        # Accept a length-1 sequence for wo as well.
        wo = float(wo[0])
    n_den = a.shape[0]
    n_num = b.shape[0]
    order = max(n_den, n_num)
    # Powers of wo in descending exponent order, matching the coefficients.
    pwo = wo ** xp.arange(order - 1, -1, -1, dtype=xp.float64)
    off_num = max(n_num - n_den, 0)
    off_den = max(n_den - n_num, 0)
    # Scale each coefficient by the appropriate power of wo (s -> s/wo).
    b = b * pwo[off_num] / pwo[off_den:]
    a = a * pwo[off_num] / pwo[off_num:]
    return normalize(b, a)
def _resize(a, new_shape, xp):
    # Array-API analogue of np.resize: flatten, tile cyclically to the
    # requested element count, and reshape. An empty input yields zeros.
    # https://github.com/numpy/numpy/blob/v2.2.4/numpy/_core/fromnumeric.py#L1535
    flat = xp.reshape(a, (-1,))
    total = 1
    for extent in new_shape:
        total *= extent
        if extent < 0:
            raise ValueError(
                'all elements of `new_shape` must be non-negative'
            )
    if xp_size(flat) == 0 or total == 0:
        # Empty source must be zero-filled; empty target needs no tiling.
        return xp.zeros_like(flat, shape=new_shape)
    n_tiles = -(-total // xp_size(flat))  # ceiling division
    tiled = xp.concat((flat,) * n_tiles)[:total]
    return xp.reshape(tiled, new_shape)
def lp2hp(b, a, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a highpass filter.
    Return an analog high-pass filter with cutoff frequency `wo`
    from an analog low-pass filter prototype with unity cutoff frequency, in
    transfer function ('ba') representation.
    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired cutoff, as angular frequency (e.g., rad/s).
        Defaults to no change.
    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed high-pass filter.
    a : array_like
        Denominator polynomial coefficients of the transformed high-pass filter.
    See Also
    --------
    lp2lp, lp2bp, lp2bs, bilinear
    lp2hp_zpk
    Notes
    -----
    This is derived from the s-plane substitution
    .. math:: s \rightarrow \frac{\omega_0}{s}
    This maintains symmetry of the lowpass and highpass responses on a
    logarithmic scale.
    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> lp = signal.lti([1.0], [1.0, 1.0])
    >>> hp = signal.lti(*signal.lp2hp(lp.num, lp.den))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_hp, p_hp = hp.bode(w)
    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_hp, label='Highpass')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.legend()
    """
    xp = array_namespace(a, b)
    a, b = map(xp.asarray, (a, b))
    a, b = xp_promote(a, b, force_floating=True, xp=xp)
    a = xpx.atleast_nd(a, ndim=1, xp=xp)
    b = xpx.atleast_nd(b, ndim=1, xp=xp)
    try:
        wo = float(wo)
    except TypeError:
        # Accept a length-1 sequence for wo as well.
        wo = float(wo[0])
    n_den = a.shape[0]
    n_num = b.shape[0]
    order = max(n_den, n_num)
    # Ascending powers of wo; skip the exponentiation when wo == 1.
    if wo != 1:
        pwo = wo ** xp.arange(order, dtype=b.dtype)
    else:
        pwo = xp.ones(order, dtype=b.dtype)
    # The s -> wo/s substitution reverses coefficient order and scales by
    # powers of wo; the shorter polynomial is zero-extended to the longer.
    if n_den >= n_num:
        outa = xp.flip(a) * pwo
        outb = _resize(b, (n_den,), xp=xp)
        outb[n_num:] = 0.0
        outb[:n_num] = xp.flip(b) * pwo[:n_num]
    else:
        outb = xp.flip(b) * pwo
        outa = _resize(a, (n_num,), xp=xp)
        outa[n_den:] = 0.0
        outa[:n_den] = xp.flip(a) * pwo[:n_den]
    return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
    r"""
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter, centered at `wo` with bandwidth `bw`,
    derived from an analog low-pass prototype that has unity cutoff
    frequency. Works on the transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired passband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired passband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed band-pass filter.
    a : array_like
        Denominator polynomial coefficients of the transformed band-pass filter.

    See Also
    --------
    lp2lp, lp2hp, lp2bs, bilinear
    lp2bp_zpk

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}

    This is the "wideband" transformation, producing a passband with
    geometric (log frequency) symmetry about `wo`.

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> lp = signal.lti([1.0], [1.0, 1.0])
    >>> bp = signal.lti(*signal.lp2bp(lp.num, lp.den))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_bp, p_bp = bp.bode(w)
    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_bp, label='Bandpass')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.legend()
    """
    xp = array_namespace(a, b)
    a, b = map(xp.asarray, (a, b))
    a, b = xp_promote(a, b, force_floating=True, xp=xp)
    a = xpx.atleast_nd(a, ndim=1, xp=xp)
    b = xpx.atleast_nd(b, ndim=1, xp=xp)
    D = a.shape[0] - 1
    N = b.shape[0] - 1
    ma = max([N, D])
    wosq = wo * wo

    def _substitute(coefs, deg):
        # Expand one polynomial under s -> (s**2 + wo**2) / (s * bw).
        # The result has degree deg + ma; each output coefficient collects
        # the binomial-expansion terms that land on its power of s.
        out_deg = deg + ma
        out = xp.empty(out_deg + 1, dtype=coefs.dtype)
        for j in range(out_deg + 1):
            acc = 0.0
            for i in range(deg + 1):
                for k in range(i + 1):
                    if ma - i + 2 * k == j:
                        acc += (comb(i, k) * coefs[deg - i]
                                * wosq ** (i - k) / bw ** i)
            out[out_deg - j] = acc
        return out

    # Numerator and denominator undergo the identical substitution.
    bprime = _substitute(b, N)
    aprime = _substitute(a, D)
    return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
    r"""
    Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter, centered at `wo` with stopband width
    `bw`, derived from an analog low-pass prototype that has unity cutoff
    frequency. Works on the transfer function ('ba') representation.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    wo : float
        Desired stopband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired stopband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    b : array_like
        Numerator polynomial coefficients of the transformed band-stop filter.
    a : array_like
        Denominator polynomial coefficients of the transformed band-stop filter.

    See Also
    --------
    lp2lp, lp2hp, lp2bp, bilinear
    lp2bs_zpk

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}

    This is the "wideband" transformation, producing a stopband with
    geometric (log frequency) symmetry about `wo`.

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> lp = signal.lti([1.0], [1.0, 1.5])
    >>> bs = signal.lti(*signal.lp2bs(lp.num, lp.den))
    >>> w, mag_lp, p_lp = lp.bode()
    >>> w, mag_bs, p_bs = bs.bode(w)
    >>> plt.plot(w, mag_lp, label='Lowpass')
    >>> plt.plot(w, mag_bs, label='Bandstop')
    >>> plt.semilogx()
    >>> plt.grid(True)
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.legend()
    """
    xp = array_namespace(a, b)
    a, b = map(xp.asarray, (a, b))
    a, b = xp_promote(a, b, force_floating=True, xp=xp)
    a = xpx.atleast_nd(a, ndim=1, xp=xp)
    b = xpx.atleast_nd(b, ndim=1, xp=xp)
    D = a.shape[0] - 1
    N = b.shape[0] - 1
    M = max([N, D])
    wosq = wo * wo

    def _substitute(coefs, deg):
        # Expand one polynomial under s -> (s * bw) / (s**2 + wo**2).
        # Both output polynomials have degree 2*M; each output coefficient
        # collects the binomial-expansion terms for its power of s.
        out = xp.empty(2 * M + 1, dtype=coefs.dtype)
        for j in range(2 * M + 1):
            acc = 0.0
            for i in range(deg + 1):
                for k in range(M - i + 1):
                    if i + 2 * k == j:
                        acc += (comb(M - i, k) * coefs[deg - i]
                                * wosq ** (M - i - k) * bw ** i)
            out[2 * M - j] = acc
        return out

    # Numerator and denominator undergo the identical substitution.
    bprime = _substitute(b, N)
    aprime = _substitute(a, D)
    return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
    r"""Calculate a digital IIR filter from an analog transfer function by utilizing
    the bilinear transform.
    Parameters
    ----------
    b : array_like
        Coefficients of the numerator polynomial of the analog transfer function in
        form of a complex- or real-valued 1d array.
    a : array_like
        Coefficients of the denominator polynomial of the analog transfer function in
        form of a complex- or real-valued 1d array.
    fs : float
        Sample rate, as ordinary frequency (e.g., hertz). No pre-warping is
        done in this function.
    Returns
    -------
    beta : ndarray
        Coefficients of the numerator polynomial of the digital transfer function in
        form of a complex- or real-valued 1d array.
    alpha : ndarray
        Coefficients of the denominator polynomial of the digital transfer function in
        form of a complex- or real-valued 1d array.
    Notes
    -----
    The parameters :math:`b = [b_0, \ldots, b_Q]` and :math:`a = [a_0, \ldots, a_P]`
    are 1d arrays of length :math:`Q+1` and :math:`P+1`. They define the analog
    transfer function
    .. math::
        H_a(s) = \frac{b_0 s^Q + b_1 s^{Q-1} + \cdots + b_Q}{
                          a_0 s^P + a_1 s^{P-1} + \cdots + a_P}\ .
    The bilinear transform [1]_ is applied by substituting
    .. math::
        s = \kappa \frac{z-1}{z+1}\ , \qquad \kappa := 2 f_s\ ,
    into :math:`H_a(s)`, with :math:`f_s` being the sampling rate.
    This results in the digital transfer function in the :math:`z`-domain
    .. math::
        H_d(z) = \frac{b_0 \left(\kappa \frac{z-1}{z+1}\right)^Q +
                          b_1 \left(\kappa \frac{z-1}{z+1}\right)^{Q-1} +
                          \cdots + b_Q}{
                          a_0 \left(\kappa \frac{z-1}{z+1}\right)^P +
                          a_1 \left(\kappa \frac{z-1}{z+1}\right)^{P-1} +
                          \cdots + a_P}\ .
    This expression can be simplified by multiplying numerator and denominator by
    :math:`(z+1)^N`, with :math:`N=\max(P, Q)`. This allows :math:`H_d(z)` to be
    reformulated as
    .. math::
        & & \frac{b_0 \big(\kappa (z-1)\big)^Q (z+1)^{N-Q} +
                  b_1 \big(\kappa (z-1)\big)^{Q-1} (z+1)^{N-Q+1} +
                  \cdots + b_Q(z+1)^N}{
                  a_0 \big(\kappa (z-1)\big)^P (z+1)^{N-P} +
                  a_1 \big(\kappa (z-1)\big)^{P-1} (z+1)^{N-P+1} +
                  \cdots + a_P(z+1)^N}\\
        &=:& \frac{\beta_0 + \beta_1 z^{-1} + \cdots + \beta_N z^{-N}}{
               \alpha_0 + \alpha_1 z^{-1} + \cdots + \alpha_N z^{-N}}\ .
    This is the equation implemented to perform the bilinear transform. Note that for
    large :math:`f_s`, :math:`\kappa^Q` or :math:`\kappa^P` can cause a numeric
    overflow for sufficiently large :math:`P` or :math:`Q`.
    References
    ----------
    .. [1] "Bilinear Transform", Wikipedia,
           https://en.wikipedia.org/wiki/Bilinear_transform
    See Also
    --------
    lp2lp, lp2hp, lp2bp, lp2bs, bilinear_zpk
    Examples
    --------
    The following example shows the frequency response of an analog bandpass filter and
    the corresponding digital filter derived by utilitzing the bilinear transform:
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    ...
    >>> fs = 100  # sampling frequency
    >>> om_c = 2 * np.pi * np.array([7, 13])  # corner frequencies
    >>> bb_s, aa_s = signal.butter(4, om_c, btype='bandpass', analog=True, output='ba')
    >>> bb_z, aa_z = signal.bilinear(bb_s, aa_s, fs)
    ...
    >>> w_z, H_z = signal.freqz(bb_z, aa_z)  # frequency response of digitial filter
    >>> w_s, H_s = signal.freqs(bb_s, aa_s, worN=w_z*fs)  # analog filter response
    ...
    >>> f_z, f_s = w_z * fs / (2*np.pi), w_s / (2*np.pi)
    >>> Hz_dB, Hs_dB = (20*np.log10(np.abs(H_).clip(1e-10)) for H_ in (H_z, H_s))
    >>> fg0, ax0 = plt.subplots()
    >>> ax0.set_title("Frequency Response of 4-th order Bandpass Filter")
    >>> ax0.set(xlabel='Frequency $f$ in Hertz', ylabel='Magnitude in dB',
    ...         xlim=[f_z[1], fs/2], ylim=[-200, 2])
    >>> ax0.semilogx(f_z, Hz_dB, alpha=.5, label=r'$|H_z(e^{j 2 \pi f})|$')
    >>> ax0.semilogx(f_s, Hs_dB, alpha=.5, label=r'$|H_s(j 2 \pi f)|$')
    >>> ax0.legend()
    >>> ax0.grid(which='both', axis='x')
    >>> ax0.grid(which='major', axis='y')
    >>> plt.show()
    The difference in the higher frequencies shown in the plot is caused by an effect
    called "frequency warping". [1]_ describes a method called "pre-warping" to
    reduce those deviations.
    """
    xp = array_namespace(b, a)
    # NOTE: the computation below intentionally runs in NumPy regardless of
    # the input namespace, because it relies on np.polynomial.Polynomial
    # arithmetic; results are converted back to ``xp`` at the return.
    b, a = map(np.asarray, (b, a))
    b, a = np.atleast_1d(b), np.atleast_1d(a)  # convert scalars, if needed
    if not a.ndim == 1:
        raise ValueError(f"Parameter a is not a 1d array since {a.shape=}")
    if not b.ndim == 1:
        raise ValueError(f"Parameter b is not a 1d array since {b.shape=}")
    b, a = np.trim_zeros(b, 'f'), np.trim_zeros(a, 'f')  # remove leading zeros
    fs = _validate_fs(fs, allow_none=False)
    # Splitting the factor fs*2 between numerator and denominator reduces the chance of
    # numeric overflow for large fs and large N:
    fac = np.sqrt(fs*2)
    zp1 = np.polynomial.Polynomial((+1, 1)) / fac  # Polynomial (z + 1) / fac
    zm1 = np.polynomial.Polynomial((-1, 1)) * fac  # Polynomial (z - 1) * fac
    # Note that NumPy's Polynomial coefficient order is backward compared to a and b.
    N = max(len(a), len(b)) - 1
    # Sum b_q * (z-1)^q (z+1)^(N-q) (and likewise for a) — the simplified
    # form of H_d(z) derived in the Notes section above.
    numerator = sum(b_ * zp1**(N-q) * zm1**q for q, b_ in enumerate(b[::-1]))
    denominator = sum(a_ * zp1**(N-p) * zm1**p for p, a_ in enumerate(a[::-1]))
    return normalize(
        xp.asarray(numerator.coef[::-1].copy()),
        xp.asarray(denominator.coef[::-1].copy())
    )
def _validate_gpass_gstop(gpass, gstop):
if gpass <= 0.0:
raise ValueError("gpass should be larger than 0.0")
elif gstop <= 0.0:
raise ValueError("gstop should be larger than 0.0")
elif gpass > gstop:
raise ValueError("gpass should be smaller than gstop")
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba',
              fs=None):
    """Complete IIR digital and analog filter design.
    Given passband and stopband frequencies and gains, construct an analog or
    digital IIR filter of minimum order for a given basic type. Return the
    output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
    sections ('sos') form.
    Parameters
    ----------
    wp, ws : float or array like, shape (2,)
        Passband and stopband edge frequencies. Possible values are scalars
        (for lowpass and highpass filters) or ranges (for bandpass and bandstop
        filters).
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. For example:
        - Lowpass: wp = 0.2, ws = 0.3
        - Highpass: wp = 0.3, ws = 0.2
        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
        Note, that for bandpass and bandstop filters passband must lie strictly
        inside stopband or vice versa. Also note that the cutoff at the band edges
        for IIR filters is defined as half-power, so -3dB, not half-amplitude (-6dB)
        like for `scipy.signal.firwin`.
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:
        - Butterworth   : 'butter'
        - Chebyshev I   : 'cheby1'
        - Chebyshev II  : 'cheby2'
        - Cauer/elliptic: 'ellip'
    output : {'ba', 'zpk', 'sos'}, optional
        Filter form of the output:
        - second-order sections (recommended): 'sos'
        - numerator/denominator (default)    : 'ba'
        - pole-zero                          : 'zpk'
        In general the second-order sections ('sos') form  is
        recommended because inferring the coefficients for the
        numerator/denominator form ('ba') suffers from numerical
        instabilities. For reasons of backward compatibility the default
        form is the numerator/denominator form ('ba'), where the 'b'
        and the 'a' in 'ba' refer to the commonly used names of the
        coefficients used.
        Note: Using the second-order sections form ('sos') is sometimes
        associated with additional computational costs: for
        data-intense use cases it is therefore recommended to also
        investigate the numerator/denominator form ('ba').
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.
    See Also
    --------
    butter : Filter design using order and critical points
    cheby1, cheby2, ellip, bessel
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import matplotlib.ticker
    >>> wp = 0.2
    >>> ws = 0.3
    >>> gpass = 1
    >>> gstop = 40
    >>> system = signal.iirdesign(wp, ws, gpass, gstop)
    >>> w, h = signal.freqz(*system)
    >>> fig, ax1 = plt.subplots()
    >>> ax1.set_title('Digital filter frequency response')
    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
    >>> ax1.set_ylabel('Amplitude [dB]', color='b')
    >>> ax1.set_xlabel('Frequency [rad/sample]')
    >>> ax1.grid(True)
    >>> ax1.set_ylim([-120, 20])
    >>> ax2 = ax1.twinx()
    >>> phase = np.unwrap(np.angle(h))
    >>> ax2.plot(w, phase, 'g')
    >>> ax2.set_ylabel('Phase [rad]', color='g')
    >>> ax2.grid(True)
    >>> ax2.axis('tight')
    >>> ax2.set_ylim([-6, 1])
    >>> nticks = 8
    >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))
    >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))
    """
    xp = array_namespace(wp, ws)
    wp, ws = map(xp.asarray, (wp, ws))
    try:
        ordfunc = filter_dict[ftype][1]  # the family's order-selection function
    except KeyError as e:
        raise ValueError(f"Invalid IIR filter type: {ftype}") from e
    except IndexError as e:
        raise ValueError(f"{ftype} does not have order selection. "
                         "Use iirfilter function.") from e
    _validate_gpass_gstop(gpass, gstop)
    wp = xpx.atleast_nd(wp, ndim=1, xp=xp)
    ws = xpx.atleast_nd(ws, ndim=1, xp=xp)
    fs = _validate_fs(fs, allow_none=True)
    if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]:
        raise ValueError("wp and ws must have one or two elements each, and "
                         f"the same shape, got {wp.shape} and {ws.shape}")
    if xp.any(wp <= 0) or xp.any(ws <= 0):
        raise ValueError("Values for wp, ws must be greater than 0")
    if not analog:
        if fs is None:
            if xp.any(wp >= 1) or xp.any(ws >= 1):
                raise ValueError("Values for wp, ws must be less than 1")
        elif xp.any(wp >= fs/2) or xp.any(ws >= fs/2):
            raise ValueError("Values for wp, ws must be less than fs/2 "
                             f"(fs={fs} -> fs/2={fs/2})")
    if wp.shape[0] == 2:
        if not ((ws[0] < wp[0] and wp[1] < ws[1]) or
                (wp[0] < ws[0] and ws[1] < wp[1])):
            raise ValueError("Passband must lie strictly inside stopband "
                             "or vice versa")
    # Encode the band type from the number of edges and their ordering:
    # 1 -> lowpass, 2 -> highpass, 3 -> bandstop, 4 -> bandpass.
    band_type = 2 * (wp.shape[0] - 1)
    band_type += 1
    if wp[0] >= ws[0]:
        band_type += 1
    btype = {1: 'lowpass', 2: 'highpass',
             3: 'bandstop', 4: 'bandpass'}[band_type]
    # Find the minimal order and natural frequency meeting the spec, then
    # delegate the actual design to iirfilter.
    N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs)
    return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
                     ftype=ftype, output=output, fs=fs)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
              ftype='butter', output='ba', fs=None):
    """
    IIR digital and analog filter design given order and critical points.
    Design an Nth-order digital or analog filter and return the filter
    coefficients.
    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For digital filters, `Wn` are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`Wn` is thus in
        half-cycles / sample.)
        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
        When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``.
    rp : float, optional
        For Chebyshev and elliptic filters, provides the maximum ripple
        in the passband. (dB)
    rs : float, optional
        For Chebyshev and elliptic filters, provides the minimum attenuation
        in the stop band. (dB)
    btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
        The type of filter.  Default is 'bandpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:
        - Butterworth   : 'butter'
        - Chebyshev I   : 'cheby1'
        - Chebyshev II  : 'cheby2'
        - Cauer/elliptic: 'ellip'
        - Bessel/Thomson: 'bessel'
    output : {'ba', 'zpk', 'sos'}, optional
        Filter form of the output:
        - second-order sections (recommended): 'sos'
        - numerator/denominator (default)    : 'ba'
        - pole-zero                          : 'zpk'
        In general the second-order sections ('sos') form  is
        recommended because inferring the coefficients for the
        numerator/denominator form ('ba') suffers from numerical
        instabilities. For reasons of backward compatibility the default
        form is the numerator/denominator form ('ba'), where the 'b'
        and the 'a' in 'ba' refer to the commonly used names of the
        coefficients used.
        Note: Using the second-order sections form ('sos') is sometimes
        associated with additional computational costs: for
        data-intense use cases it is therefore recommended to also
        investigate the numerator/denominator form ('ba').
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.
    See Also
    --------
    butter : Filter design using order and critical points
    cheby1, cheby2, ellip, bessel
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord, ellipord
    iirdesign : General filter design using passband and stopband spec
    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.
    The current behavior is for ``ndarray`` outputs to have 64 bit precision
    (``float64`` or ``complex128``) regardless of the dtype of `Wn` but
    outputs may respect the dtype of `Wn` in a future version.
    Examples
    --------
    Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to
    200 Hz and plot the frequency response:
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60,
    ...                         btype='band', analog=True, ftype='cheby2')
    >>> w, h = signal.freqs(b, a, 1000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(1, 1, 1)
    >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5)))
    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
    >>> ax.set_xlabel('Frequency [Hz]')
    >>> ax.set_ylabel('Amplitude [dB]')
    >>> ax.axis((10, 1000, -100, 10))
    >>> ax.grid(which='both', axis='both')
    >>> plt.show()
    Create a digital filter with the same properties, in a system with
    sampling rate of 2000 Hz, and plot the frequency response. (Second-order
    sections implementation is required to ensure stability of a filter of
    this order):
    >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band',
    ...                        analog=False, ftype='cheby2', fs=2000,
    ...                        output='sos')
    >>> w, h = signal.freqz_sos(sos, 2000, fs=2000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(1, 1, 1)
    >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5)))
    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
    >>> ax.set_xlabel('Frequency [Hz]')
    >>> ax.set_ylabel('Amplitude [dB]')
    >>> ax.axis((10, 1000, -100, 10))
    >>> ax.grid(which='both', axis='both')
    >>> plt.show()
    """
    xp = array_namespace(Wn)
    # For now, outputs will have float64 base dtype regardless of
    # the dtype of Wn, so cast to float64 here to ensure 64 bit
    # precision for all calculations.
    Wn = xp.asarray(Wn, dtype=xp.float64)
    fs = _validate_fs(fs, allow_none=True)
    ftype, btype, output = (x.lower() for x in (ftype, btype, output))
    if fs is not None:
        if analog:
            raise ValueError("fs cannot be specified for an analog filter")
        Wn = Wn / (fs/2)  # normalize so that 1 corresponds to Nyquist
    if xp.any(Wn <= 0):
        raise ValueError("filter critical frequencies must be greater than 0")
    if xp_size(Wn) > 1 and not Wn[0] < Wn[1]:
        raise ValueError("Wn[0] must be less than Wn[1]")
    try:
        btype = band_dict[btype]
    except KeyError as e:
        raise ValueError(f"'{btype}' is an invalid bandtype for filter.") from e
    try:
        typefunc = filter_dict[ftype][0]
    except KeyError as e:
        raise ValueError(f"'{ftype}' is not a valid basic IIR filter.") from e
    if output not in ['ba', 'zpk', 'sos']:
        raise ValueError(f"'{output}' is not a valid output form.")
    if rp is not None and rp < 0:
        raise ValueError("passband ripple (rp) must be positive")
    if rs is not None and rs < 0:
        raise ValueError("stopband attenuation (rs) must be positive")
    # Get analog lowpass prototype (each family needs different ripple args)
    if typefunc == buttap:
        z, p, k = typefunc(N, xp=xp)
    elif typefunc == besselap:
        z, p, k = typefunc(N, norm=bessel_norms[ftype], xp=xp)
    elif typefunc == cheb1ap:
        if rp is None:
            raise ValueError("passband ripple (rp) must be provided to "
                             "design a Chebyshev I filter.")
        z, p, k = typefunc(N, rp, xp=xp)
    elif typefunc == cheb2ap:
        if rs is None:
            raise ValueError("stopband attenuation (rs) must be provided to "
                             "design an Chebyshev II filter.")
        z, p, k = typefunc(N, rs, xp=xp)
    elif typefunc == ellipap:
        if rs is None or rp is None:
            raise ValueError("Both rp and rs must be provided to design an "
                             "elliptic filter.")
        z, p, k = typefunc(N, rp, rs, xp=xp)
    else:
        raise NotImplementedError(f"'{ftype}' not implemented in iirfilter.")
    # Pre-warp frequencies for digital filter design
    if not analog:
        if xp.any(Wn <= 0) or xp.any(Wn >= 1):
            if fs is not None:
                raise ValueError("Digital filter critical frequencies must "
                                 f"be 0 < Wn < fs/2 (fs={fs} -> fs/2={fs/2})")
            raise ValueError("Digital filter critical frequencies "
                             "must be 0 < Wn < 1")
        fs = 2.0
        # Pre-warp so the analog design lands on the requested digital
        # frequencies after the bilinear transform applied further down.
        warped = 2 * fs * xp.tan(xp.pi * Wn / fs)
    else:
        warped = Wn
    # transform to lowpass, bandpass, highpass, or bandstop
    if btype in ('lowpass', 'highpass'):
        if xp_size(Wn) != 1:
            raise ValueError('Must specify a single critical frequency Wn '
                             'for lowpass or highpass filter')
        if btype == 'lowpass':
            z, p, k = lp2lp_zpk(z, p, k, wo=warped)
        elif btype == 'highpass':
            z, p, k = lp2hp_zpk(z, p, k, wo=warped)
    elif btype in ('bandpass', 'bandstop'):
        try:
            bw = warped[1] - warped[0]
            wo = xp.sqrt(warped[0] * warped[1])  # geometric center frequency
        except IndexError as e:
            raise ValueError('Wn must specify start and stop frequencies for '
                             'bandpass or bandstop filter') from e
        if btype == 'bandpass':
            z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw)
        elif btype == 'bandstop':
            z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw)
    else:
        raise NotImplementedError(f"'{btype}' not implemented in iirfilter.")
    # Find discrete equivalent if necessary
    if not analog:
        z, p, k = bilinear_zpk(z, p, k, fs=fs)
    # Transform to proper out type (pole-zero, state-space, numer-denom)
    if output == 'zpk':
        return z, p, k
    elif output == 'ba':
        return zpk2tf(z, p, k)
    elif output == 'sos':
        return zpk2sos(z, p, k, analog=analog)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = p.shape[0] - z.shape[0]
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
def bilinear_zpk(z, p, k, fs):
    r"""
    Return a digital IIR filter from an analog one using a bilinear transform.

    Transform a set of poles and zeros from the analog s-plane to the digital
    z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)``
    for ``s``, maintaining the shape of the frequency response.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    fs : float
        Sample rate, as ordinary frequency (e.g., hertz). No prewarping is
        done in this function.

    Returns
    -------
    z : ndarray
        Zeros of the transformed digital filter transfer function.
    p : ndarray
        Poles of the transformed digital filter transfer function.
    k : float
        System gain of the transformed digital filter.

    See Also
    --------
    lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk
    bilinear

    Notes
    -----
    .. versionadded:: 1.1.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> fs = 100
    >>> bf = 2 * np.pi * np.array([7, 13])
    >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', analog=True,
    ...                                   output='zpk'))
    >>> filtz = signal.lti(*signal.bilinear_zpk(filts.zeros, filts.poles,
    ...                                         filts.gain, fs))
    >>> wz, hz = signal.freqz_zpk(filtz.zeros, filtz.poles, filtz.gain)
    >>> ws, hs = signal.freqs_zpk(filts.zeros, filts.poles, filts.gain,
    ...                           worN=fs*wz)
    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)),
    ...              label=r'$|H_z(e^{j \omega})|$')
    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)),
    ...              label=r'$|H(j \omega)|$')
    >>> plt.legend()
    >>> plt.xlabel('Frequency [Hz]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(True)
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))
    z = xpx.atleast_nd(z, ndim=1, xp=xp)
    p = xpx.atleast_nd(p, ndim=1, xp=xp)
    fs = _validate_fs(fs, allow_none=False)
    degree = _relative_degree(z, p)
    kappa = 2.0*fs

    def _tustin(roots):
        # Map each s-plane root s to its z-plane image (kappa + s)/(kappa - s).
        return (kappa + roots) / (kappa - roots)

    z_digital = _tustin(z)
    p_digital = _tustin(p)
    # Zeros that were at infinity map to z = -1 (the Nyquist frequency).
    z_digital = xp.concat((z_digital, -xp.ones(degree)))
    # Compensate the gain for the change of variables.
    k_digital = k * xp.real(xp.prod(kappa - z) / xp.prod(kappa - p))
    return z_digital, p_digital, k_digital
def lp2lp_zpk(z, p, k, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a different frequency.

    Return an analog low-pass filter whose cutoff frequency is `wo`, derived
    from an analog low-pass prototype with unity cutoff frequency, using the
    zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g., rad/s).
        Defaults to no change.

    Returns
    -------
    z : ndarray
        Zeros of the transformed low-pass filter transfer function.
    p : ndarray
        Poles of the transformed low-pass filter transfer function.
    k : float
        System gain of the transformed low-pass filter.

    See Also
    --------
    lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
    lp2lp

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s}{\omega_0}

    .. versionadded:: 1.1.0

    Examples
    --------
    Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to
    transform it to a new 'zpk' representation associated with a cutoff frequency wo.

    >>> from scipy.signal import lp2lp_zpk
    >>> z = [7, 2]
    >>> p = [5, 13]
    >>> k = 0.8
    >>> wo = 0.4
    >>> lp2lp_zpk(z, p, k, wo)
    (   array([2.8, 0.8]), array([2. , 5.2]), 0.8)
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))
    z = xpx.atleast_nd(z, ndim=1, xp=xp)
    p = xpx.atleast_nd(p, ndim=1, xp=xp)
    wo = float(wo)  # Avoid int wraparound
    degree = _relative_degree(z, p)
    # Scaling every root radially by wo shifts the cutoff frequency; each
    # scaled pole divides the gain by wo and each scaled zero multiplies it,
    # so multiplying k by wo**degree keeps the overall gain unchanged.
    return wo * z, wo * p, k * wo**degree
def lp2hp_zpk(z, p, k, wo=1.0):
    r"""
    Transform a lowpass filter prototype to a highpass filter.

    Return an analog high-pass filter with cutoff frequency `wo`, derived
    from an analog low-pass prototype with unity cutoff frequency, using the
    zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired cutoff, as angular frequency (e.g., rad/s).
        Defaults to no change.

    Returns
    -------
    z : ndarray
        Zeros of the transformed high-pass filter transfer function.
    p : ndarray
        Poles of the transformed high-pass filter transfer function.
    k : float
        System gain of the transformed high-pass filter.

    See Also
    --------
    lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
    lp2hp

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{\omega_0}{s}

    This maintains symmetry of the lowpass and highpass responses on a
    logarithmic scale.

    .. versionadded:: 1.1.0

    Examples
    --------
    Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to
    transform it to a highpass filter with a cutoff frequency wo.

    >>> from scipy.signal import lp2hp_zpk
    >>> z = [ -2 + 3j , -0.5 - 0.8j ]
    >>> p = [ -1 , -4 ]
    >>> k = 10
    >>> wo = 0.6
    >>> lp2hp_zpk(z, p, k, wo)
    (   array([-0.09230769-0.13846154j, -0.33707865+0.53932584j]),
        array([-0.6 , -0.15]),
        8.5)
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))
    # XXX: no xp_promote here since that breaks TestButter
    z = xpx.atleast_nd(z, ndim=1, xp=xp)
    p = xpx.atleast_nd(p, ndim=1, xp=xp)
    wo = float(wo)
    degree = _relative_degree(z, p)
    # Radial inversion about the unit circle (s -> wo/s) turns the lowpass
    # response into a highpass one while shifting the cutoff to wo.
    inv_z = wo / z
    inv_p = wo / p
    # Lowpass zeros at infinity become highpass zeros at the origin.
    inv_z = xp.concat((inv_z, xp.zeros(degree)))
    # Undo the gain change the inversion introduced.
    gain = k * xp.real(xp.prod(-z) / xp.prod(-p))
    return inv_z, inv_p, gain
def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0):
    r"""
    Transform a lowpass filter prototype to a bandpass filter.

    Return an analog band-pass filter, centered at `wo` with bandwidth `bw`,
    derived from an analog low-pass prototype with unity cutoff frequency,
    using the zeros, poles, and gain ('zpk') representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired passband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired passband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    z : ndarray
        Zeros of the transformed band-pass filter transfer function.
    p : ndarray
        Poles of the transformed band-pass filter transfer function.
    k : float
        System gain of the transformed band-pass filter.

    See Also
    --------
    lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear
    lp2bp

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}

    This is the "wideband" transformation, producing a passband with
    geometric (log frequency) symmetry about `wo`.

    .. versionadded:: 1.1.0

    Examples
    --------
    Use the 'zpk' (Zero-Pole-Gain) representation of a lowpass filter to
    transform it to a bandpass filter with a center frequency wo and
    bandwidth bw.

    >>> from scipy.signal import lp2bp_zpk
    >>> z = [ 5 + 2j , 5 - 2j ]
    >>> p = [ 7 , -16 ]
    >>> k = 0.8
    >>> wo = 0.62
    >>> bw = 15
    >>> lp2bp_zpk(z, p, k, wo, bw)
    (   array([7.49955815e+01+3.00017676e+01j, 7.49955815e+01-3.00017676e+01j,
        4.41850748e-03-1.76761126e-03j, 4.41850748e-03+1.76761126e-03j]),
        array([1.04996339e+02+0.j, -1.60167736e-03+0.j, 3.66108003e-03+0.j,
        -2.39998398e+02+0.j]), 0.8)
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))
    z, p = xp_promote(z, p, force_floating=True, xp=xp)
    z = xpx.atleast_nd(z, ndim=1, xp=xp)
    p = xpx.atleast_nd(p, ndim=1, xp=xp)
    wo = float(wo)
    bw = float(bw)
    degree = _relative_degree(z, p)

    def _duplicate(roots):
        # Scale a set of baseband roots to the desired bandwidth, then split
        # each into a pair shifted to +wo and -wo. The cast to complex makes
        # the square root well-defined (a real sqrt would produce NaN).
        scaled = xp.astype(roots * bw/2, xp.complex128)
        offset = xp.sqrt(scaled**2 - wo**2)
        return xp.concat((scaled + offset, scaled - offset))

    z_bp = _duplicate(z)
    p_bp = _duplicate(p)
    # Move degree zeros to origin, leaving degree zeros at infinity for BPF.
    z_bp = xp.concat((z_bp, xp.zeros(degree)))
    # Cancel out the gain change caused by the frequency scaling.
    k_bp = k * bw**degree
    return z_bp, p_bp, k_bp
def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0):
    r"""Transform a lowpass filter prototype to a bandstop filter.

    Return an analog band-stop filter with center frequency `wo` and
    stopband width `bw` from an analog low-pass filter prototype with
    unity cutoff frequency, using zeros, poles, and gain ('zpk')
    representation.

    Parameters
    ----------
    z : array_like
        Zeros of the analog filter transfer function.
    p : array_like
        Poles of the analog filter transfer function.
    k : float
        System gain of the analog filter transfer function.
    wo : float
        Desired stopband center, as angular frequency (e.g., rad/s).
        Defaults to no change.
    bw : float
        Desired stopband width, as angular frequency (e.g., rad/s).
        Defaults to 1.

    Returns
    -------
    z : ndarray
        Zeros of the transformed band-stop filter transfer function.
    p : ndarray
        Poles of the transformed band-stop filter transfer function.
    k : float
        System gain of the transformed band-stop filter.

    See Also
    --------
    lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear
    lp2bs

    Notes
    -----
    This is derived from the s-plane substitution

    .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}

    This is the "wideband" transformation, producing a stopband with
    geometric (log frequency) symmetry about `wo`.

    .. versionadded:: 1.1.0

    Examples
    --------
    Transform a low-pass filter represented in 'zpk' (Zero-Pole-Gain) form
    into a bandstop filter represented in 'zpk' form, with a center frequency wo
    and bandwidth bw.

    >>> from scipy.signal import lp2bs_zpk
    >>> z = [ ]
    >>> p = [ 0.7 , -1 ]
    >>> k = 9
    >>> wo = 0.5
    >>> bw = 10
    >>> lp2bs_zpk(z, p, k, wo, bw)
    ( array([0.+0.5j, 0.+0.5j, 0.-0.5j, 0.-0.5j]),
    array([14.2681928 +0.j, -0.02506281+0.j, 0.01752149+0.j, -9.97493719+0.j]),
    -12.857142857142858)
    """
    xp = array_namespace(z, p)
    z, p = map(xp.asarray, (z, p))
    z, p = xp_promote(z, p, force_floating=True, xp=xp)
    z = xpx.atleast_nd(z, ndim=1, xp=xp)
    p = xpx.atleast_nd(p, ndim=1, xp=xp)
    wo = float(wo)
    bw = float(bw)

    degree = _relative_degree(z, p)

    # Inverting the roots turns the lowpass prototype into a highpass
    # with the desired bandwidth (the s -> bw/(2 s) half of the mapping).
    hp_zeros = (bw / 2) / z
    hp_poles = (bw / 2) / p

    # Cast up front so the square roots below produce complex values
    # rather than NaN for negative radicands.
    hp_zeros = xp.astype(hp_zeros, xp.complex128)
    hp_poles = xp.astype(hp_poles, xp.complex128)

    # Every root splits into a pair, shifted from baseband to +wo and -wo.
    dz = xp.sqrt(hp_zeros**2 - wo**2)
    dp = xp.sqrt(hp_poles**2 - wo**2)
    z_bs = xp.concat((hp_zeros + dz, hp_zeros - dz))
    p_bs = xp.concat((hp_poles + dp, hp_poles - dp))

    # Zeros that were at infinity land at the center of the stopband.
    z_bs = xp.concat((z_bs,
                      xp.full(degree, +1j * wo),
                      xp.full(degree, -1j * wo)))

    # Undo the gain change caused by the inversion of the roots.
    k_bs = k * xp.real(xp.prod(-z) / xp.prod(-p))

    return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Butterworth digital and analog filter design.

    Design an Nth-order digital or analog Butterworth filter and return
    the filter coefficients.  The Butterworth filter has a maximally flat
    magnitude response in the passband.

    Parameters
    ----------
    N : int
        The order of the filter. For 'bandpass' and 'bandstop' filters,
        the resulting order of the final second-order sections ('sos')
        matrix is ``2*N``, with `N` the number of biquad sections
        of the desired system.
    Wn : array_like
        The critical frequency or frequencies. For lowpass and highpass
        filters, `Wn` is a scalar; for bandpass and bandstop filters,
        `Wn` is a length-2 sequence.  This is the point at which the gain
        drops to 1/sqrt(2) that of the passband (the "-3 dB point").

        For digital filters, if `fs` is not specified, `Wn` units are
        normalized from 0 to 1, where 1 is the Nyquist frequency; if `fs`
        is specified, `Wn` is in the same units as `fs`.  For analog
        filters, `Wn` is an angular frequency (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR
        filter.  Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    buttord, buttap

    Notes
    -----
    The ``'sos'`` output parameter was added in 0.16.0.

    If the transfer function form ``[b, a]`` is requested, numerical
    problems can occur since the conversion between roots and polynomial
    coefficients is a numerically sensitive operation, even for N >= 4.

    .. warning::
        Designing high-order and narrowband IIR filters in TF form can
        result in unstable or incorrect filtering due to floating point
        numerical precision issues.  Consider inspecting output filter
        characteristics with `freqz` or designing the filters with
        second-order sections via ``output='sos'``.

    The current behavior is for ``ndarray`` outputs to have 64 bit
    precision (``float64`` or ``complex128``) regardless of the dtype of
    `Wn` but outputs may respect the dtype of `Wn` in a future version.
    """
    # Thin convenience wrapper: the generic IIR designer does all the work.
    design_opts = dict(btype=btype, analog=analog, output=output, fs=fs)
    return iirfilter(N, Wn, ftype='butter', **design_opts)
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Chebyshev type I digital and analog filter design.

    Design an Nth-order digital or analog Chebyshev type I filter and
    return the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For Type I filters, this is the point in the transition band at
        which the gain first drops below -`rp`.

        For digital filters, `Wn` are in the same units as `fs`. By
        default, `fs` is 2 half-cycles/sample, so these are normalized
        from 0 to 1, where 1 is the Nyquist frequency.  For analog
        filters, `Wn` is an angular frequency (e.g., rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR
        filter.  Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    cheb1ord, cheb1ap

    Notes
    -----
    The Chebyshev type I filter maximizes the rate of cutoff between the
    frequency response's passband and stopband, at the expense of ripple
    in the passband and increased ringing in the step response.

    Type I filters roll off faster than Type II (`cheby2`), but Type II
    filters do not have any ripple in the passband.

    The equiripple passband has N maxima or minima (for example, a
    5th-order filter has 3 maxima and 2 minima).  Consequently, the DC
    gain is unity for odd-order filters, or -rp dB for even-order
    filters.

    The ``'sos'`` output parameter was added in 0.16.0.

    The current behavior is for ``ndarray`` outputs to have 64 bit
    precision (``float64`` or ``complex128``) regardless of the dtype of
    `Wn` but outputs may respect the dtype of `Wn` in a future version.
    """
    # Delegate to the generic designer with the Chebyshev-I prototype.
    design_opts = dict(btype=btype, analog=analog, output=output, fs=fs)
    return iirfilter(N, Wn, rp=rp, ftype='cheby1', **design_opts)
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Chebyshev type II digital and analog filter design.

    Design an Nth-order digital or analog Chebyshev type II filter and
    return the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rs : float
        The minimum attenuation required in the stop band.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For Type II filters, this is the point in the transition band at
        which the gain first reaches -`rs`.

        For digital filters, `Wn` are in the same units as `fs`. By
        default, `fs` is 2 half-cycles/sample, so these are normalized
        from 0 to 1, where 1 is the Nyquist frequency.  For analog
        filters, `Wn` is an angular frequency (e.g., rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR
        filter.  Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    cheb2ord, cheb2ap

    Notes
    -----
    The Chebyshev type II filter maximizes the rate of cutoff between the
    frequency response's passband and stopband, at the expense of ripple
    in the stopband and increased ringing in the step response.

    Type II filters do not roll off as fast as Type I (`cheby1`).

    The ``'sos'`` output parameter was added in 0.16.0.

    The current behavior is for ``ndarray`` outputs to have 64 bit
    precision (``float64`` or ``complex128``) regardless of the dtype of
    `Wn` but outputs may respect the dtype of `Wn` in a future version.
    """
    # Delegate to the generic designer with the Chebyshev-II prototype.
    design_opts = dict(btype=btype, analog=analog, output=output, fs=fs)
    return iirfilter(N, Wn, rs=rs, ftype='cheby2', **design_opts)
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None):
    """
    Elliptic (Cauer) digital and analog filter design.

    Design an Nth-order digital or analog elliptic filter and return
    the filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband.
        Specified in decibels, as a positive number.
    rs : float
        The minimum attenuation required in the stop band.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For elliptic filters, this is the point in the transition band at
        which the gain first drops below -`rp`.

        For digital filters, `Wn` are in the same units as `fs`. By
        default, `fs` is 2 half-cycles/sample, so these are normalized
        from 0 to 1, where 1 is the Nyquist frequency.  For analog
        filters, `Wn` is an angular frequency (e.g., rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba' for backwards
        compatibility, but 'sos' should be used for general-purpose
        filtering.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR
        filter.  Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    See Also
    --------
    ellipord, ellipap

    Notes
    -----
    Also known as Cauer or Zolotarev filters, the elliptical filter
    maximizes the rate of transition between the frequency response's
    passband and stopband, at the expense of ripple in both, and
    increased ringing in the step response.

    As `rp` approaches 0, the elliptical filter becomes a Chebyshev
    type II filter (`cheby2`).  As `rs` approaches 0, it becomes a
    Chebyshev type I filter (`cheby1`).  As both approach 0, it becomes a
    Butterworth filter (`butter`).

    The equiripple passband has N maxima or minima (for example, a
    5th-order filter has 3 maxima and 2 minima).  Consequently, the DC
    gain is unity for odd-order filters, or -rp dB for even-order
    filters.

    The ``'sos'`` output parameter was added in 0.16.0.

    The current behavior is for ``ndarray`` outputs to have 64 bit
    precision (``float64`` or ``complex128``) regardless of the dtype of
    `Wn` but outputs may respect the dtype of `Wn` in a future version.
    """
    # Delegate to the generic designer with the elliptic prototype.
    design_opts = dict(btype=btype, analog=analog, output=output, fs=fs)
    return iirfilter(N, Wn, rs=rs, rp=rp, ftype='elliptic', **design_opts)
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase',
           fs=None):
    """
    Bessel/Thomson digital and analog filter design.

    Design an Nth-order digital or analog Bessel filter and return the
    filter coefficients.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies
        (defined by the `norm` parameter).

        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
        For digital filters, `Wn` are in the same units as `fs`.  By
        default, `fs` is 2 half-cycles/sample, so these are normalized
        from 0 to 1, where 1 is the Nyquist frequency.
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned. (See Notes.)
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.
    norm : {'phase', 'delay', 'mag'}, optional
        Critical frequency normalization:

        ``phase``
            The filter is normalized such that the phase response reaches
            its midpoint at angular (e.g. rad/s) frequency `Wn`.  This
            happens for both low-pass and high-pass filters, so this is
            the "phase-matched" case.  The magnitude response asymptotes
            are the same as a Butterworth filter of the same order with a
            cutoff of `Wn`.  This is the default, and matches MATLAB's
            implementation.

        ``delay``
            The filter is normalized such that the group delay in the
            passband is 1/`Wn` (e.g., seconds).  This is the "natural"
            type obtained by solving Bessel polynomials.

        ``mag``
            The filter is normalized such that the gain magnitude is
            -3 dB at angular frequency `Wn`.

        .. versionadded:: 0.18.0
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR
        filter.  Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function.  Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.

    Notes
    -----
    Also known as a Thomson filter, the analog Bessel filter has
    maximally flat group delay and maximally linear phase response, with
    very little ringing in the step response. [1]_

    The Bessel is inherently an analog filter.  This function generates
    digital Bessel filters using the bilinear transform, which does not
    preserve the phase response of the analog filter.  As such, it is
    only approximately correct at frequencies below about fs/4.  To get
    maximally-flat group delay at higher frequencies, the analog Bessel
    filter must be transformed using phase-preserving techniques.

    See `besselap` for implementation details and references.

    The ``'sos'`` output parameter was added in 0.16.0.

    The current behavior is for ``ndarray`` outputs to have 64 bit
    precision (``float64`` or ``complex128``) regardless of the dtype of
    `Wn` but outputs may respect the dtype of `Wn` in a future version.

    References
    ----------
    .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
           Characteristics", Proceedings of the Institution of Electrical
           Engineers, Part III, November 1949, Vol. 96, No. 44,
           pp. 487-490.
    """
    # The normalization variant is encoded into the prototype name that
    # the generic designer understands (bessel_phase/bessel_delay/bessel_mag).
    design_opts = dict(btype=btype, analog=analog, output=output, fs=fs)
    return iirfilter(N, Wn, ftype=f'bessel_{norm}', **design_opts)
def maxflat():
    # Placeholder: not implemented. Kept as a stub for the public API surface.
    pass
def yulewalk():
    # Placeholder: not implemented. Kept as a stub for the public API surface.
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
    """
    Band Stop Objective Function for order minimization.

    Returns the non-integer order for an analog band stop filter.

    Parameters
    ----------
    wp : scalar
        Edge of passband `passb`.
    ind : int, {0, 1}
        Index specifying which `passb` edge to vary (0 or 1).
    passb : ndarray
        Two element sequence of fixed passband edges.
    stopb : ndarray
        Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in stopband in dB.
    type : {'butter', 'cheby', 'ellip'}
        Type of filter.

    Returns
    -------
    n : scalar
        Filter order (possibly non-integer).

    Notes
    -----
    Band-stop filters are used in applications where certain frequency
    components need to be blocked while others are allowed; for instance,
    removing noise at specific frequencies while allowing the desired signal
    to pass through. The order of a filter often determines its complexity and
    accuracy. Determining the right order can be a challenge. This function
    aims to provide an appropriate order for an analog band stop filter.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.signal import band_stop_obj
    >>> wp = 2
    >>> ind = 1
    >>> passb = np.array([1, 3])
    >>> stopb = np.array([0.5, 4])
    >>> gstop = 30
    >>> gpass = 3
    >>> filter_type = 'butter'
    >>> band_stop_obj(wp, ind, passb, stopb, gpass, gstop, filter_type)
    np.float64(-2.758504160760643)
    """
    _validate_gpass_gstop(gpass, gstop)
    # Work on a copy so the caller's fixed passband edges are untouched;
    # only the edge selected by `ind` is varied by the optimizer.
    passbC = passb.copy()
    passbC[ind] = wp
    # Lowpass-prototype natural frequency for the band-stop configuration.
    nat = (stopb * (passbC[0] - passbC[1]) /
           (stopb ** 2 - passbC[0] * passbC[1]))
    # The tighter of the two stopband edges dominates the required order.
    nat = min(abs(nat))
    if type == 'butter':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = (np.log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * np.log10(nat)))
    elif type == 'cheby':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = np.arccosh(np.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / np.arccosh(nat)
    elif type == 'ellip':
        GSTOP = 10 ** (0.1 * gstop)
        GPASS = 10 ** (0.1 * gpass)
        # Elliptic order estimate via ratios of complete elliptic integrals.
        arg1 = np.sqrt((GPASS - 1.0) / (GSTOP - 1.0))
        arg0 = 1.0 / nat
        d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
        d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
        n = (d0[0] * d1[1] / (d0[1] * d1[0]))
    else:
        raise ValueError(f"Incorrect type: {type}")
    return n
def _pre_warp(wp, ws, analog, *, xp):
# Pre-warp frequencies for digital filter design
if not analog:
passb = xp.tan(xp.pi * wp / 2.0)
stopb = xp.tan(xp.pi * ws / 2.0)
else:
passb, stopb = wp, ws
return passb, stopb
def _validate_wp_ws(wp, ws, fs, analog, *, xp):
    """Normalize passband/stopband edges and classify the filter type.

    Returns ``(wp, ws, filter_type)`` where the edges are 1-D floating
    arrays (scaled to Nyquist-normalized units when `fs` is given) and
    ``filter_type`` is 1=lowpass, 2=highpass, 3=bandstop, 4=bandpass.
    """
    wp = xpx.atleast_nd(wp, ndim=1, xp=xp)
    ws = xpx.atleast_nd(ws, ndim=1, xp=xp)
    wp, ws = xp_promote(wp, ws, force_floating=True, xp=xp)

    if fs is not None:
        if analog:
            raise ValueError("fs cannot be specified for an analog filter")
        # Convert absolute frequencies to the 0..1 Nyquist-relative scale.
        wp, ws = 2 * wp / fs, 2 * ws / fs

    # One edge -> low/high (1 or 2); two edges -> stop/pass (3 or 4).
    filter_type = 2 * wp.shape[0] - 1
    if wp[0] >= ws[0]:
        filter_type = filter_type + 1
    return wp, ws, filter_type
def _find_nat_freq(stopb, passb, gpass, gstop, filter_type, filter_kind, *, xp):
    # Compute the lowpass-prototype natural frequency `nat` for the given
    # band configuration (1=low, 2=high, 3=stop, 4=pass), returning it
    # together with the (possibly adjusted) passband edges.  For band-stop
    # designs the passband edges are re-optimized via fminbound to
    # minimize the required order, which is why `passb` is also returned.
    if filter_type == 1:            # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:          # stop
        # fminbound / band_stop_obj are NumPy-only, so temporarily leave
        # the array-API namespace.
        passb, stopb = np.asarray(passb), np.asarray(stopb)  # XXX fminbound array API
        # The 1e-12 offsets keep the search brackets strictly inside the
        # open interval between the fixed stopband and passband edges.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                 args=(0, passb, stopb, gpass, gstop,
                                       filter_kind),
                                 disp=0)
        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop,
                                       filter_kind),
                                 disp=0)
        passb = [float(wp0), float(wp1)]
        # Re-enter the caller's array namespace before computing `nat`.
        passb, stopb = xp.asarray(passb), xp.asarray(stopb)

        nat = ((stopb * (passb[0] - passb[1])) /
               (stopb ** 2 - passb[0] * passb[1]))
    elif filter_type == 4:          # pass
        nat = ((stopb ** 2 - passb[0] * passb[1]) /
               (stopb * (passb[0] - passb[1])))
    else:
        raise ValueError(f"should not happen: {filter_type =}.")

    # The most restrictive edge determines the prototype frequency.
    nat = xp.min(xp.abs(nat))
    return nat, passb
def _postprocess_wn(WN, analog, fs, *, xp):
wn = WN if analog else xp.atan(WN) * 2.0 / xp.pi
if wn.shape[0] == 1:
wn = wn[0]
if fs is not None:
wn = wn * fs / 2
return wn
def buttord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Butterworth filter order selection.

    Return the order of the lowest order digital or analog Butterworth filter
    that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float or array-like
        Passband and stopband edge frequencies.

        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:

        - Lowpass: wp = 0.2, ws = 0.3
        - Highpass: wp = 0.3, ws = 0.2
        - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
        - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]

        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.

        .. versionadded:: 1.2.0

    Returns
    -------
    ord : int
        The lowest order for a Butterworth filter which meets specs.
    wn : ndarray or float
        The Butterworth natural frequency (i.e. the "3dB frequency"). Should
        be used with `butter` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `butter`.

    See Also
    --------
    butter : Filter design using order and critical points
    cheb1ord : Find order and critical points from passband and stopband spec
    cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec

    Examples
    --------
    Design an analog bandpass filter with passband within 3 dB from 20 to
    50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
    Plot its frequency response, showing the passband and stopband
    constraints in gray.

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np

    >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
    >>> b, a = signal.butter(N, Wn, 'band', True)
    >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.title('Butterworth bandpass filter fit to constraints')
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
    >>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
    >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
    >>> plt.axis([10, 100, -60, 3])
    >>> plt.show()
    """
    xp = array_namespace(wp, ws)
    wp, ws = map(xp.asarray, (wp, ws))

    _validate_gpass_gstop(gpass, gstop)
    fs = _validate_fs(fs, allow_none=True)
    # Normalize edges, classify the band type, and pre-warp for digital.
    wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog, xp=xp)
    passb, stopb = _pre_warp(wp, ws, analog, xp=xp)
    nat, passb = _find_nat_freq(
        stopb, passb, gpass, gstop, filter_type, 'butter', xp=xp
    )

    # Linear (power-ratio) equivalents of the dB specifications.
    GSTOP = 10 ** (0.1 * builtins.abs(gstop))
    GPASS = 10 ** (0.1 * builtins.abs(gpass))
    ord = int(
        math.ceil(math.log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * math.log10(nat)))
    )

    # Find the Butterworth natural frequency WN (or the "3dB" frequency")
    # to give exactly gpass at passb.
    try:
        W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
    except ZeroDivisionError:
        # ord == 0: degenerate specification; fall back to no scaling.
        W0 = 1.0
        warnings.warn("Order is zero...check input parameters.",
                      RuntimeWarning, stacklevel=2)

    # now convert this frequency back from lowpass prototype
    # to the original analog filter
    if filter_type == 1:  # low
        WN = W0 * passb
    elif filter_type == 2:  # high
        WN = passb / W0
    elif filter_type == 3:  # stop
        # Invert the band-stop transformation: solve the quadratic that
        # maps the prototype frequency W0 back to the two band edges.
        discr = xp.sqrt((passb[1] - passb[0]) ** 2 +
                        4 * W0 ** 2 * passb[0] * passb[1])
        WN0 = ((passb[1] - passb[0]) + discr) / (2 * W0)
        WN1 = ((passb[1] - passb[0]) - discr) / (2 * W0)
        WN = xp.asarray([float(WN0), float(WN1)])
        WN = xp.sort(xp.abs(WN))
    elif filter_type == 4:  # pass
        # Invert the band-pass transformation for both +W0 and -W0 roots.
        W0 = xp.asarray([-W0, W0], dtype=xp.float64)
        WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
              xp.sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
                      passb[0] * passb[1]))
        WN = xp.sort(xp.abs(WN))
    else:
        raise ValueError(f"Bad type: {filter_type}")

    # Un-warp (for digital designs) and rescale to `fs` units if given.
    wn = _postprocess_wn(WN, analog, fs, xp=xp)

    return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Chebyshev type I filter order selection.
    Return the order of the lowest order digital or analog Chebyshev Type I
    filter that loses no more than `gpass` dB in the passband and has at
    least `gstop` dB attenuation in the stopband.
    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:
            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type I filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby1` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `cheby1`.
    See Also
    --------
    cheby1 : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb2ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    Examples
    --------
    Design a digital lowpass filter such that the passband is within 3 dB up
    to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
    frequency response, showing the passband and stopband constraints in gray.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
    >>> b, a = signal.cheby1(N, 3, Wn, 'low')
    >>> w, h = signal.freqz(b, a)
    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
    >>> plt.title('Chebyshev I lowpass filter fit to constraints')
    >>> plt.xlabel('Normalized frequency')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
    >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass
    >>> plt.axis([0.08, 1, -60, 3])
    >>> plt.show()
    """
    xp = array_namespace(wp, ws)
    wp = xp.asarray(wp)
    ws = xp.asarray(ws)
    # Validate the specs, then map the band edges to a lowpass analog
    # prototype: pre-warp (for digital filters) and find the normalized
    # stopband edge ("selectivity" factor) `nat`.
    fs = _validate_fs(fs, allow_none=True)
    _validate_gpass_gstop(gpass, gstop)
    wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog, xp=xp)
    passb, stopb = _pre_warp(wp, ws, analog, xp=xp)
    nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby', xp=xp)
    # Linear-scale (non-dB) gain bounds corresponding to the specs.
    GSTOP = 10 ** (0.1 * builtins.abs(gstop))
    GPASS = 10 ** (0.1 * builtins.abs(gpass))
    # Chebyshev order formula: ratio of the inverse-cosh "discrimination"
    # factor to the inverse-cosh of the selectivity, rounded up.
    v_pass_stop = math.acosh(math.sqrt((GSTOP - 1.0) / (GPASS - 1.0)))
    ord = int(xp.ceil(v_pass_stop / xp.acosh(nat)))
    # For a type-I Chebyshev filter the natural frequencies are simply the
    # passband edges.
    wn = _postprocess_wn(passb, analog, fs, xp=xp)
    return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Chebyshev type II filter order selection.
    Return the order of the lowest order digital or analog Chebyshev Type II
    filter that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.
    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:
            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type II filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby2` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `cheby2`.
    See Also
    --------
    cheby2 : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, ellipord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    Examples
    --------
    Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
    0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
    0.6*(fs/2). Plot its frequency response, showing the passband and
    stopband constraints in gray.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
    >>> b, a = signal.cheby2(N, 60, Wn, 'stop')
    >>> w, h = signal.freqz(b, a)
    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
    >>> plt.title('Chebyshev II bandstop filter fit to constraints')
    >>> plt.xlabel('Normalized frequency')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop
    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass
    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop
    >>> plt.axis([0.06, 1, -80, 3])
    >>> plt.show()
    """
    # Validate specs, then reduce the problem to a lowpass analog prototype:
    # pre-warp the edges (digital case) and get the normalized stopband edge
    # ("selectivity") `nat`.
    xp = array_namespace(wp, ws)
    wp, ws = map(xp.asarray, (wp, ws))
    fs = _validate_fs(fs, allow_none=True)
    _validate_gpass_gstop(gpass, gstop)
    wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog, xp=xp)
    passb, stopb = _pre_warp(wp, ws, analog, xp=xp)
    nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby', xp=xp)
    # Linear-scale gain bounds corresponding to the dB specifications.
    GSTOP = 10 ** (0.1 * builtins.abs(gstop))
    GPASS = 10 ** (0.1 * builtins.abs(gpass))
    # Chebyshev order formula: inverse-cosh "discrimination" over
    # inverse-cosh "selectivity", rounded up to an integer order.
    v_pass_stop = math.acosh(math.sqrt((GSTOP - 1.0) / (GPASS - 1.0)))
    ord = int(xp.ceil(v_pass_stop / xp.acosh(nat)))
    # Find frequency where analog response is -gpass dB.
    # Then convert back from low-pass prototype to the original filter.
    new_freq = math.cosh(1.0 / ord * v_pass_stop)
    new_freq = 1.0 / new_freq
    if filter_type == 1:  # lowpass
        nat = passb / new_freq
    elif filter_type == 2:  # highpass
        nat = passb * new_freq
    elif filter_type == 3:  # bandstop
        nat0 = (new_freq / 2.0 * (passb[0] - passb[1]) +
                math.sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
                          passb[1] * passb[0]))
        # Second edge is geometrically symmetric about the band center.
        nat1 = passb[1] * passb[0] / nat0
        nat = xp.asarray([float(nat0), float(nat1)])
    elif filter_type == 4:  # bandpass
        nat0 = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
                math.sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
                          passb[1] * passb[0]))
        # Second edge is geometrically symmetric about the band center.
        nat1 = passb[0] * passb[1] / nat0
        nat = xp.asarray([float(nat0), float(nat1)])
    wn = _postprocess_wn(nat, analog, fs, xp=xp)
    return ord, wn
_POW10_LOG10 = math.log(10)
def _pow10m1(x):
"""10 ** x - 1 for x near 0"""
return math.expm1(_POW10_LOG10 * x)
def ellipord(wp, ws, gpass, gstop, analog=False, fs=None):
    """Elliptic (Cauer) filter order selection.
    Return the order of the lowest order digital or analog elliptic filter
    that loses no more than `gpass` dB in the passband and has at least
    `gstop` dB attenuation in the stopband.
    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies.
        For digital filters, these are in the same units as `fs`. By default,
        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
        half-cycles / sample.) For example:
            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    Returns
    -------
    ord : int
        The lowest order for an Elliptic (Cauer) filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results. If `fs` is specified,
        this is in the same units, and `fs` must also be passed to `ellip`.
    See Also
    --------
    ellip : Filter design using order and critical points
    buttord : Find order and critical points from passband and stopband spec
    cheb1ord, cheb2ord
    iirfilter : General filter design using order and critical frequencies
    iirdesign : General filter design using passband and stopband spec
    Examples
    --------
    Design an analog highpass filter such that the passband is within 3 dB
    above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
    frequency response, showing the passband and stopband constraints in gray.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
    >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
    >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.title('Elliptical highpass filter fit to constraints')
    >>> plt.xlabel('Frequency [rad/s]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.grid(which='both', axis='both')
    >>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
    >>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
    >>> plt.axis([1, 300, -80, 3])
    >>> plt.show()
    """
    # Validate specs and reduce to a lowpass analog prototype; `nat` is the
    # normalized stopband edge ("selectivity" factor).
    xp = array_namespace(wp, ws)
    wp, ws = map(xp.asarray, (wp, ws))
    fs = _validate_fs(fs, allow_none=True)
    _validate_gpass_gstop(gpass, gstop)
    wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog, xp=xp)
    passb, stopb = _pre_warp(wp, ws, analog, xp=xp)
    nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'ellip', xp=xp)
    # Squared "discrimination" ratio; the expm1-based helper keeps it
    # accurate for small gpass values.
    arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop)
    # Reciprocal selectivity, converted to a NumPy value because
    # scipy.special functions operate on NumPy inputs regardless of xp.
    arg0 = 1.0 / nat
    arg0 = np.asarray(arg0)
    # Pairs of complete elliptic integrals (K(m), K(1-m)) for both ratios;
    # ellipkm1 computes K(1-m) accurately for m near 1.
    d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2)
    d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq)
    # Elliptic order formula: ceil of the ratio of the two K/K' quotients.
    ord = int(np.ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
    wn = _postprocess_wn(passb, analog, fs, xp=xp)
    return ord, wn
def buttap(N, *, xp=None, device=None):
    """Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
    The filter will have an angular (e.g., rad/s) cutoff frequency of 1.
    Parameters
    ----------
    N : int
        The order of the filter
    %(xp_device_snippet)s
    Returns
    -------
    z : ndarray[float64]
        Zeros of the transfer function. Is always an empty array.
    p : ndarray[complex128]
        Poles of the transfer function.
    k : float
        Gain of the transfer function.
    See Also
    --------
    butter : Filter design function using this prototype
    """
    if xp is None:
        xp = np_compat
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    # All-pole prototype: no finite zeros.
    z = xp.asarray([], device=device, dtype=xp.float64)
    # Odd integers -(N-1), ..., N-1; for odd N this includes 0 so that one
    # pole lands exactly on the negative real axis.
    odd_indices = xp.arange(-N + 1, N, 2, device=device, dtype=xp.float64)
    angles = xp.pi * odd_indices / (2 * N)
    # Poles are spaced uniformly on the left half of the unit circle.
    p = -xp.exp(1j * angles)
    k = 1.0
    return z, p, k
def cheb1ap(N, rp, *, xp=None, device=None):
    """
    Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
    The returned filter prototype has `rp` decibels of ripple in the passband.
    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first drops below ``-rp``.
    Parameters
    ----------
    N : int
        The order of the filter
    rp: float
        The ripple intensity
    %(xp_device_snippet)s
    Returns
    -------
    z : ndarray[float64]
        Zeros of the transfer function. Is always an empty array.
    p : ndarray[complex128]
        Poles of the transfer function.
    k : float
        Gain of the transfer function.
    See Also
    --------
    cheby1 : Filter design function using this prototype
    """
    if xp is None:
        xp = np_compat
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    if N == 0:
        # Zero-order filter has no poles or zeros; handled separately to
        # avoid a divide-by-zero below. Even-order Chebyshev-I filters have
        # a DC gain of -rp dB.
        return (
            xp.asarray([], device=device, dtype=xp.float64),
            xp.asarray([], device=device, dtype=xp.complex128), 10**(-rp/20)
        )
    # All-pole prototype: no finite zeros.
    z = xp.asarray([], device=device, dtype=xp.float64)
    # Ripple factor (epsilon)
    eps = math.sqrt(10 ** (0.1 * rp) - 1.0)
    mu = 1.0 / N * math.asinh(1 / eps)
    # Odd integers -(N-1), ..., N-1 give pole angles; the poles lie on an
    # ellipse in the left half of the S-plane.
    odd_indices = xp.arange(-N+1, N, 2, dtype=xp.float64, device=device)
    angles = xp.pi * odd_indices / (2*N)
    p = -xp.sinh(mu + 1j*angles)
    # Gain that normalizes the DC response; even orders sit -rp dB down.
    k = xp.real(xp.prod(-p, axis=0))
    if N % 2 == 0:
        k = k / math.sqrt(1 + eps * eps)
    return z, p, k
def cheb2ap(N, rs, *, xp=None, device=None):
    """
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
    The returned filter prototype has attenuation of at least ``rs`` decibels
    in the stopband.
    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the attenuation first reaches ``rs``.
    Parameters
    ----------
    N : int
        The order of the filter
    rs : float
        The attenuation in the stopband
    %(xp_device_snippet)s
    Returns
    -------
    z : ndarray[complex128]
        Zeros of the transfer function.
    p : ndarray[complex128]
        Poles of the transfer function.
    k : float
        Gain of the transfer function.
    See Also
    --------
    cheby2 : Filter design function using this prototype
    """
    if xp is None:
        xp = np_compat
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        return (
            xp.asarray([], device=device, dtype=xp.complex128),
            xp.asarray([], device=device, dtype=xp.complex128),
            1.0
        )
    # Ripple factor (epsilon)
    de = 1.0 / math.sqrt(10 ** (0.1 * rs) - 1)
    mu = math.asinh(1.0 / de) / N
    if N % 2:
        # Odd order: skip index 0 (which would place a zero at infinity);
        # an odd-order type-II filter has only N - 1 finite zeros.
        m = xp.concat(
                (xp.arange(-N + 1, 0, 2, dtype=xp.float64, device=device),
                 xp.arange(2, N, 2, dtype=xp.float64, device=device)
            )
        )
    else:
        m = xp.arange(-N+1, N, 2, dtype=xp.float64, device=device)
    # Zeros lie on the imaginary axis at reciprocals of Chebyshev nodes.
    z = 1j / xp.sin(m * xp.pi / (2 * N))
    # Poles around the unit circle like Butterworth
    m1 = xp.arange(-N+1, N, 2, dtype=xp.float64, device=device)
    theta1 = xp.pi * m1 / (2 * N)
    # Type-II poles are reciprocals of type-I poles on the ellipse.
    p = -1 / xp.sinh(mu + 1j*theta1)
    # Normalize so the DC gain is unity.
    k = xp.real(xp.prod(-p, axis=0) / xp.prod(-z, axis=0))
    return z, p, k
# Tolerance (roughly float64 machine epsilon) used below to drop
# negligible Jacobi-elliptic values when forming elliptic-filter zeros/poles.
EPSILON = 2e-16
# number of terms in solving degree equation
_ELLIPDEG_MMAX = 7
def _ellipdeg(n, m1):
    """Solve degree equation using nomes

    Given n, m1, solve
    n * K(m) / K'(m) = K1(m1) / K1'(m1)
    for m

    See [1], Eq. (49)

    References
    ----------
    .. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
           https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
    """
    # Nome of m1, then the nome of the sought modulus follows directly
    # from the degree equation (ratios of K/K' map to powers of the nome).
    big_k1 = special.ellipk(m1)
    big_k1_comp = special.ellipkm1(m1)
    nome1 = np.exp(-np.pi * big_k1_comp / big_k1)
    nome = nome1 ** (1/n)
    # Invert the nome with a truncated theta-function series (Eq. 49);
    # _ELLIPDEG_MMAX terms are ample since the series converges rapidly.
    num_exps = np.arange(_ELLIPDEG_MMAX + 1)
    den_exps = np.arange(1, _ELLIPDEG_MMAX + 2)
    numerator = np.sum(nome ** (num_exps * (num_exps + 1)))
    denominator = 1 + 2 * np.sum(nome ** (den_exps ** 2))
    return 16 * nome * (numerator / denominator) ** 4
# Maximum number of iterations in Landen transformation recursion
# sequence. 10 is conservative; unit tests pass with 4, Orfanidis
# (see _arc_jac_cn [1]) suggests 5.
_ARC_JAC_SN_MAXITER = 10
def _arc_jac_sn(w, m):
"""Inverse Jacobian elliptic sn
Solve for z in w = sn(z, m)
Parameters
----------
w : complex scalar
argument
m : scalar
modulus; in interval [0, 1]
See [1], Eq. (56)
References
----------
.. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
"""
def _complement(kx):
# (1-k**2) ** 0.5; the expression below
# works for small kx
return ((1 - kx) * (1 + kx)) ** 0.5
k = m ** 0.5
if k > 1:
return np.nan
elif k == 1:
return np.arctanh(w)
ks = [k]
niter = 0
while ks[-1] != 0:
k_ = ks[-1]
k_p = _complement(k_)
ks.append((1 - k_p) / (1 + k_p))
niter += 1
if niter > _ARC_JAC_SN_MAXITER:
raise ValueError('Landen transformation not converging')
K = np.prod(1 + np.array(ks[1:])) * np.pi/2
wns = [w]
for kn, knext in zip(ks[:-1], ks[1:]):
wn = wns[-1]
wnext = (2 * wn /
((1 + knext) * (1 + _complement(kn * wn))))
wns.append(wnext)
u = 2 / np.pi * np.arcsin(wns[-1])
z = K * u
return z
def _arc_jac_sc1(w, m):
    """Real inverse Jacobian sc, with complementary modulus

    Solve for z in w = sc(z, 1-m)

    w - real scalar
    m - modulus

    From [1], sc(z, m) = -i * sn(i * z, 1 - m)

    References
    ----------
    # noqa: E501
    .. [1] https://functions.wolfram.com/EllipticFunctions/JacobiSC/introductions/JacobiPQs/ShowAll.html,
       "Representations through other Jacobi functions"
    """
    # Using sc(z, 1-m) = -i*sn(i*z, m): invert sn at the rotated argument.
    # For a real w the result must be purely imaginary; a non-negligible
    # real part means the inversion failed.
    rotated = _arc_jac_sn(1j * w, m)
    if abs(rotated.real) > 1e-14:
        raise ValueError
    return rotated.imag
def ellipap(N, rp, rs, *, xp=None, device=None):
    """Return (z,p,k) of Nth-order elliptic analog lowpass filter.
    The filter is a normalized prototype that has `rp` decibels of ripple
    in the passband and a stopband `rs` decibels down.
    The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1,
    defined as the point at which the gain first drops below ``-rp``.
    Parameters
    ----------
    N : int
        The order of the filter
    rp : float
        The passband ripple intensity
    rs : float
        The stopband attenuation
    %(xp_device_snippet)s
    Returns
    -------
    z : ndarray[complex128]
        Zeros of the transfer function.
    p : ndarray[complex128]
        Poles of the transfer function.
    k : float
        Gain of the transfer function.
    See Also
    --------
    ellip : Filter design function using this prototype
    References
    ----------
    .. [1] Lutovac, Tosic, and Evans, "Filter Design for Signal Processing",
           Chapters 5 and 12.
    .. [2] Orfanidis, "Lecture Notes on Elliptic Filter Design",
           https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
    """
    if xp is None:
        xp = np_compat
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    elif N == 0:
        # Avoid divide-by-zero warning
        # Even order filters have DC gain of -rp dB
        return (
            xp.asarray([], device=device, dtype=xp.complex128),
            xp.asarray([], device=device, dtype=xp.complex128),
            10**(-rp/20)
        )
    elif N == 1:
        # First-order case reduces to a single real pole; handled in closed
        # form to avoid the elliptic-function machinery.
        p = -math.sqrt(1.0 / _pow10m1(0.1 * rp))
        k = -p
        z = []
        return (
            xp.asarray(z, device=device, dtype=xp.complex128),
            xp.asarray(p, device=device, dtype=xp.complex128), k
        )
    # Squared passband ripple factor and the squared "discrimination"
    # ratio; _pow10m1 keeps both accurate for small rp.
    eps_sq = _pow10m1(0.1 * rp)
    eps = math.sqrt(eps_sq)
    ck1_sq = eps_sq / _pow10m1(0.1 * rs)
    if ck1_sq == 0:
        raise ValueError("Cannot design a filter with given rp and rs"
                         " specifications.")
    # do computations with numpy, xp.asarray the return values
    # Complete elliptic integrals K(m) and K(1-m) of the discrimination.
    val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq)
    # Modulus solving the degree equation (see _ellipdeg / [2], Eq. 49).
    m = _ellipdeg(N, ck1_sq)
    capk = special.ellipk(m)
    # Jacobi elliptic functions at the Chebyshev-like grid points.
    j = np.arange(1 - N % 2, N, 2)
    jj = len(j)
    [s, c, d, phi] = special.ellipj(j * capk / N, m * np.ones(jj))
    # Drop the (near-)zero sn values (odd N) that would give a zero at
    # infinity, then place the finite zeros on the imaginary axis in
    # conjugate pairs.
    snew = np.compress(abs(s) > EPSILON, s, axis=-1)
    z = 1.0 / (np.sqrt(m) * snew)
    z = 1j * z
    z = np.concatenate((z, np.conjugate(z)))
    # Pole parameter from the real inverse Jacobi sc (see [2]).
    r = _arc_jac_sc1(1. / eps, ck1_sq)
    v0 = capk * r / (N * val[0])
    [sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
    p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
    if N % 2:
        # Odd N: one pole is real; only conjugate the genuinely complex
        # ones (relative imaginary part above EPSILON).
        newp = np.compress(
            abs(p.imag) > EPSILON * np.sqrt(np.sum(p * np.conjugate(p), axis=0).real),
            p, axis=-1
        )
        p = np.concatenate((p, np.conjugate(newp)))
    else:
        p = np.concatenate((p, np.conjugate(p)))
    # Gain normalizing the DC response; even orders sit -rp dB down at DC.
    k = (np.prod(-p, axis=0) / np.prod(-z, axis=0)).real
    if N % 2 == 0:
        k = k / np.sqrt(1 + eps_sq)
    return (
        xp.asarray(z, device=device, dtype=xp.complex128),
        xp.asarray(p, device=device, dtype=xp.complex128), float(k)
    )
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
    """
    Return the coefficients of Bessel polynomial of degree `n`

    If `reverse` is true, a reverse Bessel polynomial is output.

    Output is a list of coefficients:
    [1]                   = 1
    [1,  1]               = 1*s   +  1
    [1,  3,  3]           = 1*s^2 +  3*s   +  3
    [1,  6, 15, 15]       = 1*s^3 +  6*s^2 + 15*s   +  15
    [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
    etc.

    Output is a Python list of arbitrary precision long ints, so n is only
    limited by your hardware's memory.

    Sequence is http://oeis.org/A001498, and output can be confirmed to
    match http://oeis.org/A001498/b001498.txt :

    >>> from scipy.signal._filter_design import _bessel_poly
    >>> i = 0
    >>> for n in range(51):
    ...     for x in _bessel_poly(n, reverse=True):
    ...         print(i, x)
    ...         i += 1
    """
    if abs(int(n)) != n:
        raise ValueError("Polynomial order must be a nonnegative integer")
    n = int(n)  # np.int32 doesn't work, for instance
    # Coefficient of s^(n-k) is (2n-k)! / ((n-k)! * k! * 2^(n-k)); the
    # falling factorial keeps the arithmetic in exact integers so the
    # floor division is exact.
    coefficients = [
        _falling_factorial(2 * n - k, n) // (2 ** (n - k) * math.factorial(k))
        for k in range(n + 1)
    ]
    return coefficients[::-1] if reverse else coefficients
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return np.asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = np.array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
    """
    Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
    modified Bessel function of the second kind
    """
    if N == 0:
        return np.asarray([])

    # Approximate starting points from the Campos-Calderon polynomial fit.
    x0 = _campos_zeros(N)

    # exp(1/x)*K_{N+0.5}(1/x) has the same zeros as the Nth-order ordinary
    # Bessel polynomial y_N(x); kve is the scaled (well-behaved) form.
    def f(x):
        return special.kve(N+0.5, 1/x)

    # Analytic first derivative of f, via the Bessel recurrence.
    def fp(x):
        return (special.kve(N-0.5, 1/x)/(2*x**2) -
                special.kve(N+0.5, 1/x)/(x**2) +
                special.kve(N+1.5, 1/x)/(2*x**2))

    # Polish: Aberth-Ehrlich on all roots simultaneously, then per-root
    # Newton iterations for the last few digits.
    zeros = _aberth(f, fp, x0)
    for i in range(len(zeros)):
        zeros[i] = optimize.newton(f, zeros[i], fp, tol=1e-15)

    # Enforce exact conjugate symmetry by averaging with the reversed
    # conjugate set.
    zeros = np.mean((zeros, zeros[::-1].conj()), 0)

    # The zeros of y_N(x) must sum to -1; use that as an accuracy check.
    if abs(np.sum(zeros) + 1) > 1e-15:
        raise RuntimeError('Generated zeros are inaccurate')

    return zeros
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = np.asarray(p, dtype=np.complex128)
def G(w):
"""
Gain of filter
"""
return np.abs(k / np.prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/math.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase', *, xp=None, device=None):
    """
    Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
    Parameters
    ----------
    N : int
        The order of the filter.
    norm : {'phase', 'delay', 'mag'}, optional
        Frequency normalization:
        ``phase``
            The filter is normalized such that the phase response reaches its
            midpoint at an angular (e.g., rad/s) cutoff frequency of 1. This
            happens for both low-pass and high-pass filters, so this is the
            "phase-matched" case. [6]_
            The magnitude response asymptotes are the same as a Butterworth
            filter of the same order with a cutoff of `Wn`.
            This is the default, and matches MATLAB's implementation.
        ``delay``
            The filter is normalized such that the group delay in the passband
            is 1 (e.g., 1 second). This is the "natural" type obtained by
            solving Bessel polynomials
        ``mag``
            The filter is normalized such that the gain magnitude is -3 dB at
            angular frequency 1. This is called "frequency normalization" by
            Bond. [1]_
        .. versionadded:: 0.18.0
    %(xp_device_snippet)s
    Returns
    -------
    z : ndarray[float64]
        Zeros of the transfer function. Is always an empty array.
    p : ndarray[complex128]
        Poles of the transfer function.
    k : float
        Gain of the transfer function. For phase-normalized, this is always 1.
    See Also
    --------
    bessel : Filter design function using this prototype
    Notes
    -----
    To find the pole locations, approximate starting points are generated [2]_
    for the zeros of the ordinary Bessel polynomial [3]_, then the
    Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
    calculate more accurate zeros, and these locations are then inverted about
    the unit circle.
    References
    ----------
    .. [1] C.R. Bond, "Bessel Filter Constants",
           http://www.crbond.com/papers/bsf.pdf
    .. [2] Campos and Calderon, "Approximate closed-form formulas for the
           zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
    .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
           Characteristics", Proceedings of the Institution of Electrical
           Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
    .. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
           Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
           April 1973
    .. [5] Ehrlich, "A modified Newton method for polynomials", Communications
           of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
           :DOI:`10.1145/363067.363115`
    .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
           Others", RaneNote 147, 1998,
           https://www.ranecommercial.com/legacy/note147.html
    """
    if xp is None:
        xp = np_compat
    if abs(int(N)) != N:
        raise ValueError("Filter order must be a nonnegative integer")
    N = int(N)  # calculation below doesn't always fit in np.int64
    if N == 0:
        p = []
        k = 1
    else:
        # Find roots of reverse Bessel polynomial
        p = 1/_bessel_zeros(N)
        # Constant term of the reverse Bessel polynomial: (2N)! / (N! 2^N).
        a_last = _falling_factorial(2*N, N) // 2**N
        # Shift them to a different normalization if required
        if norm in ('delay', 'mag'):
            # Normalized for group delay of 1
            k = a_last
            if norm == 'mag':
                # -3 dB magnitude point is at 1 rad/sec
                norm_factor = _norm_factor(p, k)
                p /= norm_factor
                # Rescale the gain to keep the DC gain at unity.
                k = norm_factor**-N * a_last
        elif norm == 'phase':
            # Phase-matched (1/2 max phase shift at 1 rad/sec)
            # Asymptotes are same as Butterworth filter
            p *= 10**(-math.log10(a_last)/N)
            k = 1
        else:
            raise ValueError('normalization not understood')
    # All-pole filter: no finite zeros.
    z = xp.asarray([], device=device, dtype=xp.float64)
    return (
        z,
        xp.asarray(p, device=device, dtype=xp.complex128),
        float(k)
    )
def iirnotch(w0, Q, fs=2.0, *, xp=None, device=None):
    """
    Design second-order IIR notch digital filter.
    A notch filter is a band-stop filter with a narrow bandwidth
    (high quality factor). It rejects a narrow frequency band and
    leaves the rest of the spectrum little changed.
    Parameters
    ----------
    w0 : float
        Frequency to remove from a signal. If `fs` is specified, this is in
        the same units as `fs`. By default, it is a normalized scalar that must
        satisfy  ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
        sampling frequency.
    Q : float
        Quality factor. Dimensionless parameter that characterizes
        notch filter -3 dB bandwidth ``bw`` relative to its center
        frequency, ``Q = w0/bw``.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    %(xp_device_snippet)s
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials
        of the IIR filter.
    See Also
    --------
    iirpeak
    Notes
    -----
    .. versionadded:: 0.19.0
    References
    ----------
    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
           Prentice-Hall, 1996
    Examples
    --------
    Design and plot filter to remove the 60 Hz component from a
    signal sampled at 200 Hz, using a quality factor Q = 30
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> fs = 200.0  # Sample frequency (Hz)
    >>> f0 = 60.0  # Frequency to be removed from signal (Hz)
    >>> Q = 30.0  # Quality factor
    >>> # Design notch filter
    >>> b, a = signal.iirnotch(f0, Q, fs)
    >>> # Frequency response
    >>> freq, h = signal.freqz(b, a, fs=fs)
    >>> # Plot
    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
    >>> ax[0].set_title("Frequency Response")
    >>> ax[0].set_ylabel("Amplitude [dB]", color='blue')
    >>> ax[0].set_xlim([0, 100])
    >>> ax[0].set_ylim([-25, 10])
    >>> ax[0].grid(True)
    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
    >>> ax[1].set_ylabel("Phase [deg]", color='green')
    >>> ax[1].set_xlabel("Frequency [Hz]")
    >>> ax[1].set_xlim([0, 100])
    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
    >>> ax[1].set_ylim([-90, 90])
    >>> ax[1].grid(True)
    >>> plt.show()
    """
    # Thin wrapper: delegate to the shared second-order notch/peak designer.
    return _design_notch_peak_filter(w0, Q, "notch", fs, xp=xp, device=device)
def iirpeak(w0, Q, fs=2.0, *, xp=None, device=None):
    """
    Design second-order IIR peak (resonant) digital filter.
    A peak filter is a band-pass filter with a narrow bandwidth
    (high quality factor). It rejects components outside a narrow
    frequency band.
    Parameters
    ----------
    w0 : float
        Frequency to be retained in a signal. If `fs` is specified, this is in
        the same units as `fs`. By default, it is a normalized scalar that must
        satisfy  ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
        sampling frequency.
    Q : float
        Quality factor. Dimensionless parameter that characterizes
        peak filter -3 dB bandwidth ``bw`` relative to its center
        frequency, ``Q = w0/bw``.
    fs : float, optional
        The sampling frequency of the digital system.
        .. versionadded:: 1.2.0
    %(xp_device_snippet)s
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (``b``) and denominator (``a``) polynomials
        of the IIR filter.
    See Also
    --------
    iirnotch
    Notes
    -----
    .. versionadded:: 0.19.0
    References
    ----------
    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
           Prentice-Hall, 1996
    Examples
    --------
    Design and plot filter to remove the frequencies other than the 300 Hz
    component from a signal sampled at 1000 Hz, using a quality factor Q = 30
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> fs = 1000.0  # Sample frequency (Hz)
    >>> f0 = 300.0  # Frequency to be retained (Hz)
    >>> Q = 30.0  # Quality factor
    >>> # Design peak filter
    >>> b, a = signal.iirpeak(f0, Q, fs)
    >>> # Frequency response
    >>> freq, h = signal.freqz(b, a, fs=fs)
    >>> # Plot
    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
    >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
    >>> ax[0].set_title("Frequency Response")
    >>> ax[0].set_ylabel("Amplitude [dB]", color='blue')
    >>> ax[0].set_xlim([0, 500])
    >>> ax[0].set_ylim([-50, 10])
    >>> ax[0].grid(True)
    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
    >>> ax[1].set_ylabel("Phase [deg]", color='green')
    >>> ax[1].set_xlabel("Frequency [Hz]")
    >>> ax[1].set_xlim([0, 500])
    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
    >>> ax[1].set_ylim([-90, 90])
    >>> ax[1].grid(True)
    >>> plt.show()
    """
    # Thin wrapper: delegate to the shared second-order notch/peak designer.
    return _design_notch_peak_filter(w0, Q, "peak", fs, xp=xp, device=device)
def _design_notch_peak_filter(w0, Q, ftype, fs=2.0, *, xp=None, device=None):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. If `fs` is specified,
this is in the same units as `fs`. By default, it is a normalized
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
fs : float, optional
The sampling frequency of the digital system.
.. versionadded:: 1.2.0:
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
if xp is None:
xp = np_compat
fs = _validate_fs(fs, allow_none=False)
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
w0 = 2 * w0 / fs
# Checks if w0 is within the range
if w0 > 1.0 or w0 < 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw * xp.pi
w0 = w0 * xp.pi
if ftype not in ("notch", "peak"):
raise ValueError("Unknown ftype.")
# Compute beta according to Eqs. 11.3.4 (p.575) and 11.3.19 (p.579) from
# reference [1]. Due to assuming a -3 dB attenuation value, i.e, assuming
# gb = 1 / np.sqrt(2), the following terms simplify to:
# (np.sqrt(1.0 - gb**2.0) / gb) = 1
# (gb / np.sqrt(1.0 - gb**2.0)) = 1
beta = math.tan(bw / 2.0)
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0 / (1.0 + beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain * xp.asarray([1.0, -2.0*math.cos(w0), 1.0], device=device)
else:
b = (1.0 - gain) * xp.asarray([1.0, 0.0, -1.0], device=device)
a = xp.asarray([1.0, -2.0 * gain * math.cos(w0), (2.0*gain - 1.0)], device=device)
return b, a
def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False, xp=None, device=None):
"""
Design IIR notching or peaking digital comb filter.
A notching comb filter consists of regularly-spaced band-stop filters with
a narrow bandwidth (high quality factor). Each rejects a narrow frequency
band and leaves the rest of the spectrum little changed.
A peaking comb filter consists of regularly-spaced band-pass filters with
a narrow bandwidth (high quality factor). Each rejects components outside
a narrow frequency band.
Parameters
----------
w0 : float
The fundamental frequency of the comb filter (the spacing between its
peaks). This must evenly divide the sampling frequency. If `fs` is
specified, this is in the same units as `fs`. By default, it is
a normalized scalar that must satisfy ``0 < w0 < 1``, with
``w0 = 1`` corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : {'notch', 'peak'}
The type of comb filter generated by the function. If 'notch', then
the Q factor applies to the notches. If 'peak', then the `Q` factor
applies to the peaks. Default is 'notch'.
fs : float, optional
The sampling frequency of the signal. Default is 2.0.
pass_zero : bool, optional
If False (default), the notches (nulls) of the filter are centered on
frequencies ``[0, w0, 2*w0, ...]``, and the peaks are centered on the
midpoints ``[w0/2, 3*w0/2, 5*w0/2, ...]``. If True, the peaks are centered
on ``[0, w0, 2*w0, ...]`` (passing zero frequency) and vice versa.
.. versionadded:: 1.9.0
%(xp_device_snippet)s
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
Raises
------
ValueError
If `w0` is less than or equal to 0 or greater than or equal to
``fs/2``, if `fs` is not divisible by `w0`, if `ftype`
is not 'notch' or 'peak'
See Also
--------
iirnotch
iirpeak
Notes
-----
For implementation details, see [1]_. The TF implementation of the
comb filter is numerically stable even at higher orders due to the
use of a single repeated pole, which won't suffer from precision loss.
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996, ch. 11, "Digital Filter Design"
Examples
--------
Design and plot notching comb filter at 20 Hz for a
signal sampled at 200 Hz, using quality factor Q = 30
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 20.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design notching comb filter
>>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
>>> ax[0].plot(freq, 20*np.log10(abs(response)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude [dB]", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-30, 10])
>>> ax[0].grid(True)
>>> ax[1].plot(freq, np.mod(np.angle(h, deg=True) + 180, 360) - 180, color='green')
>>> ax[1].set_ylabel("Phase [deg]", color='green')
>>> ax[1].set_xlabel("Frequency [Hz]")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid(True)
>>> plt.show()
Design and plot peaking comb filter at 250 Hz for a
signal sampled at 1000 Hz, using quality factor Q = 30
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 250.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design peaking filter
>>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs, pass_zero=True)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
>>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude [dB]", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-80, 10])
>>> ax[0].grid(True)
>>> ax[1].plot(freq, np.mod(np.angle(h)*180/np.pi + 180, 360) - 180, color='green')
>>> ax[1].set_ylabel("Phase [deg]", color='green')
>>> ax[1].set_xlabel("Frequency [Hz]")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid(True)
>>> plt.show()
"""
if xp is None:
xp = np_compat
# Convert w0, Q, and fs to float
w0 = float(w0)
Q = float(Q)
fs = _validate_fs(fs, allow_none=False)
# Check for invalid cutoff frequency or filter type
ftype = ftype.lower()
if not 0 < w0 < fs / 2:
raise ValueError(f"w0 must be between 0 and {fs / 2}"
f" (Nyquist), but given {w0}.")
if ftype not in ('notch', 'peak'):
raise ValueError('ftype must be either notch or peak.')
# Compute the order of the filter
N = round(fs / w0)
# Check for cutoff frequency divisibility
if abs(w0 - fs/N)/fs > 1e-14:
raise ValueError('fs must be divisible by w0.')
# Compute frequency in radians and filter bandwidth
# Eq. 11.3.1 (p. 574) from reference [1]
w0 = (2 * xp.pi * w0) / fs
w_delta = w0 / Q
# Define base gain values depending on notch or peak filter
# Compute -3dB attenuation
# Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1]
if ftype == 'notch':
G0, G = 1, 0
elif ftype == 'peak':
G0, G = 0, 1
# Compute beta according to Eq. 11.5.3 (p. 591) from reference [1]. Due to
# assuming a -3 dB attenuation value, i.e, assuming GB = 1 / np.sqrt(2),
# the following term simplifies to:
# np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) = 1
beta = math.tan(N * w_delta / 4)
# Compute filter coefficients
# Eq 11.5.1 (p. 590) variables a, b, c from reference [1]
ax = (1 - beta) / (1 + beta)
bx = (G0 + G * beta) / (1 + beta)
cx = (G0 - G * beta) / (1 + beta)
# Last coefficients are negative to get peaking comb that passes zero or
# notching comb that doesn't.
negative_coef = ((ftype == 'peak' and pass_zero) or
(ftype == 'notch' and not pass_zero))
# Compute numerator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# b - cz^-N or b + cz^-N
b = xp.zeros(N + 1, device=device)
sgn = -1. if negative_coef else 1
xpx.at(b, 0).set(bx)
xpx.at(b, -1).set(sgn * cx)
# Compute denominator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# 1 - az^-N or 1 + az^-N
a = xp.zeros(N + 1, device=device)
xpx.at(a, 0).set(1.)
xpx.at(a, -1).set(sgn * ax)
return b, a
def _hz_to_erb(hz):
"""
Utility for converting from frequency (Hz) to the
Equivalent Rectangular Bandwidth (ERB) scale
ERB = frequency / EarQ + minBW
"""
EarQ = 9.26449
minBW = 24.7
return hz / EarQ + minBW
def gammatone(freq, ftype, order=None, numtaps=None, fs=None, *, xp=None, device=None):
"""
Gammatone filter design.
This function computes the coefficients of an FIR or IIR gammatone
digital filter [1]_.
Parameters
----------
freq : float
Center frequency of the filter (expressed in the same units
as `fs`).
ftype : {'fir', 'iir'}
The type of filter the function generates. If 'fir', the function
will generate an Nth order FIR gammatone filter. If 'iir', the
function will generate an 8th order digital IIR filter, modeled as
as 4th order gammatone filter.
order : int, optional
The order of the filter. Only used when ``ftype='fir'``.
Default is 4 to model the human auditory system. Must be between
0 and 24.
numtaps : int, optional
Length of the filter. Only used when ``ftype='fir'``.
Default is ``fs*0.015`` if `fs` is greater than 1000,
15 if `fs` is less than or equal to 1000.
fs : float, optional
The sampling frequency of the signal. `freq` must be between
0 and ``fs/2``. Default is 2.
%(xp_device_snippet)s
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials of the filter.
Raises
------
ValueError
If `freq` is less than or equal to 0 or greater than or equal to
``fs/2``, if `ftype` is not 'fir' or 'iir', if `order` is less than
or equal to 0 or greater than 24 when ``ftype='fir'``
See Also
--------
firwin
iirfilter
References
----------
.. [1] Slaney, Malcolm, "An Efficient Implementation of the
Patterson-Holdsworth Auditory Filter Bank", Apple Computer
Technical Report 35, 1993, pp.3-8, 34-39.
Examples
--------
16-sample 4th order FIR Gammatone filter centered at 440 Hz
>>> from scipy import signal
>>> signal.gammatone(440, 'fir', numtaps=16, fs=16000)
(array([ 0.00000000e+00, 2.22196719e-07, 1.64942101e-06, 4.99298227e-06,
1.01993969e-05, 1.63125770e-05, 2.14648940e-05, 2.29947263e-05,
1.76776931e-05, 2.04980537e-06, -2.72062858e-05, -7.28455299e-05,
-1.36651076e-04, -2.19066855e-04, -3.18905076e-04, -4.33156712e-04]),
[1.0])
IIR Gammatone filter centered at 440 Hz
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> fc, fs = 440, 16000
>>> b, a = signal.gammatone(fc, 'iir', fs=fs)
>>> w, h = signal.freqz(b, a)
>>> plt.plot(w * fs / (2 * np.pi), 20 * np.log10(abs(h)))
>>> plt.xscale('log')
>>> plt.title('Gammatone filter frequency response')
>>> plt.xlabel('Frequency [Hz]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(fc, color='green') # cutoff frequency
>>> plt.show()
"""
if xp is None:
xp = np_compat
# Converts freq to float
freq = float(freq)
# Set sampling rate if not passed
if fs is None:
fs = 2
fs = _validate_fs(fs, allow_none=False)
# Check for invalid cutoff frequency or filter type
ftype = ftype.lower()
filter_types = ['fir', 'iir']
if not 0 < freq < fs / 2:
raise ValueError(f"The frequency must be between 0 and {fs / 2}"
f" (Nyquist), but given {freq}.")
if ftype not in filter_types:
raise ValueError('ftype must be either fir or iir.')
# Calculate FIR gammatone filter
if ftype == 'fir':
# Set order and numtaps if not passed
if order is None:
order = 4
order = operator.index(order)
if numtaps is None:
numtaps = max(int(fs * 0.015), 15)
numtaps = operator.index(numtaps)
# Check for invalid order
if not 0 < order <= 24:
raise ValueError("Invalid order: order must be > 0 and <= 24.")
# Gammatone impulse response settings
t = xp.arange(numtaps, device=device, dtype=xp_default_dtype(xp)) / fs
bw = 1.019 * _hz_to_erb(freq)
# Calculate the FIR gammatone filter
b = (t ** (order - 1)) * xp.exp(-2 * xp.pi * bw * t)
b = b * xp.cos(2 * xp.pi * freq * t)
# Scale the FIR filter so the frequency response is 1 at cutoff
scale_factor = 2 * (2 * xp.pi * bw) ** (order)
scale_factor /= float_factorial(order - 1)
scale_factor /= fs
b = b * scale_factor
a = xp.asarray([1.0], device=device)
# Calculate IIR gammatone filter
elif ftype == 'iir':
# Raise warning if order and/or numtaps is passed
if order is not None:
warnings.warn('order is not used for IIR gammatone filter.', stacklevel=2)
if numtaps is not None:
warnings.warn('numtaps is not used for IIR gammatone filter.', stacklevel=2)
# Gammatone impulse response settings
T = 1./fs
bw = 2 * math.pi * 1.019 * _hz_to_erb(freq)
fr = 2 * freq * math.pi * T
bwT = bw * T
# Calculate the gain to normalize the volume at the center frequency
g1 = -2 * cmath.exp(2j * fr) * T
g2 = 2 * cmath.exp(-(bwT) + 1j * fr) * T
g3 = math.sqrt(3 + 2 ** (3 / 2)) * math.sin(fr)
g4 = math.sqrt(3 - 2 ** (3 / 2)) * math.sin(fr)
g5 = cmath.exp(2j * fr)
g = g1 + g2 * (math.cos(fr) - g4)
g *= (g1 + g2 * (math.cos(fr) + g4))
g *= (g1 + g2 * (math.cos(fr) - g3))
g *= (g1 + g2 * (math.cos(fr) + g3))
g /= ((-2 / math.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / math.exp(bwT)) ** 4)
g = math.hypot(g.real, g.imag)
# Create empty filter coefficient lists
b = [None] * 5 #np.empty(5)
a = [None] * 9 # np.empty(9)
# Calculate the numerator coefficients
b[0] = (T ** 4) / g
b[1] = -4 * T ** 4 * math.cos(fr) / math.exp(bw * T) / g
b[2] = 6 * T ** 4 * math.cos(2 * fr) / math.exp(2 * bw * T) / g
b[3] = -4 * T ** 4 * math.cos(3 * fr) / math.exp(3 * bw * T) / g
b[4] = T ** 4 * math.cos(4 * fr) / math.exp(4 * bw * T) / g
# Calculate the denominator coefficients
a[0] = 1
a[1] = -8 * math.cos(fr) / math.exp(bw * T)
a[2] = 4 * (4 + 3 * math.cos(2 * fr)) / math.exp(2 * bw * T)
a[3] = -8 * (6 * math.cos(fr) + math.cos(3 * fr))
a[3] /= math.exp(3 * bw * T)
a[4] = 2 * (18 + 16 * math.cos(2 * fr) + math.cos(4 * fr))
a[4] /= math.exp(4 * bw * T)
a[5] = -8 * (6 * math.cos(fr) + math.cos(3 * fr))
a[5] /= math.exp(5 * bw * T)
a[6] = 4 * (4 + 3 * math.cos(2 * fr)) / math.exp(6 * bw * T)
a[7] = -8 * math.cos(fr) / math.exp(7 * bw * T)
a[8] = math.exp(-8 * bw * T)
return xp.asarray(b, device=device), xp.asarray(a, device=device)
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
########## complete the docstrings, on import
_xp_device_snippet = {'xp_device_snippet':
"""\
xp : array_namespace, optional
Optional array namespace.
Should be compatible with the array API standard, or supported by array-api-compat.
Default: ``numpy``
device: any
optional device specification for output. Should match one of the
supported device specification in ``xp``.
"""
}
_names = ["buttap", "cheb1ap", "cheb2ap", "ellipap", "besselap",
"iirnotch", "iirpeak", "iircomb", "gammatone",
]
for name in _names:
window = vars()[name]
window.__doc__ = doccer.docformat(window.__doc__, _xp_device_snippet)
| BadCoefficients |
python | spyder-ide__spyder | spyder/utils/external/dafsa/dafsa.py | {
"start": 10128,
"end": 13227
} | class ____(dict):
"""
Class representing edge objects in a DAFSA.
This class overloads a normal Python dictionary, and in simpler
implementations could potentially be replaced with a pure dictionary.
It was implemented as its own object for homogeneity and for planned
future expansions, particularly in terms of fuzzy automata.
Parameters
----------
node : DAFSANode
Reference to the target node, mandatory. Please note that it
must be a DAFSANode object and *not* a node id.
weight : int
Edge weight as collected from training data. Defaults to 0.
"""
def __init__(self, node, weight=0):
"""
Initializes a DAFSA edge.
"""
# Call super class initialization.
super().__init__()
# Validate values and set them
if not isinstance(node, DAFSANode):
raise TypeError(
"`node` must be a DAFSANode (perhaps a `node_id` was passed?)."
)
self.node = node
self.weight = weight
def __str__(self):
"""
Return a textual representation of the node.
The representation only include the ``node_id``, without information
on the node actual contents.
Returns
-------
string : str
The (potentially ambiguous) textual representation of the
current edge.
"""
return "{node_id: %i, weight: %i}" % (self.node.node_id, self.weight)
def __repr__(self):
"""
Return a full textual representation of the node.
The representation includes information on the entire contents of
the node.
Returns
-------
string : str
The unambiguous textual representation of the current edge.
"""
return "{node: <%s>, weight: %i}" % (repr(self.node), self.weight)
def __hash__(self):
"""
Return a hash for the edge.
The returned has is based on the potentially ambigous string
representation provided by the ``.__str__()`` method, allowing to
use edges as, among others, dictionary keys. The choice of the
potentially ambiguous ``.__str__()`` over ``.__repr__()`` is intentional
and by design and complemented by the ``.repr_hash()`` method.
Returns
-------
hash : number
The hash from the (potentially ambigous) textual representation of
the current edge.
"""
return self.__str__().__hash__()
def repr_hash(self):
"""
Return a hash for the edge.
The returned has is based on the unambigous string
representation provided by the ``.__repr__()`` method, allowing to
use edges as, among others, dictionary keys. The method is
complemented by the ``.__hash__()`` one.
Returns
-------
hash : number
The hash from the unambigous textual representation of the
current edge.
"""
return self.__repr__().__hash__()
| DAFSAEdge |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 17760,
"end": 18140
} | class ____(BoringModel):
"""This model has the super().__init__() call at the end."""
def __init__(self, arg1, arg2, *args, **kwargs):
self.argument1 = arg1 # arg2 intentionally not set
arg1 = "overwritten"
local_var = 1234 # noqa: F841
super().__init__(*args, **kwargs) # this is intentionally here at the end
| LocalVariableModelSuperLast |
python | numba__numba | numba/tests/test_tracing.py | {
"start": 246,
"end": 1013
} | class ____:
"""Capture the trace temporarily for validation."""
def __init__(self):
self.buffer = StringIO()
self.handler = logging.StreamHandler(self.buffer)
def __enter__(self):
self._handlers = logger.handlers
self.buffer = StringIO()
logger.handlers = [logging.StreamHandler(self.buffer)]
def __exit__(self, type, value, traceback):
logger.handlers = self._handlers
def getvalue(self):
# Depending on how the tests are run, object names may be
# qualified by their containing module.
# Remove that to make the trace output independent from the testing mode.
log = self.buffer.getvalue()
log = log.replace(__name__ + '.','')
return log
| CapturedTrace |
python | pydata__xarray | xarray/tests/test_dataset.py | {
"start": 8728,
"end": 299797
} | class ____:
def test_repr(self) -> None:
data = create_test_data(seed=123, use_extension_array=True)
data.attrs["foo"] = "bar"
# need to insert str dtype at runtime to handle different endianness
var5 = (
"\n var5 (dim1) int64[pyarrow] 64B 5 9 7 2 6 2 8 1"
if has_pyarrow
else ""
)
expected = dedent(
f"""\
<xarray.Dataset> Size: 2kB
Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8)
Coordinates:
* dim2 (dim2) float64 72B 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
* dim3 (dim3) {data["dim3"].dtype} 40B 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
* time (time) datetime64[ns] 160B 2000-01-01 2000-01-02 ... 2000-01-20
numbers (dim3) int64 80B 0 1 2 0 0 1 1 2 2 3
Dimensions without coordinates: dim1
Data variables:
var1 (dim1, dim2) float64 576B -0.9891 -0.3678 1.288 ... -0.2116 0.364
var2 (dim1, dim2) float64 576B 0.953 1.52 1.704 ... 0.1347 -0.6423
var3 (dim3, dim1) float64 640B 0.4107 0.9941 0.1665 ... 0.716 1.555
var4 (dim1) category 3{6 if Version(pd.__version__) >= Version("3.0.0dev0") else 2}B b c b a c a c a{var5}
Attributes:
foo: bar"""
)
actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
assert expected == actual
with set_options(display_width=100):
max_len = max(map(len, repr(data).split("\n")))
assert 90 < max_len < 100
expected = dedent(
"""\
<xarray.Dataset> Size: 0B
Dimensions: ()
Data variables:
*empty*"""
)
actual = "\n".join(x.rstrip() for x in repr(Dataset()).split("\n"))
print(actual)
assert expected == actual
# verify that ... doesn't appear for scalar coordinates
data = Dataset({"foo": ("x", np.ones(10))}).mean()
expected = dedent(
"""\
<xarray.Dataset> Size: 8B
Dimensions: ()
Data variables:
foo float64 8B 1.0"""
)
actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
print(actual)
assert expected == actual
# verify long attributes are truncated
data = Dataset(attrs={"foo": "bar" * 1000})
assert len(repr(data)) < 1000
def test_repr_multiindex(self) -> None:
data = create_test_multiindex()
obj_size = np.dtype("O").itemsize
expected = dedent(
f"""\
<xarray.Dataset> Size: {8 * obj_size + 32}B
Dimensions: (x: 4)
Coordinates:
* x (x) object {4 * obj_size}B MultiIndex
* level_1 (x) object {4 * obj_size}B 'a' 'a' 'b' 'b'
* level_2 (x) int64 32B 1 2 1 2
Data variables:
*empty*"""
)
actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
print(actual)
assert expected == actual
# verify that long level names are not truncated
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=("a_quite_long_level_name", "level_2")
)
midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
data = Dataset({}, midx_coords)
expected = dedent(
f"""\
<xarray.Dataset> Size: {8 * obj_size + 32}B
Dimensions: (x: 4)
Coordinates:
* x (x) object {4 * obj_size}B MultiIndex
* a_quite_long_level_name (x) object {4 * obj_size}B 'a' 'a' 'b' 'b'
* level_2 (x) int64 32B 1 2 1 2
Data variables:
*empty*"""
)
actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
print(actual)
assert expected == actual
def test_repr_period_index(self) -> None:
data = create_test_data(seed=456)
data.coords["time"] = pd.period_range("2000-01-01", periods=20, freq="D")
# check that creating the repr doesn't raise an error #GH645
repr(data)
def test_unicode_data(self) -> None:
# regression test for GH834
data = Dataset({"foø": ["ba®"]}, attrs={"å": "∑"})
repr(data) # should not raise
byteorder = "<" if sys.byteorder == "little" else ">"
expected = dedent(
f"""\
<xarray.Dataset> Size: 12B
Dimensions: (foø: 1)
Coordinates:
* foø (foø) {byteorder}U3 12B {"ba®"!r}
Data variables:
*empty*
Attributes:
å: ∑"""
)
actual = str(data)
assert expected == actual
def test_repr_nep18(self) -> None:
class Array:
def __init__(self):
self.shape = (2,)
self.ndim = 1
self.dtype = np.dtype(np.float64)
def __array_function__(self, *args, **kwargs):
return NotImplemented
def __array_ufunc__(self, *args, **kwargs):
return NotImplemented
def __repr__(self):
return "Custom\nArray"
dataset = Dataset({"foo": ("x", Array())})
expected = dedent(
"""\
<xarray.Dataset> Size: 16B
Dimensions: (x: 2)
Dimensions without coordinates: x
Data variables:
foo (x) float64 16B Custom Array"""
)
assert expected == repr(dataset)
def test_info(self) -> None:
ds = create_test_data(seed=123)
ds = ds.drop_vars("dim3") # string type prints differently in PY2 vs PY3
ds.attrs["unicode_attr"] = "ba®"
ds.attrs["string_attr"] = "bar"
buf = StringIO()
ds.info(buf=buf)
expected = dedent(
"""\
xarray.Dataset {
dimensions:
\tdim2 = 9 ;
\ttime = 20 ;
\tdim1 = 8 ;
\tdim3 = 10 ;
variables:
\tfloat64 dim2(dim2) ;
\tdatetime64[ns] time(time) ;
\tfloat64 var1(dim1, dim2) ;
\t\tvar1:foo = variable ;
\tfloat64 var2(dim1, dim2) ;
\t\tvar2:foo = variable ;
\tfloat64 var3(dim3, dim1) ;
\t\tvar3:foo = variable ;
\tint64 numbers(dim3) ;
// global attributes:
\t:unicode_attr = ba® ;
\t:string_attr = bar ;
}"""
)
actual = buf.getvalue()
assert expected == actual
buf.close()
def test_constructor(self) -> None:
x1 = ("x", 2 * np.arange(100))
x2 = ("x", np.arange(1000))
z = (["x", "y"], np.arange(1000).reshape(100, 10))
with pytest.raises(ValueError, match=r"conflicting sizes"):
Dataset({"a": x1, "b": x2})
with pytest.raises(TypeError, match=r"tuple of form"):
Dataset({"x": (1, 2, 3, 4, 5, 6, 7)})
with pytest.raises(ValueError, match=r"already exists as a scalar"):
Dataset({"x": 0, "y": ("x", [1, 2, 3])})
# nD coordinate variable "x" sharing name with dimension
actual = Dataset({"a": x1, "x": z})
assert "x" not in actual.xindexes
_assert_internal_invariants(actual, check_default_indexes=True)
# verify handling of DataArrays
expected = Dataset({"x": x1, "z": z})
actual = Dataset({"z": expected["z"]})
assert_identical(expected, actual)
def test_constructor_1d(self) -> None:
expected = Dataset({"x": (["x"], 5.0 + np.arange(5))})
actual = Dataset({"x": 5.0 + np.arange(5)})
assert_identical(expected, actual)
actual = Dataset({"x": [5, 6, 7, 8, 9]})
assert_identical(expected, actual)
def test_constructor_0d(self) -> None:
expected = Dataset({"x": ([], 1)})
for arg in [1, np.array(1), expected["x"]]:
actual = Dataset({"x": arg})
assert_identical(expected, actual)
class Arbitrary:
pass
d = pd.Timestamp("2000-01-01T12")
args = [
True,
None,
3.4,
np.nan,
"hello",
b"raw",
np.datetime64("2000-01-01"),
d,
d.to_pydatetime(),
Arbitrary(),
]
for arg in args:
print(arg)
expected = Dataset({"x": ([], arg)})
actual = Dataset({"x": arg})
assert_identical(expected, actual)
def test_constructor_auto_align(self) -> None:
a = DataArray([1, 2], [("x", [0, 1])])
b = DataArray([3, 4], [("x", [1, 2])])
# verify align uses outer join
expected = Dataset(
{"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]}
)
actual = Dataset({"a": a, "b": b})
assert_identical(expected, actual)
# regression test for GH346
assert isinstance(actual.variables["x"], IndexVariable)
# variable with different dimensions
c = ("y", [3, 4])
expected2 = expected.merge({"c": c})
actual = Dataset({"a": a, "b": b, "c": c})
assert_identical(expected2, actual)
# variable that is only aligned against the aligned variables
d = ("x", [3, 2, 1])
expected3 = expected.merge({"d": d})
actual = Dataset({"a": a, "b": b, "d": d})
assert_identical(expected3, actual)
e = ("x", [0, 0])
with pytest.raises(ValueError, match=r"conflicting sizes"):
Dataset({"a": a, "b": b, "e": e})
def test_constructor_pandas_sequence(self) -> None:
ds = self.make_example_math_dataset()
pandas_objs = {
var_name: ds[var_name].to_pandas() for var_name in ["foo", "bar"]
}
ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
del ds_based_on_pandas["x"]
assert_equal(ds, ds_based_on_pandas)
# reindex pandas obj, check align works
rearranged_index = reversed(pandas_objs["foo"].index)
pandas_objs["foo"] = pandas_objs["foo"].reindex(rearranged_index)
ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
del ds_based_on_pandas["x"]
assert_equal(ds, ds_based_on_pandas)
def test_constructor_pandas_single(self) -> None:
das = [
DataArray(np.random.rand(4), dims=["a"]), # series
DataArray(np.random.rand(4, 3), dims=["a", "b"]), # df
]
for a in das:
pandas_obj = a.to_pandas()
ds_based_on_pandas = Dataset(pandas_obj) # type: ignore[arg-type] # TODO: improve typing of __init__
for dim in ds_based_on_pandas.data_vars:
assert isinstance(dim, int)
assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim])
def test_constructor_compat(self) -> None:
data = {"x": DataArray(0, coords={"y": 1}), "y": ("z", [1, 1, 1])}
expected = Dataset({"x": 0}, {"y": ("z", [1, 1, 1])})
actual = Dataset(data)
assert_identical(expected, actual)
data = {"y": ("z", [1, 1, 1]), "x": DataArray(0, coords={"y": 1})}
actual = Dataset(data)
assert_identical(expected, actual)
original = Dataset(
{"a": (("x", "y"), np.ones((2, 3)))},
{"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]},
)
expected = Dataset(
{"a": ("x", np.ones(2)), "b": ("y", np.ones(3))},
{"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]},
)
actual = Dataset(
{"a": original["a"][:, 0], "b": original["a"][0].drop_vars("x")}
)
assert_identical(expected, actual)
data = {"x": DataArray(0, coords={"y": 3}), "y": ("z", [1, 1, 1])}
with pytest.raises(MergeError):
Dataset(data)
data = {"x": DataArray(0, coords={"y": 1}), "y": [1, 1]}
actual = Dataset(data)
expected = Dataset({"x": 0}, {"y": [1, 1]})
assert_identical(expected, actual)
def test_constructor_with_coords(self) -> None:
with pytest.raises(ValueError, match=r"found in both data_vars and"):
Dataset({"a": ("x", [1])}, {"a": ("x", [1])})
ds = Dataset({}, {"a": ("x", [1])})
assert not ds.data_vars
assert list(ds.coords.keys()) == ["a"]
mindex = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=("level_1", "level_2")
)
with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
with pytest.warns(
FutureWarning,
match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
):
Dataset({}, {"x": mindex, "y": mindex})
Dataset({}, {"x": mindex, "level_1": range(4)})
def test_constructor_no_default_index(self) -> None:
# explicitly passing a Coordinates object skips the creation of default index
ds = Dataset(coords=Coordinates({"x": [1, 2, 3]}, indexes={}))
assert "x" in ds
assert "x" not in ds.xindexes
def test_constructor_multiindex(self) -> None:
midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
coords = Coordinates.from_pandas_multiindex(midx, "x")
ds = Dataset(coords=coords)
assert_identical(ds, coords.to_dataset())
with pytest.warns(
FutureWarning,
match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
):
Dataset(data_vars={"x": midx})
with pytest.warns(
FutureWarning,
match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
):
Dataset(coords={"x": midx})
def test_constructor_custom_index(self) -> None:
class CustomIndex(Index): ...
coords = Coordinates(
coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()}
)
ds = Dataset(coords=coords)
assert isinstance(ds.xindexes["x"], CustomIndex)
# test coordinate variables copied
assert ds.variables["x"] is not coords.variables["x"]
@pytest.mark.filterwarnings("ignore:return type")
def test_properties(self) -> None:
    """Smoke-test the main Dataset accessor properties on the standard fixture."""
    ds = create_test_data()
    # dims / sizes
    # These exact types aren't public API, but this makes sure we don't
    # change them inadvertently:
    assert isinstance(ds.dims, utils.Frozen)
    # TODO change after deprecation cycle in GH #8500 is complete
    assert isinstance(ds.dims.mapping, dict)
    assert type(ds.dims.mapping) is dict
    # equality between dims and sizes currently warns (GH #8500 deprecation)
    with pytest.warns(
        FutureWarning,
        match=r" To access a mapping from dimension names to lengths, please use `Dataset.sizes`",
    ):
        assert ds.dims == ds.sizes
    assert ds.sizes == {"dim1": 8, "dim2": 9, "dim3": 10, "time": 20}
    # dtypes
    assert isinstance(ds.dtypes, utils.Frozen)
    assert isinstance(ds.dtypes.mapping, dict)
    assert ds.dtypes == {
        "var1": np.dtype("float64"),
        "var2": np.dtype("float64"),
        "var3": np.dtype("float64"),
    }
    # data_vars
    assert list(ds) == list(ds.data_vars)
    assert list(ds.keys()) == list(ds.data_vars)
    assert "aasldfjalskdfj" not in ds.variables
    assert "dim1" in repr(ds.variables)
    assert len(ds) == 3
    assert bool(ds)
    assert list(ds.data_vars) == ["var1", "var2", "var3"]
    assert list(ds.data_vars.keys()) == ["var1", "var2", "var3"]
    assert "var1" in ds.data_vars
    assert "dim1" not in ds.data_vars
    assert "numbers" not in ds.data_vars
    assert len(ds.data_vars) == 3
    # xindexes
    assert set(ds.xindexes) == {"dim2", "dim3", "time"}
    assert len(ds.xindexes) == 3
    assert "dim2" in repr(ds.xindexes)
    assert all(isinstance(idx, Index) for idx in ds.xindexes.values())
    # indexes
    assert set(ds.indexes) == {"dim2", "dim3", "time"}
    assert len(ds.indexes) == 3
    assert "dim2" in repr(ds.indexes)
    assert all(isinstance(idx, pd.Index) for idx in ds.indexes.values())
    # coords
    assert list(ds.coords) == ["dim2", "dim3", "time", "numbers"]
    assert "dim2" in ds.coords
    assert "numbers" in ds.coords
    assert "var1" not in ds.coords
    assert "dim1" not in ds.coords
    assert len(ds.coords) == 4
    # nbytes
    assert (
        Dataset({"x": np.int64(1), "y": np.array([1, 2], dtype=np.float32)}).nbytes
        == 16
    )
def test_warn_ds_dims_deprecation(self) -> None:
    """Mapping-style access on ``ds.dims`` warns; len/iter/contains do not."""
    # TODO remove after deprecation cycle in GH #8500 is complete
    ds = create_test_data()
    with pytest.warns(FutureWarning, match="return type"):
        ds.dims["dim1"]
    with pytest.warns(FutureWarning, match="return type"):
        ds.dims.keys()
    with pytest.warns(FutureWarning, match="return type"):
        ds.dims.values()
    with pytest.warns(FutureWarning, match="return type"):
        ds.dims.items()
    # set-like usage stays silent
    with assert_no_warnings():
        len(ds.dims)
        ds.dims.__iter__()
        _ = "dim1" in ds.dims
def test_asarray(self) -> None:
    """np.asarray on a Dataset must raise instead of silently converting."""
    dataset = Dataset({"x": 0})
    with pytest.raises(TypeError, match=r"cannot directly convert"):
        np.asarray(dataset)
def test_get_index(self) -> None:
    """get_index returns real indexes for coord dims and defaults otherwise."""
    data = Dataset(
        {"foo": (("x", "y"), np.zeros((2, 3)))}, coords={"x": ["a", "b"]}
    )
    # "x" has an explicit coordinate; "y" falls back to a default integer index
    expected_x = pd.Index(["a", "b"])
    expected_y = pd.Index([0, 1, 2])
    assert data.get_index("x").equals(expected_x)
    assert data.get_index("y").equals(expected_y)
    # an unknown dimension raises KeyError
    with pytest.raises(KeyError):
        data.get_index("z")
def test_attr_access(self) -> None:
    """Attribute-style access resolves variables first, then ``attrs``."""
    ds = Dataset(
        {"tmin": ("x", [42], {"units": "Celsius"})}, attrs={"title": "My test data"}
    )
    assert_identical(ds.tmin, ds["tmin"])
    assert_identical(ds.tmin.x, ds.x)
    assert ds.title == ds.attrs["title"]
    assert ds.tmin.units == ds["tmin"].attrs["units"]
    # variables and attrs both show up in dir()
    assert {"tmin", "title"} <= set(dir(ds))
    assert "units" in set(dir(ds.tmin))
    # should defer to variable of same name
    ds.attrs["tmin"] = -999
    assert ds.attrs["tmin"] == -999
    assert_identical(ds.tmin, ds["tmin"])
def test_variable(self) -> None:
    """Assigning (dims, data) tuples creates variables in insertion order."""
    a = Dataset()
    d = np.random.random((10, 3))
    a["foo"] = (("time", "x"), d)
    assert "foo" in a.variables
    assert "foo" in a
    a["bar"] = (("time", "x"), d)
    # order of creation is preserved
    assert list(a.variables) == ["foo", "bar"]
    assert_array_equal(a["foo"].values, d)
    # try to add variable with dim (10,3) with data that's (3,10)
    with pytest.raises(ValueError):
        a["qux"] = (("time", "x"), d.T)
def test_modify_inplace(self) -> None:
    """In-place variable assignment: dim coords, resizing, and shape errors."""
    a = Dataset()
    vec = np.random.random((10,))
    attributes = {"foo": "bar"}
    # a variable named like its dimension becomes an (indexed) coordinate
    a["x"] = ("x", vec, attributes)
    assert "x" in a.coords
    assert isinstance(a.coords["x"].to_index(), pd.Index)
    assert_identical(a.coords["x"].variable, a.variables["x"])
    b = Dataset()
    b["x"] = ("x", vec, attributes)
    assert_identical(a["x"], b["x"])
    assert a.sizes == b.sizes
    # this should work
    a["x"] = ("x", vec[:5])
    a["z"] = ("x", np.arange(5))
    with pytest.raises(ValueError):
        # now it shouldn't, since there is a conflicting length
        a["x"] = ("x", vec[:4])
    # assigning non-1D / 0-D data to a new dimension coordinate is an error
    arr = np.random.random((10, 1))
    scal = np.array(0)
    with pytest.raises(ValueError):
        a["y"] = ("y", arr)
    with pytest.raises(ValueError):
        a["y"] = ("y", scal)
    assert "y" not in a.dims
def test_coords_properties(self) -> None:
    """DatasetCoordinates: len/iter/contains/getitem, repr, sizes and dtypes."""
    # use int64 for repr consistency on windows
    data = Dataset(
        {
            "x": ("x", np.array([-1, -2], "int64")),
            "y": ("y", np.array([0, 1, 2], "int64")),
            "foo": (["x", "y"], np.random.randn(2, 3)),
        },
        {"a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10)},
    )
    coords = data.coords
    assert isinstance(coords, DatasetCoordinates)
    # len
    assert len(coords) == 4
    # iter
    assert list(coords) == ["x", "y", "a", "b"]
    assert_identical(coords["x"].variable, data["x"].variable)
    assert_identical(coords["y"].variable, data["y"].variable)
    assert "x" in coords
    assert "a" in coords
    assert 0 not in coords
    assert "foo" not in coords
    with pytest.raises(KeyError):
        coords["foo"]
    with pytest.raises(KeyError):
        coords[0]
    # repr
    # NOTE(review): inner alignment of this string was reconstructed from the
    # standard xarray repr layout — confirm against the library's output
    expected = dedent(
        """\
        Coordinates:
          * x        (x) int64 16B -1 -2
          * y        (y) int64 24B 0 1 2
            a        (x) int64 16B 4 5
            b        int64 8B -10"""
    )
    actual = repr(coords)
    assert expected == actual
    # dims
    assert coords.sizes == {"x": 2, "y": 3}
    # dtypes
    assert coords.dtypes == {
        "x": np.dtype("int64"),
        "y": np.dtype("int64"),
        "a": np.dtype("int64"),
        "b": np.dtype("int64"),
    }
def test_coords_modify(self) -> None:
    """Mutating ``.coords``: setitem, delitem, update, and failure atomicity."""
    data = Dataset(
        {
            "x": ("x", [-1, -2]),
            "y": ("y", [0, 1, 2]),
            "foo": (["x", "y"], np.random.randn(2, 3)),
        },
        {"a": ("x", [4, 5]), "b": -10},
    )
    actual = data.copy(deep=True)
    actual.coords["x"] = ("x", ["a", "b"])
    assert_array_equal(actual["x"], ["a", "b"])
    actual = data.copy(deep=True)
    actual.coords["z"] = ("z", ["a", "b"])
    assert_array_equal(actual["z"], ["a", "b"])
    actual = data.copy(deep=True)
    with pytest.raises(ValueError, match=r"conflicting dimension sizes"):
        actual.coords["x"] = ("x", [-1])
    assert_identical(actual, data)  # should not be modified
    actual = data.copy()
    del actual.coords["b"]
    expected = data.reset_coords("b", drop=True)
    assert_identical(expected, actual)
    # deleting unknown or non-coordinate names raises
    with pytest.raises(KeyError):
        del data.coords["not_found"]
    with pytest.raises(KeyError):
        del data.coords["foo"]
    actual = data.copy(deep=True)
    actual.coords.update({"c": 11})
    expected = data.merge({"c": 11}).set_coords("c")
    assert_identical(expected, actual)
    # regression test for GH3746
    del actual.coords["x"]
    assert "x" not in actual.xindexes
def test_update_index(self) -> None:
    """Reassigning an indexed coordinate refreshes the underlying index."""
    ds = Dataset(coords={"x": [1, 2, 3]})
    new_labels = ["a", "b", "c"]
    ds["x"] = new_labels
    refreshed = ds.xindexes["x"].to_pandas_index()
    assert refreshed.equals(pd.Index(new_labels))
def test_coords_setitem_with_new_dimension(self) -> None:
    """Setting a coordinate along a brand-new dimension works on an empty Dataset."""
    values = [1, 2, 3]
    result = Dataset()
    result.coords["foo"] = ("x", values)
    assert_identical(Dataset(coords={"foo": ("x", values)}), result)
def test_coords_setitem_multiindex(self) -> None:
    """Overwriting a single MultiIndex level coordinate is rejected."""
    ds = create_test_multiindex()
    # replacing one level in isolation would corrupt the multi-index
    with pytest.raises(ValueError, match=r"cannot drop or update.*corrupt.*index "):
        ds.coords["level_1"] = range(4)
def test_coords_set(self) -> None:
    """set_coords / reset_coords round-trip data variables and coordinates."""
    one_coord = Dataset({"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])})
    two_coords = Dataset({"zzz": ("x", [2])}, {"x": ("x", [0]), "yy": ("x", [1])})
    all_coords = Dataset(
        coords={"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])}
    )
    actual = one_coord.set_coords("x")
    assert_identical(one_coord, actual)
    actual = one_coord.set_coords(["x"])
    assert_identical(one_coord, actual)
    actual = one_coord.set_coords("yy")
    assert_identical(two_coords, actual)
    actual = one_coord.set_coords(["yy", "zzz"])
    assert_identical(all_coords, actual)
    actual = one_coord.reset_coords()
    assert_identical(one_coord, actual)
    actual = two_coords.reset_coords()
    assert_identical(one_coord, actual)
    actual = all_coords.reset_coords()
    assert_identical(one_coord, actual)
    actual = all_coords.reset_coords(["yy", "zzz"])
    assert_identical(one_coord, actual)
    actual = all_coords.reset_coords("zzz")
    assert_identical(two_coords, actual)
    # dimension coordinates cannot be reset
    with pytest.raises(ValueError, match=r"cannot remove index"):
        one_coord.reset_coords("x")
    actual = all_coords.reset_coords("zzz", drop=True)
    expected = all_coords.drop_vars("zzz")
    assert_identical(expected, actual)
    expected = two_coords.drop_vars("zzz")
    assert_identical(expected, actual)
def test_coords_to_dataset(self) -> None:
    """coords.to_dataset() yields a Dataset holding only the coordinates."""
    source = Dataset({"foo": ("y", [-1, 0, 1])}, {"x": 10, "y": [2, 3, 4]})
    result = source.coords.to_dataset()
    assert_identical(Dataset(coords={"x": 10, "y": [2, 3, 4]}), result)
def test_coords_merge(self) -> None:
    """Coordinate merging: symmetry, alignment failures, and conflict dropping."""
    orig_coords = Dataset(coords={"a": ("x", [1, 2]), "x": [0, 1]}).coords
    other_coords = Dataset(coords={"b": ("x", ["a", "b"]), "x": [0, 1]}).coords
    expected = Dataset(
        coords={"a": ("x", [1, 2]), "b": ("x", ["a", "b"]), "x": [0, 1]}
    )
    # merging disjoint coord names is symmetric
    actual = orig_coords.merge(other_coords)
    assert_identical(expected, actual)
    actual = other_coords.merge(orig_coords)
    assert_identical(expected, actual)
    # incompatible index labels cannot be merged, regardless of length
    other_coords = Dataset(coords={"x": ("x", ["a"])}).coords
    with pytest.raises(MergeError):
        orig_coords.merge(other_coords)
    other_coords = Dataset(coords={"x": ("x", ["a", "b"])}).coords
    with pytest.raises(MergeError):
        orig_coords.merge(other_coords)
    other_coords = Dataset(coords={"x": ("x", ["a", "b", "c"])}).coords
    with pytest.raises(MergeError):
        orig_coords.merge(other_coords)
    # conflicting non-index coordinate values are dropped from the result
    other_coords = Dataset(coords={"a": ("x", [8, 9])}).coords
    expected = Dataset(coords={"x": range(2)})
    actual = orig_coords.merge(other_coords)
    assert_identical(expected, actual)
    actual = other_coords.merge(orig_coords)
    assert_identical(expected, actual)
    # scalar NaN coord conflicts leave the original coords untouched
    other_coords = Dataset(coords={"x": np.nan}).coords
    actual = orig_coords.merge(other_coords)
    assert_identical(orig_coords.to_dataset(), actual)
    actual = other_coords.merge(orig_coords)
    assert_identical(orig_coords.to_dataset(), actual)
def test_coords_merge_mismatched_shape(self) -> None:
    """Merging coords with different shapes broadcasts compatible values."""
    orig_coords = Dataset(coords={"a": ("x", [1, 1])}).coords
    other_coords = Dataset(coords={"a": 1}).coords
    # a scalar that broadcast-equals the array merges to the array form
    expected = orig_coords.to_dataset()
    actual = orig_coords.merge(other_coords)
    assert_identical(expected, actual)
    other_coords = Dataset(coords={"a": ("y", [1])}).coords
    expected = Dataset(coords={"a": (["x", "y"], [[1], [1]])})
    actual = orig_coords.merge(other_coords)
    assert_identical(expected, actual)
    # dim order in the result follows the left operand
    actual = other_coords.merge(orig_coords)
    assert_identical(expected.transpose(), actual)
    # NaN scalar vs NaN array still broadcast-equal
    orig_coords = Dataset(coords={"a": ("x", [np.nan])}).coords
    other_coords = Dataset(coords={"a": np.nan}).coords
    expected = orig_coords.to_dataset()
    actual = orig_coords.merge(other_coords)
    assert_identical(expected, actual)
def test_data_vars_properties(self) -> None:
    """DataVariables mapping: iteration, repr, dtypes, and len consistency."""
    ds = Dataset()
    ds["foo"] = (("x",), [1.0])
    ds["bar"] = 2.0
    # iter
    assert set(ds.data_vars) == {"foo", "bar"}
    assert "foo" in ds.data_vars
    assert "x" not in ds.data_vars
    assert_identical(ds["foo"], ds.data_vars["foo"])
    # repr
    # NOTE(review): inner alignment of this string was reconstructed from the
    # standard xarray repr layout — confirm against the library's output
    expected = dedent(
        """\
        Data variables:
            foo      (x) float64 8B 1.0
            bar      float64 8B 2.0"""
    )
    actual = repr(ds.data_vars)
    assert expected == actual
    # dtypes
    assert ds.data_vars.dtypes == {
        "foo": np.dtype("float64"),
        "bar": np.dtype("float64"),
    }
    # len
    ds.coords["x"] = [1]
    assert len(ds.data_vars) == 2
    # https://github.com/pydata/xarray/issues/7588
    with pytest.raises(
        AssertionError, match=r"something is wrong with Dataset._coord_names"
    ):
        ds._coord_names = {"w", "x", "y", "z"}
        len(ds.data_vars)
def test_equals_and_identical(self) -> None:
    """equals ignores attrs; identical does not; missing vars break equals."""
    data = create_test_data(seed=42)
    assert data.equals(data)
    assert data.identical(data)
    data2 = create_test_data(seed=42)
    data2.attrs["foobar"] = "baz"
    # attrs differences only affect identical()
    assert data.equals(data2)
    assert not data.identical(data2)
    del data2["time"]
    assert not data.equals(data2)
    # a None variable name is still handled consistently
    data = create_test_data(seed=42).rename({"var1": None})
    assert data.equals(data)
    assert data.identical(data)
    data2 = data.reset_coords()
    assert not data2.equals(data)
    assert not data2.identical(data)
def test_equals_failures(self) -> None:
    """equals/identical/broadcast_equals return False for non-Dataset inputs."""
    ds = create_test_data()
    assert not ds.equals("foo")  # type: ignore[arg-type]
    assert not ds.identical(123)  # type: ignore[arg-type]
    assert not ds.broadcast_equals({1: 2})  # type: ignore[arg-type]
def test_broadcast_equals(self) -> None:
    """A scalar and a length-1 coord are broadcast-equal but not (strictly) equal."""
    scalar = Dataset(coords={"x": 0})
    vector = Dataset(coords={"x": [0]})
    assert scalar.broadcast_equals(vector)
    assert not scalar.equals(vector)
    assert not scalar.identical(vector)
def test_attrs(self) -> None:
    """Dataset.attrs is a plain dict and round-trips assigned values."""
    data = create_test_data(seed=42)
    data.attrs = {"foobar": "baz"}
    # BUG FIX: the original line was `assert data.attrs["foobar"], "baz"`,
    # which only asserts truthiness and uses "baz" as the assertion message.
    # The intended check is equality:
    assert data.attrs["foobar"] == "baz"
    assert isinstance(data.attrs, dict)
def test_chunks_does_not_load_data(self) -> None:
    """Accessing ``.chunks`` must not load variable data (regression, GH6538)."""
    backing_store = InaccessibleVariableDataStore()
    create_test_data().dump_to_store(backing_store)
    dataset = open_dataset(backing_store)
    # would raise UnexpectedDataAccess if any data were read
    assert dataset.chunks == {}
@requires_dask
@pytest.mark.parametrize(
    "use_cftime,calendar",
    [
        (False, "standard"),
        (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "standard"),
        (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "noleap"),
        (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "360_day"),
    ],
)
def test_chunk_by_season_resampler(self, use_cftime: bool, calendar: str) -> None:
    """Chunking along time with SeasonResampler yields per-season chunk sizes."""
    import dask.array

    N = 365 + 365  # 2 years - 1 day
    time = xr.date_range(
        "2000-01-01", periods=N, freq="D", use_cftime=use_cftime, calendar=calendar
    )
    ds = Dataset(
        {
            "pr": ("time", dask.array.random.random((N), chunks=(20))),
            "pr2d": (("x", "time"), dask.array.random.random((10, N), chunks=(20))),
            "ones": ("time", np.ones((N,))),
        },
        coords={"time": time},
    )
    # Standard seasons
    rechunked = ds.chunk(
        {"x": 2, "time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])}
    )
    assert rechunked.chunksizes["x"] == (2,) * 5
    assert len(rechunked.chunksizes["time"]) == 9
    # NOTE(review): the x-chunksize assertion below duplicates the one two
    # lines above; harmless but could be removed
    assert rechunked.chunksizes["x"] == (2,) * 5
    assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"]
    # exact per-season chunk lengths depend on the calendar
    if calendar == "standard":
        assert rechunked.chunksizes["time"] == (60, 92, 92, 91, 90, 92, 92, 91, 30)
    elif calendar == "noleap":
        assert rechunked.chunksizes["time"] == (59, 92, 92, 91, 90, 92, 92, 91, 31)
    elif calendar == "360_day":
        assert rechunked.chunksizes["time"] == (60, 90, 90, 90, 90, 90, 90, 90, 40)
    else:
        raise AssertionError("unreachable")
    # Custom seasons
    rechunked = ds.chunk(
        {"x": 2, "time": SeasonResampler(["DJFM", "AM", "JJA", "SON"])}
    )
    assert len(rechunked.chunksizes["time"]) == 9
    assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"]
    assert rechunked.chunksizes["x"] == (2,) * 5
    if calendar == "standard":
        assert rechunked.chunksizes["time"] == (91, 61, 92, 91, 121, 61, 92, 91, 30)
    elif calendar == "noleap":
        assert rechunked.chunksizes["time"] == (90, 61, 92, 91, 121, 61, 92, 91, 31)
    elif calendar == "360_day":
        assert rechunked.chunksizes["time"] == (90, 60, 90, 90, 120, 60, 90, 90, 40)
    else:
        raise AssertionError("unreachable")
    # Test that drop_incomplete doesn't affect chunking
    rechunked_drop_true = ds.chunk(
        time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=True)
    )
    rechunked_drop_false = ds.chunk(
        time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=False)
    )
    assert (
        rechunked_drop_true.chunksizes["time"]
        == rechunked_drop_false.chunksizes["time"]
    )
@requires_dask
def test_chunk_by_season_resampler_errors(self):
    """Test error handling for SeasonResampler chunking."""
    # Test error on missing season (should fail with incomplete seasons)
    ds = Dataset(
        {"x": ("time", np.arange(12))},
        coords={"time": pd.date_range("2000-01-01", periods=12, freq="MS")},
    )
    with pytest.raises(ValueError, match="does not cover all 12 months"):
        ds.chunk(time=SeasonResampler(["DJF", "MAM", "SON"]))
    ds = Dataset({"foo": ("x", [1, 2, 3])})
    # Test error on virtual variable
    with pytest.raises(ValueError, match="virtual variable"):
        ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"]))
    # Test error on non-datetime variable
    ds["x"] = ("x", [1, 2, 3])
    with pytest.raises(ValueError, match="datetime variables"):
        ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"]))
    # Test successful case with 1D datetime variable
    ds["x"] = ("x", xr.date_range("2001-01-01", periods=3, freq="D"))
    # This should work
    result = ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"]))
    assert result.chunks is not None
    # Test error on missing season (should fail with incomplete seasons)
    with pytest.raises(ValueError):
        ds.chunk(x=SeasonResampler(["DJF", "MAM", "SON"]))
@requires_dask
def test_chunk(self) -> None:
    """Chunking converts data vars to dask arrays; names track chunk layout."""
    data = create_test_data()
    for v in data.variables.values():
        assert isinstance(v.data, np.ndarray)
    assert data.chunks == {}
    reblocked = data.chunk()
    for k, v in reblocked.variables.items():
        # dimension coordinates stay as numpy arrays
        if k in reblocked.dims:
            assert isinstance(v.data, np.ndarray)
        else:
            assert isinstance(v.data, da.Array)
    expected_chunks: dict[Hashable, tuple[int, ...]] = {
        "dim1": (8,),
        "dim2": (9,),
        "dim3": (10,),
    }
    assert reblocked.chunks == expected_chunks
    # test kwargs form of chunks
    assert data.chunk(expected_chunks).chunks == expected_chunks

    def get_dask_names(ds):
        # map each data variable to its dask graph key name
        return {k: v.data.name for k, v in ds.items()}

    orig_dask_names = get_dask_names(reblocked)
    reblocked = data.chunk({"time": 5, "dim1": 5, "dim2": 5, "dim3": 5})
    # time is not a dim in any of the data_vars, so it
    # doesn't get chunked
    expected_chunks = {"dim1": (5, 3), "dim2": (5, 4), "dim3": (5, 5)}
    assert reblocked.chunks == expected_chunks
    # make sure dask names change when rechunking by different amounts
    # regression test for GH3350
    new_dask_names = get_dask_names(reblocked)
    for k, v in new_dask_names.items():
        assert v != orig_dask_names[k]
    reblocked = data.chunk(expected_chunks)
    assert reblocked.chunks == expected_chunks
    # reblock on already blocked data
    orig_dask_names = get_dask_names(reblocked)
    reblocked = reblocked.chunk(expected_chunks)
    new_dask_names = get_dask_names(reblocked)
    assert reblocked.chunks == expected_chunks
    assert_identical(reblocked, data)
    # rechunking with same chunk sizes should not change names
    for k, v in new_dask_names.items():
        assert v == orig_dask_names[k]
    # unknown chunk keys report the available dimensions
    with pytest.raises(
        ValueError,
        match=re.escape(
            "chunks keys ('foo',) not found in data dimensions ('dim2', 'dim3', 'time', 'dim1')"
        ),
    ):
        data.chunk({"foo": 10})
@requires_dask
@pytest.mark.parametrize(
    "calendar",
    (
        "standard",
        pytest.param(
            "gregorian",
            marks=pytest.mark.skipif(not has_cftime, reason="needs cftime"),
        ),
    ),
)
@pytest.mark.parametrize("freq", ["D", "W", "5ME", "YE"])
@pytest.mark.parametrize("add_gap", [True, False])
def test_chunk_by_frequency(self, freq: str, calendar: str, add_gap: bool) -> None:
    """TimeResampler chunking matches the per-period counts from resample()."""
    import dask.array

    N = 365 * 2
    ΔN = 28  # noqa: PLC2401
    time = xr.date_range(
        "2001-01-01", periods=N + ΔN, freq="D", calendar=calendar
    ).to_numpy(copy=True)
    if add_gap:
        # introduce an empty bin
        time[31 : 31 + ΔN] = np.datetime64("NaT")
        time = time[~np.isnat(time)]
    else:
        time = time[:N]
    ds = Dataset(
        {
            "pr": ("time", dask.array.random.random((N), chunks=(20))),
            "pr2d": (("x", "time"), dask.array.random.random((10, N), chunks=(20))),
            "ones": ("time", np.ones((N,))),
        },
        coords={"time": time},
    )
    rechunked = ds.chunk(x=2, time=TimeResampler(freq))
    # counting ones per resample period gives the expected chunk lengths
    expected = tuple(
        ds.ones.resample(time=freq).sum().dropna("time").astype(int).data.tolist()
    )
    assert rechunked.chunksizes["time"] == expected
    assert rechunked.chunksizes["x"] == (2,) * 5
    # dict form of the chunks argument behaves the same
    rechunked = ds.chunk({"x": 2, "time": TimeResampler(freq)})
    assert rechunked.chunksizes["time"] == expected
    assert rechunked.chunksizes["x"] == (2,) * 5
def test_chunk_by_frequency_errors(self):
    """TimeResampler chunking rejects virtual, non-datetime, and bad-freq dims."""
    ds = Dataset({"foo": ("x", [1, 2, 3])})
    with pytest.raises(ValueError, match="virtual variable"):
        ds.chunk(x=TimeResampler("YE"))
    ds["x"] = ("x", [1, 2, 3])
    with pytest.raises(ValueError, match="datetime variables"):
        ds.chunk(x=TimeResampler("YE"))
    ds["x"] = ("x", xr.date_range("2001-01-01", periods=3, freq="D"))
    with pytest.raises(ValueError, match="Invalid frequency"):
        ds.chunk(x=TimeResampler("foo"))
@requires_dask
def test_dask_is_lazy(self) -> None:
    """Lazy dask-backed datasets only touch data on load()/.values access."""
    store = InaccessibleVariableDataStore()
    create_test_data().dump_to_store(store)
    ds = open_dataset(store).chunk()
    with pytest.raises(UnexpectedDataAccess):
        ds.load()
    with pytest.raises(UnexpectedDataAccess):
        _ = ds["var1"].values
    # these should not raise UnexpectedDataAccess:
    _ = ds.var1.data
    ds.isel(time=10)
    ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
    ds.transpose()
    ds.mean()
    ds.fillna(0)
    ds.rename({"dim1": "foobar"})
    ds.set_coords("var1")
    ds.drop_vars("var1")
def test_isel(self) -> None:
    """Positional indexing with slices/ints: shapes, attrs, and missing dims."""
    data = create_test_data()
    slicers: dict[Hashable, slice] = {
        "dim1": slice(None, None, 2),
        "dim2": slice(0, 2),
    }
    ret = data.isel(slicers)
    # Verify that only the specified dimension was altered
    assert list(data.dims) == list(ret.dims)
    for d in data.dims:
        if d in slicers:
            assert ret.sizes[d] == np.arange(data.sizes[d])[slicers[d]].size
        else:
            assert data.sizes[d] == ret.sizes[d]
    # Verify that the data is what we expect
    for v in data.variables:
        assert data[v].dims == ret[v].dims
        assert data[v].attrs == ret[v].attrs
        slice_list = [slice(None)] * data[v].values.ndim
        for d, s in slicers.items():
            if d in data[v].dims:
                inds = np.nonzero(np.array(data[v].dims) == d)[0]
                for ind in inds:
                    slice_list[ind] = s
        expected = data[v].values[tuple(slice_list)]
        actual = ret[v].values
        np.testing.assert_array_equal(expected, actual)
    with pytest.raises(ValueError):
        data.isel(not_a_dim=slice(0, 2))
    # the error message lists the available dimensions
    with pytest.raises(
        ValueError,
        match=r"Dimensions {'not_a_dim'} do not exist. Expected "
        r"one or more of "
        r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*",
    ):
        data.isel(not_a_dim=slice(0, 2))
    # missing_dims="warn" downgrades the error to a warning
    with pytest.warns(
        UserWarning,
        match=r"Dimensions {'not_a_dim'} do not exist. "
        r"Expected one or more of "
        r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*",
    ):
        data.isel(not_a_dim=slice(0, 2), missing_dims="warn")
    assert_identical(data, data.isel(not_a_dim=slice(0, 2), missing_dims="ignore"))
    # integer indexers drop the selected dimension
    ret = data.isel(dim1=0)
    assert {"time": 20, "dim2": 9, "dim3": 10} == ret.sizes
    assert set(data.data_vars) == set(ret.data_vars)
    assert set(data.coords) == set(ret.coords)
    assert set(data.xindexes) == set(ret.xindexes)
    ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
    assert {"time": 2, "dim2": 5, "dim3": 10} == ret.sizes
    assert set(data.data_vars) == set(ret.data_vars)
    assert set(data.coords) == set(ret.coords)
    assert set(data.xindexes) == set(ret.xindexes)
    # selecting a scalar along an indexed dim drops that index
    ret = data.isel(time=0, dim1=0, dim2=slice(5))
    assert {"dim2": 5, "dim3": 10} == ret.sizes
    assert set(data.data_vars) == set(ret.data_vars)
    assert set(data.coords) == set(ret.coords)
    assert set(data.xindexes) == set(list(ret.xindexes) + ["time"])
def test_isel_fancy(self) -> None:
    # isel with fancy indexing.
    data = create_test_data()
    pdim1 = [1, 2, 3]
    pdim2 = [4, 5, 1]
    pdim3 = [1, 2, 3]
    # pointwise selection: all indexers share the new "test_coord" dim
    actual = data.isel(
        dim1=(("test_coord",), pdim1),
        dim2=(("test_coord",), pdim2),
        dim3=(("test_coord",), pdim3),
    )
    assert "test_coord" in actual.dims
    assert actual.coords["test_coord"].shape == (len(pdim1),)
    # Should work with DataArray
    actual = data.isel(
        dim1=DataArray(pdim1, dims="test_coord"),
        dim2=(("test_coord",), pdim2),
        dim3=(("test_coord",), pdim3),
    )
    assert "test_coord" in actual.dims
    assert actual.coords["test_coord"].shape == (len(pdim1),)
    expected = data.isel(
        dim1=(("test_coord",), pdim1),
        dim2=(("test_coord",), pdim2),
        dim3=(("test_coord",), pdim3),
    )
    assert_identical(actual, expected)
    # DataArray with coordinate
    idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)})
    idx2 = DataArray(pdim2, dims=["b"], coords={"b": np.random.randn(3)})
    idx3 = DataArray(pdim3, dims=["c"], coords={"c": np.random.randn(3)})
    # Should work with DataArray
    actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)
    assert "a" in actual.dims
    assert "b" in actual.dims
    assert "c" in actual.dims
    assert "time" in actual.coords
    assert "dim2" in actual.coords
    assert "dim3" in actual.coords
    expected = data.isel(
        dim1=(("a",), pdim1), dim2=(("b",), pdim2), dim3=(("c",), pdim3)
    )
    expected = expected.assign_coords(a=idx1["a"], b=idx2["b"], c=idx3["c"])
    assert_identical(actual, expected)
    # all indexers sharing dim "a" collapse into a single new dimension
    idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)})
    idx2 = DataArray(pdim2, dims=["a"])
    idx3 = DataArray(pdim3, dims=["a"])
    # Should work with DataArray
    actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)
    assert "a" in actual.dims
    assert "time" in actual.coords
    assert "dim2" in actual.coords
    assert "dim3" in actual.coords
    expected = data.isel(
        dim1=(("a",), pdim1), dim2=(("a",), pdim2), dim3=(("a",), pdim3)
    )
    expected = expected.assign_coords(a=idx1["a"])
    assert_identical(actual, expected)
    actual = data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2))
    assert "points" in actual.dims
    assert "dim3" in actual.dims
    assert "dim3" not in actual.data_vars
    np.testing.assert_array_equal(data["dim2"][pdim2], actual["dim2"])
    # test that the order of the indexers doesn't matter
    assert_identical(
        data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2)),
        data.isel(dim2=(("points",), pdim2), dim1=(("points",), pdim1)),
    )
    # make sure we're raising errors in the right places
    with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"):
        data.isel(dim1=(("points",), [1, 2]), dim2=(("points",), [1, 2, 3]))
    with pytest.raises(TypeError, match=r"cannot use a Dataset"):
        data.isel(dim1=Dataset({"points": [1, 2]}))
    # test to be sure we keep around variables that were not indexed
    ds = Dataset({"x": [1, 2, 3, 4], "y": 0})
    actual = ds.isel(x=(("points",), [0, 1, 2]))
    assert_identical(ds["y"], actual["y"])
    # tests using index or DataArray as indexers
    stations = Dataset()
    stations["station"] = (("station",), ["A", "B", "C"])
    stations["dim1s"] = (("station",), [1, 2, 3])
    stations["dim2s"] = (("station",), [4, 5, 1])
    actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"])
    assert "station" in actual.coords
    assert "station" in actual.dims
    assert_identical(actual["station"].drop_vars(["dim2"]), stations["station"])
    # conflicting coordinates on the indexers themselves are an error
    with pytest.raises(ValueError, match=r"conflicting values/indexes on "):
        data.isel(
            dim1=DataArray(
                [0, 1, 2], dims="station", coords={"station": [0, 1, 2]}
            ),
            dim2=DataArray(
                [0, 1, 2], dims="station", coords={"station": [0, 1, 3]}
            ),
        )
    # multi-dimensional selection
    stations = Dataset()
    stations["a"] = (("a",), ["A", "B", "C"])
    stations["b"] = (("b",), [0, 1])
    stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]])
    stations["dim2s"] = (("a",), [4, 5, 1])
    actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"])
    assert "a" in actual.coords
    assert "a" in actual.dims
    assert "b" in actual.coords
    assert "b" in actual.dims
    assert "dim2" in actual.coords
    assert "a" in actual["dim2"].dims
    assert_identical(actual["a"].drop_vars(["dim2"]), stations["a"])
    assert_identical(actual["b"], stations["b"])
    expected_var1 = data["var1"].variable[
        stations["dim1s"].variable, stations["dim2s"].variable
    ]
    expected_var2 = data["var2"].variable[
        stations["dim1s"].variable, stations["dim2s"].variable
    ]
    expected_var3 = data["var3"].variable[slice(None), stations["dim1s"].variable]
    assert_equal(actual["a"].drop_vars("dim2"), stations["a"])
    assert_array_equal(actual["var1"], expected_var1)
    assert_array_equal(actual["var2"], expected_var2)
    assert_array_equal(actual["var3"], expected_var3)
    # test that drop works
    ds = xr.Dataset({"a": (("x",), [1, 2, 3])}, coords={"b": (("x",), [5, 6, 7])})
    actual = ds.isel({"x": 1}, drop=False)
    expected = xr.Dataset({"a": 2}, coords={"b": 6})
    assert_identical(actual, expected)
    actual = ds.isel({"x": 1}, drop=True)
    expected = xr.Dataset({"a": 2})
    assert_identical(actual, expected)
    actual = ds.isel({"x": DataArray(1)}, drop=False)
    expected = xr.Dataset({"a": 2}, coords={"b": 6})
    assert_identical(actual, expected)
    actual = ds.isel({"x": DataArray(1)}, drop=True)
    expected = xr.Dataset({"a": 2})
    assert_identical(actual, expected)
def test_isel_dataarray(self) -> None:
    """Test for indexing by DataArray"""
    data = create_test_data()
    # indexing with DataArray with same-name coordinates.
    indexing_da = DataArray(
        np.arange(1, 4), dims=["dim1"], coords={"dim1": np.random.randn(3)}
    )
    actual = data.isel(dim1=indexing_da)
    assert_identical(indexing_da["dim1"], actual["dim1"])
    assert_identical(data["dim2"], actual["dim2"])
    # Conflict in the dimension coordinate
    indexing_da = DataArray(
        np.arange(1, 4), dims=["dim2"], coords={"dim2": np.random.randn(3)}
    )
    with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
        data.isel(dim2=indexing_da)
    # Also the case for DataArray
    with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
        data["var2"].isel(dim2=indexing_da)
    with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
        data["dim2"].isel(dim2=indexing_da)
    # same name coordinate which does not conflict
    indexing_da = DataArray(
        np.arange(1, 4), dims=["dim2"], coords={"dim2": data["dim2"].values[1:4]}
    )
    actual = data.isel(dim2=indexing_da)
    assert_identical(actual["dim2"], indexing_da["dim2"])
    # Silently drop conflicted (non-dimensional) coordinate of indexer
    indexing_da = DataArray(
        np.arange(1, 4),
        dims=["dim2"],
        coords={
            "dim2": data["dim2"].values[1:4],
            "numbers": ("dim2", np.arange(2, 5)),
        },
    )
    actual = data.isel(dim2=indexing_da)
    assert_identical(actual["numbers"], data["numbers"])
    # boolean data array with coordinate with the same name
    indexing_da = DataArray(
        np.arange(1, 10), dims=["dim2"], coords={"dim2": data["dim2"].values}
    )
    indexing_da = indexing_da < 3
    actual = data.isel(dim2=indexing_da)
    assert_identical(actual["dim2"], data["dim2"][:2])
    # boolean data array with non-dimensioncoordinate
    indexing_da = DataArray(
        np.arange(1, 10),
        dims=["dim2"],
        coords={
            "dim2": data["dim2"].values,
            "non_dim": (("dim2",), np.random.randn(9)),
            "non_dim2": 0,
        },
    )
    indexing_da = indexing_da < 3
    actual = data.isel(dim2=indexing_da)
    assert_identical(
        actual["dim2"].drop_vars("non_dim").drop_vars("non_dim2"), data["dim2"][:2]
    )
    assert_identical(actual["non_dim"], indexing_da["non_dim"][:2])
    assert_identical(actual["non_dim2"], indexing_da["non_dim2"])
    # non-dimension coordinate will be also attached
    indexing_da = DataArray(
        np.arange(1, 4),
        dims=["dim2"],
        coords={"non_dim": (("dim2",), np.random.randn(3))},
    )
    actual = data.isel(dim2=indexing_da)
    assert "non_dim" in actual
    assert "non_dim" in actual.coords
    # Index by a scalar DataArray
    indexing_da = DataArray(3, dims=[], coords={"station": 2})
    actual = data.isel(dim2=indexing_da)
    assert "station" in actual
    actual = data.isel(dim2=indexing_da["station"])
    assert "station" in actual
    # indexer generated from coordinates
    indexing_ds = Dataset({}, coords={"dim2": [0, 1, 2]})
    with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
        actual = data.isel(dim2=indexing_ds["dim2"])
def test_isel_fancy_convert_index_variable(self) -> None:
    # select index variable "x" with a DataArray of dim "z"
    # -> drop index and convert index variable to base variable
    ds = xr.Dataset({"foo": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]})
    idxr = xr.DataArray([1], dims="z", name="x")
    actual = ds.isel(x=idxr)
    assert "x" not in actual.xindexes
    assert not isinstance(actual.x.variable, IndexVariable)
def test_isel_multicoord_index(self) -> None:
    # regression test https://github.com/pydata/xarray/issues/10063
    # isel on a multi-coordinate index should return a unique index associated
    # to each coordinate
    coords = xr.Coordinates(coords={"x": [0, 1], "y": [1, 2]}, indexes={})
    ds = xr.Dataset(coords=coords).set_xindex(["x", "y"], XYIndex)
    ds2 = ds.isel(x=slice(None), y=slice(None))
    # both coordinates must share one and the same index object
    assert ds2.xindexes["x"] is ds2.xindexes["y"]
    def test_sel(self) -> None:
        """Label-based sel matches the equivalent positional isel."""
        data = create_test_data()
        int_slicers = {"dim1": slice(None, None, 2), "dim2": slice(2), "dim3": slice(3)}
        loc_slicers = {
            "dim1": slice(None, None, 2),
            "dim2": slice(0, 0.5),
            "dim3": slice("a", "c"),
        }
        assert_equal(data.isel(int_slicers), data.sel(loc_slicers))
        # datetime labels: strings, slices of strings, datetime arrays, booleans
        data["time"] = ("time", pd.date_range("2000-01-01", periods=20))
        assert_equal(data.isel(time=0), data.sel(time="2000-01-01"))
        assert_equal(
            data.isel(time=slice(10)), data.sel(time=slice("2000-01-01", "2000-01-10"))
        )
        assert_equal(data, data.sel(time=slice("1999", "2005")))
        times = pd.date_range("2000-01-01", periods=3)
        assert_equal(data.isel(time=slice(3)), data.sel(time=times))
        assert_equal(
            data.isel(time=slice(3)), data.sel(time=(data["time.dayofyear"] <= 3))
        )
        # timedelta labels: Timedelta objects, strings, and string slices
        td = pd.to_timedelta(np.arange(3), unit="days")
        data = Dataset({"x": ("td", np.arange(3)), "td": td})
        assert_equal(data, data.sel(td=td))
        assert_equal(data, data.sel(td=slice("3 days")))
        assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0 days")))
        assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0h")))
        assert_equal(data.isel(td=slice(1, 3)), data.sel(td=slice("1 days", "2 days")))
def test_sel_dataarray(self) -> None:
data = create_test_data()
ind = DataArray([0.0, 0.5, 1.0], dims=["dim2"])
actual = data.sel(dim2=ind)
assert_equal(actual, data.isel(dim2=[0, 1, 2]))
# with different dimension
ind = DataArray([0.0, 0.5, 1.0], dims=["new_dim"])
actual = data.sel(dim2=ind)
expected = data.isel(dim2=Variable("new_dim", [0, 1, 2]))
assert "new_dim" in actual.dims
assert_equal(actual, expected)
# Multi-dimensional
ind = DataArray([[0.0], [0.5], [1.0]], dims=["new_dim", "new_dim2"])
actual = data.sel(dim2=ind)
expected = data.isel(dim2=Variable(("new_dim", "new_dim2"), [[0], [1], [2]]))
assert "new_dim" in actual.dims
assert "new_dim2" in actual.dims
assert_equal(actual, expected)
# with coordinate
ind = DataArray(
[0.0, 0.5, 1.0], dims=["new_dim"], coords={"new_dim": ["a", "b", "c"]}
)
actual = data.sel(dim2=ind)
expected = data.isel(dim2=[0, 1, 2]).rename({"dim2": "new_dim"})
assert "new_dim" in actual.dims
assert "new_dim" in actual.coords
assert_equal(
actual.drop_vars("new_dim").drop_vars("dim2"), expected.drop_vars("new_dim")
)
assert_equal(actual["new_dim"].drop_vars("dim2"), ind["new_dim"])
# with conflicted coordinate (silently ignored)
ind = DataArray(
[0.0, 0.5, 1.0], dims=["dim2"], coords={"dim2": ["a", "b", "c"]}
)
actual = data.sel(dim2=ind)
expected = data.isel(dim2=[0, 1, 2])
assert_equal(actual, expected)
# with conflicted coordinate (silently ignored)
ind = DataArray(
[0.0, 0.5, 1.0],
dims=["new_dim"],
coords={"new_dim": ["a", "b", "c"], "dim2": 3},
)
actual = data.sel(dim2=ind)
assert_equal(
actual["new_dim"].drop_vars("dim2"), ind["new_dim"].drop_vars("dim2")
)
expected = data.isel(dim2=[0, 1, 2])
expected["dim2"] = (("new_dim"), expected["dim2"].values)
assert_equal(actual["dim2"].drop_vars("new_dim"), expected["dim2"])
assert actual["var1"].dims == ("dim1", "new_dim")
# with non-dimensional coordinate
ind = DataArray(
[0.0, 0.5, 1.0],
dims=["dim2"],
coords={
"dim2": ["a", "b", "c"],
"numbers": ("dim2", [0, 1, 2]),
"new_dim": ("dim2", [1.1, 1.2, 1.3]),
},
)
actual = data.sel(dim2=ind)
expected = data.isel(dim2=[0, 1, 2])
assert_equal(actual.drop_vars("new_dim"), expected)
assert np.allclose(actual["new_dim"].values, ind["new_dim"].values)
    def test_sel_dataarray_mindex(self) -> None:
        """sel with DataArray indexers on a MultiIndex dim matches isel."""
        midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two"))
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        midx_coords["y"] = range(3)
        mds = xr.Dataset(
            {"var": (("x", "y"), np.random.rand(6, 3))}, coords=midx_coords
        )
        # indexer shares the indexed dim name
        actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="x"))
        actual_sel = mds.sel(x=DataArray(midx[:3], dims="x"))
        assert actual_isel["x"].dims == ("x",)
        assert actual_sel["x"].dims == ("x",)
        assert_identical(actual_isel, actual_sel)
        # indexer along a new dim renames the result dim
        actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="z"))
        actual_sel = mds.sel(x=Variable("z", midx[:3]))
        assert actual_isel["x"].dims == ("z",)
        assert actual_sel["x"].dims == ("z",)
        assert_identical(actual_isel, actual_sel)
        # with coordinate
        actual_isel = mds.isel(
            x=xr.DataArray(np.arange(3), dims="z", coords={"z": [0, 1, 2]})
        )
        actual_sel = mds.sel(
            x=xr.DataArray(midx[:3], dims="z", coords={"z": [0, 1, 2]})
        )
        assert actual_isel["x"].dims == ("z",)
        assert actual_sel["x"].dims == ("z",)
        assert_identical(actual_isel, actual_sel)
        # Vectorized indexing with level-variables raises an error
        with pytest.raises(ValueError, match=r"Vectorized selection is "):
            mds.sel(one=["a", "b"])
        with pytest.raises(
            ValueError,
            match=r"Vectorized selection is not available along coordinate 'x' with a multi-index",
        ):
            mds.sel(
                x=xr.DataArray(
                    [np.array(midx[:2]), np.array(midx[-2:])], dims=["a", "b"]
                )
            )
def test_sel_categorical(self) -> None:
ind = pd.Series(["foo", "bar"], dtype="category")
df = pd.DataFrame({"ind": ind, "values": [1, 2]})
ds = df.set_index("ind").to_xarray()
actual = ds.sel(ind="bar")
expected = ds.isel(ind=1)
assert_identical(expected, actual)
def test_sel_categorical_error(self) -> None:
ind = pd.Series(["foo", "bar"], dtype="category")
df = pd.DataFrame({"ind": ind, "values": [1, 2]})
ds = df.set_index("ind").to_xarray()
with pytest.raises(ValueError):
ds.sel(ind="bar", method="nearest")
with pytest.raises(ValueError):
ds.sel(ind="bar", tolerance="nearest") # type: ignore[arg-type]
    def test_categorical_index(self) -> None:
        """sel/unstack work on a CategoricalIndex coordinate."""
        cat = pd.CategoricalIndex(
            ["foo", "bar", "foo"],
            categories=["foo", "bar", "baz", "qux", "quux", "corge"],
        )
        ds = xr.Dataset(
            {"var": ("cat", np.arange(3))},
            coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 1])},
        )
        # test slice
        actual1 = ds.sel(cat="foo")
        expected1 = ds.isel(cat=[0, 2])
        assert_identical(expected1, actual1)
        # make sure the conversion to the array works
        actual2 = ds.sel(cat="foo")["cat"].values
        assert (actual2 == np.array(["foo", "foo"])).all()
        ds = ds.set_index(index=["cat", "c"])
        actual3 = ds.unstack("index")
        assert actual3["var"].shape == (2, 2)
    def test_categorical_index_reindex(self) -> None:
        """reindex selects the requested categorical labels."""
        cat = pd.CategoricalIndex(
            ["foo", "bar", "baz"],
            categories=["foo", "bar", "baz", "qux", "quux", "corge"],
        )
        ds = xr.Dataset(
            {"var": ("cat", np.arange(3))},
            coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 2])},
        )
        actual = ds.reindex(cat=["foo"])["cat"].values
        assert (actual == np.array(["foo"])).all()
    @pytest.mark.parametrize("fill_value", [np.nan, pd.NA])
    def test_extensionarray_negative_reindex(self, fill_value) -> None:
        """Reindexing an extension-array variable fills missing labels with NA."""
        cat = pd.Categorical(
            ["foo", "bar", "baz"],
            categories=["foo", "bar", "baz", "qux", "quux", "corge"],
        )
        ds = xr.Dataset(
            {"cat": ("index", cat)},
            coords={"index": ("index", np.arange(3))},
        )
        reindexed_cat = cast(
            pd.api.extensions.ExtensionArray,
            (
                ds.reindex(index=[-1, 1, 1], fill_value=fill_value)["cat"]
                .to_pandas()
                .values
            ),
        )
        assert reindexed_cat.equals(pd.array([pd.NA, "bar", "bar"], dtype=cat.dtype)) # type: ignore[attr-defined]
    def test_extension_array_reindex_same(self) -> None:
        """Reindexing onto the same index keeps exact alignment."""
        series = pd.Series([1, 2, pd.NA, 3], dtype=pd.Int32Dtype())
        test = xr.Dataset({"test": series})
        res = test.reindex(dim_0=series.index)
        align(res, test, join="exact")
    def test_categorical_multiindex(self) -> None:
        """A categorical level in a pandas MultiIndex round-trips to xarray."""
        i1 = pd.Series([0, 0])
        cat = pd.CategoricalDtype(categories=["foo", "baz", "bar"])
        i2 = pd.Series(["baz", "bar"], dtype=cat)
        df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2]}).set_index(
            ["i1", "i2"]
        )
        actual = df.to_xarray()
        assert actual["values"].shape == (1, 2)
def test_sel_drop(self) -> None:
data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
expected = Dataset({"foo": 1})
selected = data.sel(x=0, drop=True)
assert_identical(expected, selected)
expected = Dataset({"foo": 1}, {"x": 0})
selected = data.sel(x=0, drop=False)
assert_identical(expected, selected)
data = Dataset({"foo": ("x", [1, 2, 3])})
expected = Dataset({"foo": 1})
selected = data.sel(x=0, drop=True)
assert_identical(expected, selected)
def test_sel_drop_mindex(self) -> None:
midx = pd.MultiIndex.from_arrays([["a", "a"], [1, 2]], names=("foo", "bar"))
midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
data = Dataset(coords=midx_coords)
actual = data.sel(foo="a", drop=True)
assert "foo" not in actual.coords
actual = data.sel(foo="a", drop=False)
assert_equal(actual.foo, DataArray("a", coords={"foo": "a"}))
def test_isel_drop(self) -> None:
data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
expected = Dataset({"foo": 1})
selected = data.isel(x=0, drop=True)
assert_identical(expected, selected)
expected = Dataset({"foo": 1}, {"x": 0})
selected = data.isel(x=0, drop=False)
assert_identical(expected, selected)
    def test_head(self) -> None:
        """head() slices leading elements; invalid arguments raise."""
        data = create_test_data()
        expected = data.isel(time=slice(5), dim2=slice(6))
        actual = data.head(time=5, dim2=6)
        assert_equal(expected, actual)
        expected = data.isel(time=slice(0))
        actual = data.head(time=0)
        assert_equal(expected, actual)
        # a single int applies to every dimension
        expected = data.isel({dim: slice(6) for dim in data.dims})
        actual = data.head(6)
        assert_equal(expected, actual)
        # default is 5 elements per dimension
        expected = data.isel({dim: slice(5) for dim in data.dims})
        actual = data.head()
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.head([3]) # type: ignore[arg-type]
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.head(dim2=3.1)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.head(time=-3)
    def test_tail(self) -> None:
        """tail() slices trailing elements; invalid arguments raise."""
        data = create_test_data()
        expected = data.isel(time=slice(-5, None), dim2=slice(-6, None))
        actual = data.tail(time=5, dim2=6)
        assert_equal(expected, actual)
        expected = data.isel(dim1=slice(0))
        actual = data.tail(dim1=0)
        assert_equal(expected, actual)
        expected = data.isel({dim: slice(-6, None) for dim in data.dims})
        actual = data.tail(6)
        assert_equal(expected, actual)
        expected = data.isel({dim: slice(-5, None) for dim in data.dims})
        actual = data.tail()
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.tail([3]) # type: ignore[arg-type]
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.tail(dim2=3.1)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.tail(time=-3)
    def test_thin(self) -> None:
        """thin() takes every n-th element; zero/negative steps raise."""
        data = create_test_data()
        expected = data.isel(time=slice(None, None, 5), dim2=slice(None, None, 6))
        actual = data.thin(time=5, dim2=6)
        assert_equal(expected, actual)
        expected = data.isel({dim: slice(None, None, 6) for dim in data.dims})
        actual = data.thin(6)
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.thin([3]) # type: ignore[arg-type]
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.thin(dim2=3.1)
        with pytest.raises(ValueError, match=r"cannot be zero"):
            data.thin(time=0)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.thin(time=-3)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_sel_fancy(self) -> None:
data = create_test_data()
# add in a range() index
data["dim1"] = data.dim1
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
expected = data.isel(
dim1=Variable(("test_coord",), pdim1),
dim2=Variable(("test_coord",), pdim2),
dim3=Variable(("test_coord"), pdim3),
)
actual = data.sel(
dim1=Variable(("test_coord",), data.dim1[pdim1]),
dim2=Variable(("test_coord",), data.dim2[pdim2]),
dim3=Variable(("test_coord",), data.dim3[pdim3]),
)
assert_identical(expected, actual)
# DataArray Indexer
idx_t = DataArray(
data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_2 = DataArray(
data["dim2"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_3 = DataArray(
data["dim3"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)
expected = data.isel(
time=Variable(("a",), [3, 2, 1]),
dim2=Variable(("a",), [3, 2, 1]),
dim3=Variable(("a",), [3, 2, 1]),
)
expected = expected.assign_coords(a=idx_t["a"])
assert_identical(expected, actual)
idx_t = DataArray(
data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_2 = DataArray(
data["dim2"][[2, 1, 3]].values, dims=["b"], coords={"b": [0, 1, 2]}
)
idx_3 = DataArray(
data["dim3"][[1, 2, 1]].values, dims=["c"], coords={"c": [0.0, 1.1, 2.2]}
)
actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)
expected = data.isel(
time=Variable(("a",), [3, 2, 1]),
dim2=Variable(("b",), [2, 1, 3]),
dim3=Variable(("c",), [1, 2, 1]),
)
expected = expected.assign_coords(a=idx_t["a"], b=idx_2["b"], c=idx_3["c"])
assert_identical(expected, actual)
# test from sel_points
data = Dataset({"foo": (("x", "y"), np.arange(9).reshape(3, 3))})
data.coords.update({"x": [0, 1, 2], "y": [0, 1, 2]})
expected = Dataset(
{"foo": ("points", [0, 4, 8])},
coords={
"x": Variable(("points",), [0, 1, 2]),
"y": Variable(("points",), [0, 1, 2]),
},
)
actual = data.sel(
x=Variable(("points",), [0, 1, 2]), y=Variable(("points",), [0, 1, 2])
)
assert_identical(expected, actual)
expected.coords.update({"x": ("points", [0, 1, 2]), "y": ("points", [0, 1, 2])})
actual = data.sel(
x=Variable(("points",), [0.1, 1.1, 2.5]),
y=Variable(("points",), [0, 1.2, 2.0]),
method="pad",
)
assert_identical(expected, actual)
idx_x = DataArray([0, 1, 2], dims=["a"], coords={"a": ["a", "b", "c"]})
idx_y = DataArray([0, 2, 1], dims=["b"], coords={"b": [0, 3, 6]})
expected_ary = data["foo"][[0, 1, 2], [0, 2, 1]]
actual = data.sel(x=idx_x, y=idx_y)
assert_array_equal(expected_ary, actual["foo"])
assert_identical(actual["a"].drop_vars("x"), idx_x["a"])
assert_identical(actual["b"].drop_vars("y"), idx_y["b"])
with pytest.raises(KeyError):
data.sel(x=[2.5], y=[2.0], method="pad", tolerance=1e-3)
    def test_sel_method(self) -> None:
        """sel with method='nearest'/'backfill'/'ffill' and tolerance."""
        data = create_test_data()
        expected = data.sel(dim2=1)
        actual = data.sel(dim2=0.95, method="nearest")
        assert_identical(expected, actual)
        actual = data.sel(dim2=0.95, method="nearest", tolerance=1)
        assert_identical(expected, actual)
        with pytest.raises(KeyError):
            actual = data.sel(dim2=np.pi, method="nearest", tolerance=0)
        expected = data.sel(dim2=[1.5])
        actual = data.sel(dim2=[1.45], method="backfill")
        assert_identical(expected, actual)
        with pytest.raises(NotImplementedError, match=r"slice objects"):
            data.sel(dim2=slice(1, 3), method="ffill")
        with pytest.raises(TypeError, match=r"``method``"):
            # this should not pass silently
            data.sel(dim2=1, method=data) # type: ignore[arg-type]
        # cannot pass method if there is no associated coordinate
        with pytest.raises(ValueError, match=r"cannot supply"):
            data.sel(dim1=0, method="nearest")
    def test_loc(self) -> None:
        """.loc with a dict is equivalent to sel; non-dict lookup raises."""
        data = create_test_data()
        expected = data.sel(dim3="a")
        actual = data.loc[dict(dim3="a")]
        assert_identical(expected, actual)
        with pytest.raises(TypeError, match=r"can only lookup dict"):
            data.loc["a"] # type: ignore[index]
    def test_selection_multiindex(self) -> None:
        """Label/dict selection on a MultiIndex, incl. partial selections."""
        midx = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
        )
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        mdata = Dataset(data_vars={"var": ("x", range(8))}, coords=midx_coords)
        def test_sel(
            lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None
        ) -> None:
            # Compare label-based sel against the positional isel equivalent.
            # replaced_idx=True: partial selection replaces the MultiIndex, so
            # only data and (optionally renamed) dims are compared.
            ds = mdata.sel(x=lab_indexer)
            expected_ds = mdata.isel(x=pos_indexer)
            if not replaced_idx:
                assert_identical(ds, expected_ds)
            else:
                if renamed_dim:
                    assert ds["var"].dims[0] == renamed_dim
                    ds = ds.rename({renamed_dim: "x"})
                assert_identical(ds["var"].variable, expected_ds["var"].variable)
                assert not ds["x"].equals(expected_ds["x"])
        test_sel(("a", 1, -1), 0)
        test_sel(("b", 2, -2), -1)
        test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel(("a",), range(4), replaced_idx=True)
        test_sel("a", range(4), replaced_idx=True)
        test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7])
        test_sel(slice("a", "b"), range(8))
        test_sel(slice(("a", 1), ("b", 1)), range(6))
        test_sel({"one": "a", "two": 1, "three": -1}, 0)
        test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel({"one": "a"}, range(4), replaced_idx=True)
        # .loc and level-name kwargs are equivalent spellings of sel
        assert_identical(mdata.loc[{"x": {"one": "a"}}], mdata.sel(x={"one": "a"}))
        assert_identical(mdata.loc[{"x": "a"}], mdata.sel(x="a"))
        assert_identical(mdata.loc[{"x": ("a", 1)}], mdata.sel(x=("a", 1)))
        assert_identical(mdata.loc[{"x": ("a", 1, -1)}], mdata.sel(x=("a", 1, -1)))
        assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1))
    def test_broadcast_like(self) -> None:
        """broadcast_like matches the result of a full broadcast."""
        original1 = DataArray(
            np.random.randn(5), [("x", range(5))], name="a"
        ).to_dataset()
        original2 = DataArray(np.random.randn(6), [("y", range(6))], name="b")
        expected1, expected2 = broadcast(original1, original2)
        assert_identical(
            original1.broadcast_like(original2), expected1.transpose("y", "x")
        )
        assert_identical(original2.broadcast_like(original1), expected2)
    def test_to_pandas(self) -> None:
        """to_pandas: 0D -> Series, 1D -> DataFrame, 2D -> error."""
        # 0D -> series
        actual = Dataset({"a": 1, "b": 2}).to_pandas()
        expected = pd.Series([1, 2], ["a", "b"])
        assert_array_equal(actual, expected)
        # 1D -> dataframe
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        actual_df = ds.to_pandas()
        expected_df = ds.to_dataframe()
        assert expected_df.equals(actual_df), (expected_df, actual_df)
        # 2D -> error
        x2d = np.random.randn(10, 10)
        y2d = np.random.randn(10, 10)
        with pytest.raises(ValueError, match=r"cannot convert Datasets"):
            Dataset({"a": (["t", "r"], x2d), "b": (["t", "r"], y2d)}).to_pandas()
    def test_reindex_like(self) -> None:
        """reindex_like aligns onto another object's indexes, filling NaN."""
        data = create_test_data()
        data["letters"] = ("dim3", 10 * ["a"])
        expected = data.isel(dim1=slice(10), time=slice(13))
        actual = data.reindex_like(expected)
        assert_identical(actual, expected)
        # shifted index: trailing labels are missing and become NaN
        # (object/int vars are cast to object/float to hold NaN)
        expected = data.copy(deep=True)
        expected["dim3"] = ("dim3", list("cdefghijkl"))
        expected["var3"][:-2] = expected["var3"][2:].values
        expected["var3"][-2:] = np.nan
        expected["letters"] = expected["letters"].astype(object)
        expected["letters"][-2:] = np.nan
        expected["numbers"] = expected["numbers"].astype(float)
        expected["numbers"][:-2] = expected["numbers"][2:].values
        expected["numbers"][-2:] = np.nan
        actual = data.reindex_like(expected)
        assert_identical(actual, expected)
    def test_reindex(self) -> None:
        """reindex: identity, subsetting, dict args, fill values, errors."""
        data = create_test_data()
        assert_identical(data, data.reindex())
        expected = data.assign_coords(dim1=data["dim1"])
        actual = data.reindex(dim1=data["dim1"])
        assert_identical(actual, expected)
        actual = data.reindex(dim1=data["dim1"].values)
        assert_identical(actual, expected)
        actual = data.reindex(dim1=data["dim1"].to_index())
        assert_identical(actual, expected)
        # dim1 has no index, so reindexing along it is an error
        with pytest.raises(
            ValueError, match=r"cannot reindex or align along dimension"
        ):
            data.reindex(dim1=data["dim1"][:5])
        expected = data.isel(dim2=slice(5))
        actual = data.reindex(dim2=data["dim2"][:5])
        assert_identical(actual, expected)
        # test dict-like argument
        actual = data.reindex({"dim2": data["dim2"]})
        expected = data
        assert_identical(actual, expected)
        with pytest.raises(ValueError, match=r"cannot specify both"):
            data.reindex({"x": 0}, x=0)
        with pytest.raises(ValueError, match=r"dictionary"):
            data.reindex("foo") # type: ignore[arg-type]
        # invalid dimension
        # TODO: (benbovy - explicit indexes): uncomment?
        # --> from reindex docstrings: "any mismatched dimension is simply ignored"
        # with pytest.raises(ValueError, match=r"indexer keys.*not correspond.*"):
        #     data.reindex(invalid=0)
        # out of order
        expected = data.sel(dim2=data["dim2"][:5:-1])
        actual = data.reindex(dim2=data["dim2"][:5:-1])
        assert_identical(actual, expected)
        # multiple fill values
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(data=[[-20, -20, -20, -20]] * len(ds.dim1)),
        )
        actual = data.reindex(
            dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10, "var2": -20}
        )
        assert_identical(actual, expected)
        # use the default value
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(
                data=[[np.nan, np.nan, np.nan, np.nan]] * len(ds.dim1)
            ),
        )
        actual = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10})
        assert_identical(actual, expected)
        # regression test for #279
        expected = Dataset({"x": ("time", np.random.randn(5))}, {"time": range(5)})
        time2 = DataArray(np.arange(5), dims="time2")
        with pytest.raises(ValueError):
            actual = expected.reindex(time=time2)
        # another regression test
        ds = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 4)))}, {"x": range(3), "y": range(4)}
        )
        expected = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 2)))}, {"x": [0, 1, 3], "y": [0, 1]}
        )
        expected["foo"][-1] = np.nan
        actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
        assert_identical(expected, actual)
    def test_reindex_attrs_encoding(self) -> None:
        """reindex preserves coordinate attrs and encoding."""
        ds = Dataset(
            {"data": ("x", [1, 2, 3])},
            {"x": ("x", [0, 1, 2], {"foo": "bar"}, {"bar": "baz"})},
        )
        actual = ds.reindex(x=[0, 1])
        expected = Dataset(
            {"data": ("x", [1, 2])},
            {"x": ("x", [0, 1], {"foo": "bar"}, {"bar": "baz"})},
        )
        assert_identical(actual, expected)
        assert actual.x.encoding == expected.x.encoding
    def test_reindex_warning(self) -> None:
        """Mismatched-dim DataArray indexer errors; matching dim is silent."""
        data = create_test_data()
        with pytest.raises(ValueError):
            # DataArray with different dimension raises Future warning
            ind = xr.DataArray([0.0, 1.0], dims=["new_dim"], name="ind")
            data.reindex(dim2=ind)
        # Should not warn
        ind = xr.DataArray([0.0, 1.0], dims=["dim2"], name="ind")
        with warnings.catch_warnings(record=True) as ws:
            data.reindex(dim2=ind)
            assert len(ws) == 0
    def test_reindex_variables_copied(self) -> None:
        """Even with copy=False, reindex returns new variable objects."""
        data = create_test_data()
        reindexed_data = data.reindex(copy=False)
        for k in data.variables:
            assert reindexed_data.variables[k] is not data.variables[k]
    def test_reindex_method(self) -> None:
        """reindex with backfill/pad methods and scalar/per-label tolerance."""
        ds = Dataset({"x": ("y", [10, 20]), "y": [0, 1]})
        y = [-0.5, 0.5, 1.5]
        actual = ds.reindex(y=y, method="backfill")
        expected = Dataset({"x": ("y", [10, 20, np.nan]), "y": y})
        assert_identical(expected, actual)
        actual = ds.reindex(y=y, method="backfill", tolerance=0.1)
        expected = Dataset({"x": ("y", 3 * [np.nan]), "y": y})
        assert_identical(expected, actual)
        # tolerance may be list-like, one entry per target label
        actual = ds.reindex(y=y, method="backfill", tolerance=[0.1, 0.5, 0.1])
        expected = Dataset({"x": ("y", [np.nan, 20, np.nan]), "y": y})
        assert_identical(expected, actual)
        actual = ds.reindex(y=[0.1, 0.1, 1], tolerance=[0, 0.1, 0], method="nearest")
        expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": [0.1, 0.1, 1]})
        assert_identical(expected, actual)
        actual = ds.reindex(y=y, method="pad")
        expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": y})
        assert_identical(expected, actual)
        alt = Dataset({"y": y})
        actual = ds.reindex_like(alt, method="pad")
        assert_identical(expected, actual)
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}])
    def test_reindex_fill_value(self, fill_value) -> None:
        """reindex fill_value: default NA, scalar, or per-variable dict."""
        ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]})
        y = [0, 1, 2]
        actual = ds.reindex(y=y, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_x = fill_value_z = np.nan
        elif isinstance(fill_value, dict):
            fill_value_x = fill_value["x"]
            fill_value_z = fill_value["z"]
        else:
            fill_value_x = fill_value_z = fill_value
        expected = Dataset(
            {
                "x": ("y", [10, 20, fill_value_x]),
                "z": ("y", [-20, -10, fill_value_z]),
                "y": y,
            }
        )
        assert_identical(expected, actual)
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}])
    def test_reindex_like_fill_value(self, fill_value) -> None:
        """reindex_like honors the same fill_value options as reindex."""
        ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]})
        y = [0, 1, 2]
        alt = Dataset({"y": y})
        actual = ds.reindex_like(alt, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_x = fill_value_z = np.nan
        elif isinstance(fill_value, dict):
            fill_value_x = fill_value["x"]
            fill_value_z = fill_value["z"]
        else:
            fill_value_x = fill_value_z = fill_value
        expected = Dataset(
            {
                "x": ("y", [10, 20, fill_value_x]),
                "z": ("y", [-20, -10, fill_value_z]),
                "y": y,
            }
        )
        assert_identical(expected, actual)
    @pytest.mark.parametrize("dtype", [str, bytes])
    def test_reindex_str_dtype(self, dtype) -> None:
        """reindex preserves str/bytes coordinate dtypes."""
        data = Dataset({"data": ("x", [1, 2]), "x": np.array(["a", "b"], dtype=dtype)})
        actual = data.reindex(x=data.x)
        expected = data
        assert_identical(expected, actual)
        assert actual.x.dtype == expected.x.dtype
    def test_reindex_with_multiindex_level(self) -> None:
        """Reindexing onto a MultiIndex level's values works."""
        # test for https://github.com/pydata/xarray/issues/10347
        mindex = pd.MultiIndex.from_product(
            [[100, 200, 300], [1, 2, 3, 4]], names=["x", "y"]
        )
        y_idx = PandasIndex(mindex.levels[1], "y")
        ds1 = xr.Dataset(coords={"y": [1, 2, 3]})
        ds2 = xr.Dataset(coords=xr.Coordinates.from_xindex(y_idx))
        actual = ds1.reindex(y=ds2.y)
        assert_identical(actual, ds2)
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": 2, "bar": 1}])
    def test_align_fill_value(self, fill_value) -> None:
        """Outer align fills missing labels per the fill_value option."""
        x = Dataset({"foo": DataArray([1, 2], dims=["x"], coords={"x": [1, 2]})})
        y = Dataset({"bar": DataArray([1, 2], dims=["x"], coords={"x": [1, 3]})})
        x2, y2 = align(x, y, join="outer", fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_foo = fill_value_bar = np.nan
        elif isinstance(fill_value, dict):
            fill_value_foo = fill_value["foo"]
            fill_value_bar = fill_value["bar"]
        else:
            fill_value_foo = fill_value_bar = fill_value
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [1, 2, fill_value_foo], dims=["x"], coords={"x": [1, 2, 3]}
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [1, fill_value_bar, 2], dims=["x"], coords={"x": [1, 2, 3]}
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_align(self) -> None:
        """align with inner/outer/left/right joins on overlapping indexes."""
        left = create_test_data()
        right = left.copy(deep=True)
        # shift right's dim3 index by two labels relative to left
        right["dim3"] = ("dim3", list("cdefghijkl"))
        right["var3"][:-2] = right["var3"][2:].values
        right["var3"][-2:] = np.random.randn(*right["var3"][-2:].shape)
        right["numbers"][:-2] = right["numbers"][2:].values
        right["numbers"][-2:] = -10
        intersection = list("cdefghij")
        union = list("abcdefghijkl")
        left2, right2 = align(left, right, join="inner")
        assert_array_equal(left2["dim3"], intersection)
        assert_identical(left2, right2)
        left2, right2 = align(left, right, join="outer")
        assert_array_equal(left2["dim3"], union)
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(left2["var3"][-2:]).all()
        assert np.isnan(right2["var3"][:2]).all()
        left2, right2 = align(left, right, join="left")
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_equal(left2["dim3"].variable, left["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(right2["var3"][:2]).all()
        left2, right2 = align(left, right, join="right")
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_equal(left2["dim3"].variable, right["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(left2["var3"][-2:]).all()
        with pytest.raises(ValueError, match=r"invalid value for join"):
            align(left, right, join="foobar") # type: ignore[call-overload]
        with pytest.raises(TypeError):
            align(left, right, foo="bar") # type: ignore[call-overload]
    def test_align_exact(self) -> None:
        """join='exact' succeeds on equal indexes, raises on unequal ones."""
        left = xr.Dataset(coords={"x": [0, 1]})
        right = xr.Dataset(coords={"x": [1, 2]})
        left1, left2 = xr.align(left, left, join="exact")
        assert_identical(left1, left)
        assert_identical(left2, left)
        with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*not equal.*"):
            xr.align(left, right, join="exact")
    def test_align_override(self) -> None:
        """join='override' copies the first object's indexes onto the rest."""
        left = xr.Dataset(coords={"x": [0, 1, 2]})
        right = xr.Dataset(coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]})
        expected_right = xr.Dataset(coords={"x": [0, 1, 2], "y": [1, 2, 3]})
        new_left, new_right = xr.align(left, right, join="override")
        assert_identical(left, new_left)
        assert_identical(new_right, expected_right)
        new_left, new_right = xr.align(left, right, exclude="x", join="override")
        assert_identical(left, new_left)
        assert_identical(right, new_right)
        new_left, new_right = xr.align(
            left.isel(x=0, drop=True), right, exclude="x", join="override"
        )
        assert_identical(left.isel(x=0, drop=True), new_left)
        assert_identical(right, new_right)
        # override requires equally sized indexes
        with pytest.raises(
            ValueError, match=r"cannot align.*join.*override.*same size"
        ):
            xr.align(left.isel(x=0).expand_dims("x"), right, join="override")
    def test_align_exclude(self) -> None:
        """exclude= keeps the listed dimensions out of the alignment."""
        x = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]}
                )
            }
        )
        y = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 3], "y": [5, 6]}
                )
            }
        )
        x2, y2 = align(x, y, exclude=["y"], join="outer")
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4], [np.nan, np.nan]],
                    dims=["x", "y"],
                    coords={"x": [1, 2, 3], "y": [3, 4]},
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [np.nan, np.nan], [3, 4]],
                    dims=["x", "y"],
                    coords={"x": [1, 2, 3], "y": [5, 6]},
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_align_nocopy(self) -> None:
        """copy=False shares the underlying data when no reindex is needed."""
        x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [1, 2, 3])])})
        y = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
        expected_x2 = x
        expected_y2 = Dataset(
            {"foo": DataArray([1, 2, np.nan], coords=[("x", [1, 2, 3])])}
        )
        x2, y2 = align(x, y, copy=False, join="outer")
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        assert source_ndarray(x["foo"].data) is source_ndarray(x2["foo"].data)
        x2, y2 = align(x, y, copy=True, join="outer")
        assert source_ndarray(x["foo"].data) is not source_ndarray(x2["foo"].data)
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_align_indexes(self) -> None:
        """align with explicit indexes= reorders onto the given labels."""
        x = Dataset({"foo": DataArray([1, 2, 3], dims="x", coords=[("x", [1, 2, 3])])})
        (x2,) = align(x, indexes={"x": [2, 3, 1]})
        expected_x2 = Dataset(
            {"foo": DataArray([2, 3, 1], dims="x", coords={"x": [2, 3, 1]})}
        )
        assert_identical(expected_x2, x2)
    def test_align_multiple_indexes_common_dim(self) -> None:
        """Multiple indexes sharing a dim align consistently or error."""
        a = Dataset(coords={"x": [1, 2], "xb": ("x", [3, 4])}).set_xindex("xb")
        b = Dataset(coords={"x": [1], "xb": ("x", [3])}).set_xindex("xb")
        (a2, b2) = align(a, b, join="inner")
        assert_identical(a2, b, check_default_indexes=False)
        assert_identical(b2, b, check_default_indexes=False)
        # conflicting re-indexers along the shared dim must raise
        c = Dataset(coords={"x": [1, 3], "xb": ("x", [2, 4])}).set_xindex("xb")
        with pytest.raises(AlignmentError, match=r".*conflicting re-indexers"):
            align(a, c)
    def test_align_conflicting_indexes(self) -> None:
        """Different index classes on the same coordinate cannot be aligned."""
        class CustomIndex(PandasIndex): ...
        a = Dataset(coords={"xb": ("x", [3, 4])}).set_xindex("xb")
        b = Dataset(coords={"xb": ("x", [3])}).set_xindex("xb", CustomIndex)
        with pytest.raises(AlignmentError, match=r"cannot align.*conflicting indexes"):
            align(a, b)
    def test_align_non_unique(self) -> None:
        """Identical non-unique indexes align; reindexing them raises."""
        x = Dataset({"foo": ("x", [3, 4, 5]), "x": [0, 0, 1]})
        x1, x2 = align(x, x)
        assert_identical(x1, x)
        assert_identical(x2, x)
        y = Dataset({"bar": ("x", [6, 7]), "x": [0, 1]})
        with pytest.raises(ValueError, match=r"cannot reindex or align"):
            align(x, y)
    def test_align_str_dtype(self) -> None:
        """Outer align of string indexes keeps the string dtype."""
        a = Dataset({"foo": ("x", [0, 1])}, coords={"x": ["a", "b"]})
        b = Dataset({"foo": ("x", [1, 2])}, coords={"x": ["b", "c"]})
        expected_a = Dataset(
            {"foo": ("x", [0, 1, np.nan])}, coords={"x": ["a", "b", "c"]}
        )
        expected_b = Dataset(
            {"foo": ("x", [np.nan, 1, 2])}, coords={"x": ["a", "b", "c"]}
        )
        actual_a, actual_b = xr.align(a, b, join="outer")
        assert_identical(expected_a, actual_a)
        assert expected_a.x.dtype == actual_a.x.dtype
        assert_identical(expected_b, actual_b)
        assert expected_b.x.dtype == actual_b.x.dtype
@pytest.mark.parametrize("join", ["left", "override"])
def test_align_index_var_attrs(self, join) -> None:
    """Alignment must not mutate the index-variable attrs of its inputs."""
    # regression test https://github.com/pydata/xarray/issues/6852
    # aligning two objects should have no side effect on their index variable
    # metadata.
    ds = Dataset(coords={"x": ("x", [1, 2, 3], {"units": "m"})})
    ds_noattr = Dataset(coords={"x": ("x", [1, 2, 3])})
    xr.align(ds_noattr, ds, join=join)
    # both inputs keep exactly the attrs they started with
    assert ds.x.attrs == {"units": "m"}
    assert ds_noattr.x.attrs == {}
def test_align_scalar_index(self) -> None:
    """Indexes on scalar coordinates participate in alignment."""
    # ensure that indexes associated with scalar coordinates are not ignored
    # during alignment
    ds1 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex)
    ds2 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex)
    actual = xr.align(ds1, ds2, join="exact")
    assert_identical(actual[0], ds1, check_default_indexes=False)
    assert_identical(actual[1], ds2, check_default_indexes=False)
    ds3 = Dataset(coords={"x": 1}).set_xindex("x", ScalarIndex)
    # differing scalar values must be detected by join="exact"
    with pytest.raises(AlignmentError, match="cannot align objects"):
        xr.align(ds1, ds3, join="exact")
def test_align_multi_dim_index_exclude_dims(self) -> None:
    """`exclude` skips comparison of a multi-dimensional index along the
    excluded dimension; join="override" cannot exclude such a dimension."""
    ds1 = (
        Dataset(coords={"x": [1, 2], "y": [3, 4]})
        .drop_indexes(["x", "y"])
        .set_xindex(["x", "y"], XYIndex)
    )
    ds2 = (
        Dataset(coords={"x": [1, 2], "y": [5, 6]})
        .drop_indexes(["x", "y"])
        .set_xindex(["x", "y"], XYIndex)
    )
    # with "y" excluded, the differing y labels are ignored and both
    # objects pass through unchanged
    for join in ("outer", "exact"):
        actual = xr.align(ds1, ds2, join=join, exclude="y")
        assert_identical(actual[0], ds1, check_default_indexes=False)
        assert_identical(actual[1], ds2, check_default_indexes=False)
    # without exclusion the y labels conflict
    with pytest.raises(
        AlignmentError, match=r"cannot align objects.*index.*not equal"
    ):
        xr.align(ds1, ds2, join="exact")
    with pytest.raises(AlignmentError, match="cannot exclude dimension"):
        xr.align(ds1, ds2, join="override", exclude="y")
def test_align_index_equals_future_warning(self) -> None:
    """Custom indexes whose ``equals`` still uses the deprecated signature
    (no ``exclude`` kwarg) trigger a FutureWarning during alignment."""
    # TODO: remove this test once the deprecation cycle is completed
    class DeprecatedEqualsSignatureIndex(PandasIndex):
        def equals(self, other: Index) -> bool:  # type: ignore[override]
            return super().equals(other, exclude=None)
    ds = (
        Dataset(coords={"x": [1, 2]})
        .drop_indexes("x")
        .set_xindex("x", DeprecatedEqualsSignatureIndex)
    )
    with pytest.warns(FutureWarning, match=r"signature.*deprecated"):
        xr.align(ds, ds.copy(), join="exact")
def test_broadcast(self) -> None:
    """broadcast() expands all variables to common dimensions, for a single
    Dataset, two Datasets, and a Dataset/DataArray mix."""
    ds = Dataset(
        {"foo": 0, "bar": ("x", [1]), "baz": ("y", [2, 3])}, {"c": ("x", [4])}
    )
    expected = Dataset(
        {
            "foo": (("x", "y"), [[0, 0]]),
            "bar": (("x", "y"), [[1, 1]]),
            "baz": (("x", "y"), [[2, 3]]),
        },
        {"c": ("x", [4])},
    )
    (actual,) = broadcast(ds)
    assert_identical(expected, actual)
    ds_x = Dataset({"foo": ("x", [1])})
    ds_y = Dataset({"bar": ("y", [2, 3])})
    expected_x = Dataset({"foo": (("x", "y"), [[1, 1]])})
    expected_y = Dataset({"bar": (("x", "y"), [[2, 3]])})
    actual_x, actual_y = broadcast(ds_x, ds_y)
    assert_identical(expected_x, actual_x)
    assert_identical(expected_y, actual_y)
    # mixing Dataset and DataArray inputs works the same way
    array_y = ds_y["bar"]
    expected_y2 = expected_y["bar"]
    actual_x2, actual_y2 = broadcast(ds_x, array_y)
    assert_identical(expected_x, actual_x2)
    assert_identical(expected_y2, actual_y2)
def test_broadcast_nocopy(self) -> None:
    """broadcast() shares the underlying ndarray when no expansion is
    required for a variable."""
    # Test that data is not copied if not needed
    x = Dataset({"foo": (("x", "y"), [[1, 1]])})
    y = Dataset({"bar": ("y", [2, 3])})
    (actual_x,) = broadcast(x)
    assert_identical(x, actual_x)
    assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
    actual_x, _actual_y = broadcast(x, y)
    assert_identical(x, actual_x)
    assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
def test_broadcast_exclude(self) -> None:
    """Dimensions named in ``exclude`` are not broadcast, so each output
    keeps its own (possibly different) labels along that dimension."""
    x = Dataset(
        {
            "foo": DataArray(
                [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]}
            ),
            "bar": DataArray(5),
        }
    )
    y = Dataset(
        {
            "foo": DataArray(
                [[1, 2]], dims=["z", "y"], coords={"z": [1], "y": [5, 6]}
            )
        }
    )
    x2, y2 = broadcast(x, y, exclude=["y"])
    # "x" and "z" are broadcast; "y" retains per-object labels ([3, 4]
    # for x2, [5, 6] for y2)
    expected_x2 = Dataset(
        {
            "foo": DataArray(
                [[[1, 2]], [[3, 4]]],
                dims=["x", "z", "y"],
                coords={"z": [1], "x": [1, 2], "y": [3, 4]},
            ),
            "bar": DataArray(
                [[5], [5]], dims=["x", "z"], coords={"x": [1, 2], "z": [1]}
            ),
        }
    )
    expected_y2 = Dataset(
        {
            "foo": DataArray(
                [[[1, 2]], [[1, 2]]],
                dims=["x", "z", "y"],
                coords={"z": [1], "x": [1, 2], "y": [5, 6]},
            )
        }
    )
    assert_identical(expected_x2, x2)
    assert_identical(expected_y2, y2)
def test_broadcast_misaligned(self) -> None:
    """broadcast() aligns mismatched indexes first (outer join, NaN fill)
    before expanding dimensions."""
    x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [-1, -2, -3])])})
    y = Dataset(
        {
            "bar": DataArray(
                [[1, 2], [3, 4]],
                dims=["y", "x"],
                coords={"y": [1, 2], "x": [10, -3]},
            )
        }
    )
    x2, y2 = broadcast(x, y)
    # union of x labels is sorted ([-3, -2, -1, 10]); missing entries NaN
    expected_x2 = Dataset(
        {
            "foo": DataArray(
                [[3, 3], [2, 2], [1, 1], [np.nan, np.nan]],
                dims=["x", "y"],
                coords={"y": [1, 2], "x": [-3, -2, -1, 10]},
            )
        }
    )
    expected_y2 = Dataset(
        {
            "bar": DataArray(
                [[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]],
                dims=["x", "y"],
                coords={"y": [1, 2], "x": [-3, -2, -1, 10]},
            )
        }
    )
    assert_identical(expected_x2, x2)
    assert_identical(expected_y2, y2)
def test_broadcast_multi_index(self) -> None:
    """Broadcasting a stacked object keeps all multi-index level
    coordinates attached to the same (shared) index object."""
    # GH6430
    ds = Dataset(
        {"foo": (("x", "y", "z"), np.ones((3, 4, 2)))},
        {"x": ["a", "b", "c"], "y": [1, 2, 3, 4]},
    )
    stacked = ds.stack(space=["x", "y"])
    broadcasted, _ = broadcast(stacked, stacked.space)
    # identity check: levels must share the single multi-index instance
    assert broadcasted.xindexes["x"] is broadcasted.xindexes["space"]
    assert broadcasted.xindexes["y"] is broadcasted.xindexes["space"]
def test_variable_indexing(self) -> None:
    """DataArrays, boolean conditions, ranges and ``.loc`` all index a
    variable equivalently to plain positional slices."""
    data = create_test_data()
    v = data["var1"]
    d1 = data["dim1"]
    d2 = data["dim2"]
    assert_equal(v, v[d1.values])
    assert_equal(v, v[d1])
    assert_equal(v[:3], v[d1 < 3])
    assert_equal(v[:, 3:], v[:, d2 >= 1.5])
    assert_equal(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
    assert_equal(v[:3, :2], v[range(3), range(2)])
    assert_equal(v[:3, :2], v.loc[d1[:3], d2[:2]])
def test_drop_variables(self) -> None:
    """drop_vars accepts names or lists, honors errors="ignore", and the
    deprecated ``drop`` alias behaves the same (with a warning)."""
    data = create_test_data()
    assert_identical(data, data.drop_vars([]))
    expected = Dataset({k: data[k] for k in data.variables if k != "time"})
    actual = data.drop_vars("time")
    assert_identical(expected, actual)
    actual = data.drop_vars(["time"])
    assert_identical(expected, actual)
    with pytest.raises(
        ValueError,
        match=re.escape(
            "These variables cannot be found in this dataset: ['not_found_here']"
        ),
    ):
        data.drop_vars("not_found_here")
    # errors="ignore" silently skips missing names
    actual = data.drop_vars("not_found_here", errors="ignore")
    assert_identical(data, actual)
    actual = data.drop_vars(["not_found_here"], errors="ignore")
    assert_identical(data, actual)
    actual = data.drop_vars(["time", "not_found_here"], errors="ignore")
    assert_identical(expected, actual)
    # deprecated approach with `drop` works (straight copy paste from above)
    with pytest.warns(DeprecationWarning):
        actual = data.drop("not_found_here", errors="ignore")
    assert_identical(data, actual)
    with pytest.warns(DeprecationWarning):
        actual = data.drop(["not_found_here"], errors="ignore")
    assert_identical(data, actual)
    with pytest.warns(DeprecationWarning):
        actual = data.drop(["time", "not_found_here"], errors="ignore")
    assert_identical(expected, actual)
    with pytest.warns(DeprecationWarning):
        actual = data.drop({"time", "not_found_here"}, errors="ignore")
    assert_identical(expected, actual)
def test_drop_multiindex_level(self) -> None:
    """Dropping a single level of a multi-index removes the whole index
    (deprecated behavior, so a DeprecationWarning is expected)."""
    ds = create_test_multiindex()
    without_index = ds.drop_vars(["x", "level_1", "level_2"])
    with pytest.warns(DeprecationWarning):
        result = ds.drop_vars("level_1")
    assert_identical(without_index, result)
def test_drop_multiindex_labels(self) -> None:
    """drop_sel on a multi-indexed dim removes the matching tuple label."""
    data = create_test_multiindex()
    # expected index after removing the ("a", 1) entry
    mindex = pd.MultiIndex.from_tuples(
        [
            ("a", 2),
            ("b", 1),
            ("b", 2),
        ],
        names=("level_1", "level_2"),
    )
    expected = Dataset({}, Coordinates.from_pandas_multiindex(mindex, "x"))
    actual = data.drop_sel(x=("a", 1))
    assert_identical(expected, actual)
def test_drop_index_labels(self) -> None:
    """Label-based dropping via the deprecated ``drop(..., dim=...)`` and
    the replacement ``drop_sel``, including error modes and DataArray
    labels."""
    data = Dataset({"A": (["x", "y"], np.random.randn(2, 3)), "x": ["a", "b"]})
    with pytest.warns(DeprecationWarning):
        actual = data.drop(["a"], dim="x")
    expected = data.isel(x=[1])
    assert_identical(expected, actual)
    with pytest.warns(DeprecationWarning):
        actual = data.drop(["a", "b"], dim="x")
    expected = data.isel(x=slice(0, 0))
    assert_identical(expected, actual)
    with pytest.raises(KeyError):
        # not contained in axis
        with pytest.warns(DeprecationWarning):
            data.drop(["c"], dim="x")
    with pytest.warns(DeprecationWarning):
        actual = data.drop(["c"], dim="x", errors="ignore")
    assert_identical(data, actual)
    with pytest.raises(ValueError):
        data.drop(["c"], dim="x", errors="wrong_value")  # type: ignore[arg-type]
    with pytest.warns(DeprecationWarning):
        actual = data.drop(["a", "b", "c"], "x", errors="ignore")
    expected = data.isel(x=slice(0, 0))
    assert_identical(expected, actual)
    # DataArrays as labels are a nasty corner case as they are not
    # Iterable[Hashable] - DataArray.__iter__ yields scalar DataArrays.
    actual = data.drop_sel(x=DataArray(["a", "b", "c"]), errors="ignore")
    expected = data.isel(x=slice(0, 0))
    assert_identical(expected, actual)
    with pytest.warns(DeprecationWarning):
        data.drop(DataArray(["a", "b", "c"]), dim="x", errors="ignore")
    assert_identical(expected, actual)
    actual = data.drop_sel(y=[1])
    expected = data.isel(y=[0, 2])
    assert_identical(expected, actual)
    # scalar (non-list) missing label raises
    with pytest.raises(KeyError, match=r"not found in axis"):
        data.drop_sel(x=0)
def test_drop_labels_by_keyword(self) -> None:
    """drop_sel keyword form on one or several dims; mixing the ``labels``
    positional style with keywords is rejected."""
    data = Dataset(
        {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)}
    )
    # Basic functionality.
    assert len(data.coords["x"]) == 2
    with pytest.warns(DeprecationWarning):
        ds1 = data.drop(["a"], dim="x")
    ds2 = data.drop_sel(x="a")
    ds3 = data.drop_sel(x=["a"])
    ds4 = data.drop_sel(x=["a", "b"])
    ds5 = data.drop_sel(x=["a", "b"], y=range(0, 6, 2))
    arr = DataArray(range(3), dims=["c"])
    with pytest.warns(DeprecationWarning):
        data.drop(arr.coords)
    with pytest.warns(DeprecationWarning):
        data.drop(arr.xindexes)
    assert_array_equal(ds1.coords["x"], ["b"])
    assert_array_equal(ds2.coords["x"], ["b"])
    assert_array_equal(ds3.coords["x"], ["b"])
    assert ds4.coords["x"].size == 0
    assert ds5.coords["x"].size == 0
    assert_array_equal(ds5.coords["y"], [1, 3, 5])
    # Error handling if user tries both approaches.
    with pytest.raises(ValueError):
        data.drop(labels=["a"], x="a")
    with pytest.raises(ValueError):
        data.drop(labels=["a"], dim="x", x="a")
    # NOTE(review): this mutates the process-global warning filters with no
    # catch_warnings guard — presumably to silence the drop deprecation for
    # the next call; confirm this leak into later tests is intentional.
    warnings.filterwarnings("ignore", r"\W*drop")
    with pytest.raises(ValueError):
        data.drop(dim="x", x="a")
def test_drop_labels_by_position(self) -> None:
    """drop_isel mirrors drop_sel but selects by integer position."""
    data = Dataset(
        {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)}
    )
    # Basic functionality.
    assert len(data.coords["x"]) == 2
    actual = data.drop_isel(x=0)
    expected = data.drop_sel(x="a")
    assert_identical(expected, actual)
    actual = data.drop_isel(x=[0])
    expected = data.drop_sel(x=["a"])
    assert_identical(expected, actual)
    actual = data.drop_isel(x=[0, 1])
    expected = data.drop_sel(x=["a", "b"])
    assert_identical(expected, actual)
    assert actual.coords["x"].size == 0
    actual = data.drop_isel(x=[0, 1], y=range(0, 6, 2))
    expected = data.drop_sel(x=["a", "b"], y=range(0, 6, 2))
    assert_identical(expected, actual)
    assert actual.coords["x"].size == 0
    # unknown dimension name
    with pytest.raises(KeyError):
        data.drop_isel(z=1)
def test_drop_indexes(self) -> None:
    """drop_indexes removes the index while keeping the coordinate as a
    plain Variable; invalid targets raise, errors="ignore" skips them."""
    ds = Dataset(
        coords={
            "x": ("x", [0, 1, 2]),
            "y": ("y", [3, 4, 5]),
            "foo": ("x", ["a", "a", "b"]),
        }
    )
    actual = ds.drop_indexes("x")
    assert "x" not in actual.xindexes
    # coordinate demoted from IndexVariable to base Variable
    assert type(actual.x.variable) is Variable
    actual = ds.drop_indexes(["x", "y"])
    assert "x" not in actual.xindexes
    assert "y" not in actual.xindexes
    assert type(actual.x.variable) is Variable
    assert type(actual.y.variable) is Variable
    with pytest.raises(
        ValueError,
        match=r"The coordinates \('not_a_coord',\) are not found in the dataset coordinates",
    ):
        ds.drop_indexes("not_a_coord")
    with pytest.raises(ValueError, match="those coordinates do not have an index"):
        ds.drop_indexes("foo")
    actual = ds.drop_indexes(["foo", "not_a_coord"], errors="ignore")
    assert_identical(actual, ds)
    # test index corrupted
    midx = pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["a", "b"])
    midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
    ds = Dataset(coords=midx_coords)
    # dropping a single multi-index level would leave the index invalid
    with pytest.raises(ValueError, match=r".*would corrupt the following index.*"):
        ds.drop_indexes("a")
def test_drop_dims(self) -> None:
    """drop_dims removes every variable using the given dimension(s);
    non-dimension names raise unless errors="ignore"."""
    data = xr.Dataset(
        {
            "A": (["x", "y"], np.random.randn(2, 3)),
            "B": ("x", np.random.randn(2)),
            "x": ["a", "b"],
            "z": np.pi,
        }
    )
    actual = data.drop_dims("x")
    expected = data.drop_vars(["A", "B", "x"])
    assert_identical(expected, actual)
    actual = data.drop_dims("y")
    expected = data.drop_vars("A")
    assert_identical(expected, actual)
    actual = data.drop_dims(["x", "y"])
    expected = data.drop_vars(["A", "B", "x"])
    assert_identical(expected, actual)
    with pytest.raises((ValueError, KeyError)):
        data.drop_dims("z")  # not a dimension
    with pytest.raises((ValueError, KeyError)):
        data.drop_dims(None)  # type:ignore[arg-type]
    actual = data.drop_dims("z", errors="ignore")
    assert_identical(data, actual)
    # should this be allowed?
    actual = data.drop_dims(None, errors="ignore")  # type:ignore[arg-type]
    assert_identical(data, actual)
    with pytest.raises(ValueError):
        actual = data.drop_dims("z", errors="wrong_value")  # type: ignore[arg-type]
    actual = data.drop_dims(["x", "y", "z"], errors="ignore")
    expected = data.drop_vars(["A", "B", "x"])
    assert_identical(expected, actual)
def test_copy(self) -> None:
    """Shallow copies share data-variable buffers and attr objects; deep
    copies duplicate variables and attrs."""
    data = create_test_data()
    data.attrs["Test"] = [1, 2, 3]
    for copied in [data.copy(deep=False), copy(data)]:
        assert_identical(data, copied)
        assert data.encoding == copied.encoding
        # Note: IndexVariable objects with string dtype are always
        # copied because of xarray.core.indexes.safe_cast_to_index.
        # Limiting the test to data variables.
        for k in data.data_vars:
            v0 = data.variables[k]
            v1 = copied.variables[k]
            # shallow copy: same underlying ndarray
            assert source_ndarray(v0.data) is source_ndarray(v1.data)
        copied["foo"] = ("z", np.arange(5))
        assert "foo" not in data
        copied.attrs["foo"] = "bar"
        assert "foo" not in data.attrs
        # attrs dict is copied, but its values are still shared objects
        assert data.attrs["Test"] is copied.attrs["Test"]
    for copied in [data.copy(deep=True), deepcopy(data)]:
        assert_identical(data, copied)
        for k, v0 in data.variables.items():
            v1 = copied.variables[k]
            assert v0 is not v1
        # deep copy duplicates attr values too
        assert data.attrs["Test"] is not copied.attrs["Test"]
def test_copy_with_data(self) -> None:
    """copy(data=...) swaps in new data-variable values without changing
    anything else."""
    base = create_test_data()
    replacement = {
        name: np.random.randn(*var.shape) for name, var in base.data_vars.items()
    }
    result = base.copy(data=replacement)
    expected = base.copy()
    for name, values in replacement.items():
        expected[name].data = values
    assert_identical(expected, result)
@pytest.mark.xfail(raises=AssertionError)
@pytest.mark.parametrize(
    "deep, expected_orig",
    [
        [
            True,
            xr.DataArray(
                xr.IndexVariable("a", np.array([1, 2])),
                coords={"a": [1, 2]},
                dims=["a"],
            ),
        ],
        [
            False,
            xr.DataArray(
                xr.IndexVariable("a", np.array([999, 2])),
                coords={"a": [999, 2]},
                dims=["a"],
            ),
        ],
    ],
)
def test_copy_coords(self, deep, expected_orig) -> None:
    """The test fails for the shallow copy, and apparently only on Windows
    for some reason. In windows coords seem to be immutable unless it's one
    dataset deep copied from another."""
    # NOTE: marked xfail(AssertionError) — the shallow-copy case is a
    # known failure (see docstring above).
    ds = xr.DataArray(
        np.ones([2, 2, 2]),
        coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]},
        dims=["a", "b", "c"],
        name="value",
    ).to_dataset()
    ds_cp = ds.copy(deep=deep)
    # mutate the "a" coordinate of the copy only
    new_a = np.array([999, 2])
    ds_cp.coords["a"] = ds_cp.a.copy(data=new_a)
    expected_cp = xr.DataArray(
        xr.IndexVariable("a", new_a),
        coords={"a": [999, 2]},
        dims=["a"],
    )
    assert_identical(ds_cp.coords["a"], expected_cp)
    # deep copy: original untouched; shallow copy: mutation leaks back
    assert_identical(ds.coords["a"], expected_orig)
def test_copy_with_data_errors(self) -> None:
    """copy(data=...) rejects non-mapping inputs and mappings whose keys
    don't exactly match the original data variables."""
    ds = create_test_data()
    replacement = np.arange(ds["var1"].size).reshape(ds["var1"].shape)
    with pytest.raises(ValueError, match=r"Data must be dict-like"):
        ds.copy(data=replacement)  # type: ignore[arg-type]
    with pytest.raises(ValueError, match=r"only contain variables in original"):
        ds.copy(data={"not_in_original": replacement})
    with pytest.raises(ValueError, match=r"contain all variables in original"):
        ds.copy(data={"var1": replacement})
def test_drop_encoding(self) -> None:
    """drop_encoding clears both dataset-level and per-variable encoding
    while leaving the data itself equal."""
    ds = create_test_data()
    ds.encoding = {"foo": "bar"}
    for name in ds.variables.keys():
        ds[name].encoding = {"scale_factor": 10}
    stripped = ds.drop_encoding()
    assert stripped.encoding == {}
    for var in stripped.variables.values():
        assert var.encoding == {}
    assert_equal(stripped, ds)
def test_rename(self) -> None:
    """rename() renames variables and dimensions, preserves values/attrs/
    encoding/variable type, validates names, and is lazy (no data access)."""
    data = create_test_data()
    newnames = {
        "var1": "renamed_var1",
        "dim2": "renamed_dim2",
    }
    renamed = data.rename(newnames)
    # rebuild the expected variable mapping by hand and compare
    variables = dict(data.variables)
    for nk, nv in newnames.items():
        variables[nv] = variables.pop(nk)
    for k, v in variables.items():
        dims = list(v.dims)
        for name, newname in newnames.items():
            if name in dims:
                dims[dims.index(name)] = newname
        assert_equal(
            Variable(dims, v.values, v.attrs),
            renamed[k].variable.to_base_variable(),
        )
        assert v.encoding == renamed[k].encoding
        assert type(v) is type(renamed.variables[k])
    assert "var1" not in renamed
    assert "dim2" not in renamed
    with pytest.raises(ValueError, match=r"cannot rename 'not_a_var'"):
        data.rename({"not_a_var": "nada"})
    with pytest.raises(ValueError, match=r"'var1' conflicts"):
        data.rename({"var2": "var1"})
    # verify that we can rename a variable without accessing the data
    var1 = data["var1"]
    data["var1"] = (var1.dims, InaccessibleArray(var1.values))
    renamed = data.rename(newnames)
    with pytest.raises(UnexpectedDataAccess):
        _ = renamed["renamed_var1"].values
    # https://github.com/python/mypy/issues/10008
    renamed_kwargs = data.rename(**newnames)  # type: ignore[arg-type]
    assert_identical(renamed, renamed_kwargs)
def test_rename_old_name(self) -> None:
    """Regression test for GH1477: two variables renamed to the same target
    must fail, while swapping two names is allowed."""
    ds = create_test_data()
    with pytest.raises(ValueError, match=r"'samecol' conflicts"):
        ds.rename({"var1": "samecol", "var2": "samecol"})
    # Exchanging names should work without raising.
    ds.rename({"var1": "var2", "var2": "var1"})
def test_rename_same_name(self) -> None:
    """Renaming names to themselves is a no-op."""
    ds = create_test_data()
    identity_map = {"var1": "var1", "dim2": "dim2"}
    result = ds.rename(identity_map)
    assert_identical(result, ds)
def test_rename_dims(self) -> None:
    """rename_dims renames only dimensions (dict or kwargs form) and
    rejects unknown dims or collisions with existing names."""
    original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42})
    expected = Dataset(
        {"x": ("x_new", [0, 1, 2]), "y": ("x_new", [10, 11, 12]), "z": 42}
    )
    # TODO: (benbovy - explicit indexes) update when set_index supports
    # setting index for non-dimension variables
    expected = expected.set_coords("x")
    actual = original.rename_dims({"x": "x_new"})
    assert_identical(expected, actual, check_default_indexes=False)
    actual_2 = original.rename_dims(x="x_new")
    assert_identical(expected, actual_2, check_default_indexes=False)
    # Test to raise ValueError
    dims_dict_bad = {"x_bad": "x_new"}
    with pytest.raises(ValueError):
        original.rename_dims(dims_dict_bad)
    with pytest.raises(ValueError):
        original.rename_dims({"x": "z"})
def test_rename_vars(self) -> None:
    """rename_vars renames only variables (dict or kwargs form) and
    rejects unknown names."""
    original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42})
    expected = Dataset(
        {"x_new": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42}
    )
    # TODO: (benbovy - explicit indexes) update when set_index supports
    # setting index for non-dimension variables
    expected = expected.set_coords("x_new")
    actual = original.rename_vars({"x": "x_new"})
    assert_identical(expected, actual, check_default_indexes=False)
    actual_2 = original.rename_vars(x="x_new")
    assert_identical(expected, actual_2, check_default_indexes=False)
    # Test to raise ValueError
    names_dict_bad = {"x_bad": "x_new"}
    with pytest.raises(ValueError):
        original.rename_vars(names_dict_bad)
def test_rename_dimension_coord(self) -> None:
    """Renaming a dimension coordinate to a non-dimension coordinate
    should preserve its index."""
    ds = Dataset(coords={"x": ("x", [0, 1, 2])})
    renamed_var = ds.rename_vars({"x": "x_new"})
    assert "x_new" in renamed_var.xindexes
    renamed_dim = ds.rename_dims({"x": "x_new"})
    assert "x" in renamed_dim.xindexes
def test_rename_dimension_coord_warnings(self) -> None:
    """Creating a dimension coordinate via rename warns that no index is
    created; a no-op rename must stay silent."""
    # create a dimension coordinate by renaming a dimension or coordinate
    # should raise a warning (no index created)
    ds = Dataset(coords={"x": ("y", [0, 1])})
    with pytest.warns(
        UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
    ):
        ds.rename(x="y")
    ds = Dataset(coords={"y": ("x", [0, 1])})
    with pytest.warns(
        UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
    ):
        ds.rename(x="y")
    # No operation should not raise a warning
    ds = Dataset(
        data_vars={"data": (("x", "y"), np.ones((2, 3)))},
        coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])},
    )
    with warnings.catch_warnings():
        # turn any warning into an error so an unexpected one fails the test
        warnings.simplefilter("error")
        ds.rename(x="x")
def test_rename_multiindex(self) -> None:
    """Renaming a multi-index level renames it in the underlying pandas
    MultiIndex; collisions with the dim name or other levels raise."""
    midx = pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["a", "b"])
    midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
    original = Dataset({}, midx_coords)
    midx_renamed = midx.rename(["a", "c"])
    midx_coords_renamed = Coordinates.from_pandas_multiindex(midx_renamed, "x")
    expected = Dataset({}, midx_coords_renamed)
    actual = original.rename({"b": "c"})
    assert_identical(expected, actual)
    with pytest.raises(ValueError, match=r"'a' conflicts"):
        with pytest.warns(UserWarning, match="does not create an index anymore"):
            original.rename({"x": "a"})
    with pytest.raises(ValueError, match=r"'x' conflicts"):
        with pytest.warns(UserWarning, match="does not create an index anymore"):
            original.rename({"a": "x"})
    with pytest.raises(ValueError, match=r"'b' conflicts"):
        original.rename({"a": "b"})
def test_rename_preserve_attrs_encoding(self) -> None:
    """Renaming an indexed coordinate carries attrs and encoding over to
    the variable rebuilt from the index."""
    # test propagate attrs/encoding to new variable(s) created from Index object
    original = Dataset(coords={"x": ("x", [0, 1, 2])})
    expected = Dataset(coords={"y": ("y", [0, 1, 2])})
    for ds, dim in zip([original, expected], ["x", "y"], strict=True):
        ds[dim].attrs = {"foo": "bar"}
        ds[dim].encoding = {"foo": "bar"}
    actual = original.rename({"x": "y"})
    assert_identical(actual, expected)
@requires_cftime
def test_rename_does_not_change_CFTimeIndex_type(self) -> None:
    """rename / rename_dims / rename_vars must keep a CFTimeIndex as a
    CFTimeIndex (GH3522: it used to become a DatetimeIndex)."""
    # make sure CFTimeIndex is not converted to DatetimeIndex #3522
    time = xr.date_range(
        start="2000", periods=6, freq="2MS", calendar="noleap", use_cftime=True
    )
    orig = Dataset(coords={"time": time})
    renamed = orig.rename(time="time_new")
    assert "time_new" in renamed.xindexes
    # TODO: benbovy - flexible indexes: update when CFTimeIndex
    # inherits from xarray.Index
    assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), CFTimeIndex)
    assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new"
    # check original has not changed
    assert "time" in orig.xindexes
    assert isinstance(orig.xindexes["time"].to_pandas_index(), CFTimeIndex)
    assert orig.xindexes["time"].to_pandas_index().name == "time"
    # note: rename_dims(time="time_new") drops "ds.indexes"
    renamed = orig.rename_dims()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex)
    renamed = orig.rename_vars()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex)
def test_rename_does_not_change_DatetimeIndex_type(self) -> None:
    """rename / rename_dims / rename_vars keep a DatetimeIndex intact
    (mirror of the CFTimeIndex test above)."""
    # make sure DatetimeIndex is conserved on rename
    time = pd.date_range(start="2000", periods=6, freq="2MS")
    orig = Dataset(coords={"time": time})
    renamed = orig.rename(time="time_new")
    assert "time_new" in renamed.xindexes
    # TODO: benbovy - flexible indexes: update when DatetimeIndex
    # inherits from xarray.Index?
    assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), DatetimeIndex)
    assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new"
    # check original has not changed
    assert "time" in orig.xindexes
    assert isinstance(orig.xindexes["time"].to_pandas_index(), DatetimeIndex)
    assert orig.xindexes["time"].to_pandas_index().name == "time"
    # note: rename_dims(time="time_new") drops "ds.indexes"
    renamed = orig.rename_dims()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex)
    renamed = orig.rename_vars()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex)
def test_swap_dims(self) -> None:
    """swap_dims exchanges a dimension with a coordinate (round-trips,
    validates targets, kwargs form, and multi-index coordinates)."""
    original = Dataset({"x": [1, 2, 3], "y": ("x", list("abc")), "z": 42})
    expected = Dataset({"z": 42}, {"x": ("y", [1, 2, 3]), "y": list("abc")})
    actual = original.swap_dims({"x": "y"})
    assert_identical(expected, actual)
    # "y" becomes the indexed dimension coordinate, "x" a plain variable
    assert isinstance(actual.variables["y"], IndexVariable)
    assert isinstance(actual.variables["x"], Variable)
    assert actual.xindexes["y"].equals(expected.xindexes["y"])
    roundtripped = actual.swap_dims({"y": "x"})
    assert_identical(original.set_coords("y"), roundtripped)
    with pytest.raises(ValueError, match=r"cannot swap"):
        original.swap_dims({"y": "x"})
    with pytest.raises(ValueError, match=r"replacement dimension"):
        original.swap_dims({"x": "z"})
    # swapping to a name with no corresponding variable ("u")
    expected = Dataset(
        {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])}
    )
    actual = original.swap_dims({"x": "u"})
    assert_identical(expected, actual)
    # as kwargs
    expected = Dataset(
        {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])}
    )
    actual = original.swap_dims(x="u")
    assert_identical(expected, actual)
    # handle multiindex case
    midx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"])
    original = Dataset({"x": [1, 2, 3], "y": ("x", midx), "z": 42})
    midx_coords = Coordinates.from_pandas_multiindex(midx, "y")
    midx_coords["x"] = ("y", [1, 2, 3])
    expected = Dataset({"z": 42}, midx_coords)
    actual = original.swap_dims({"x": "y"})
    assert_identical(expected, actual)
    assert isinstance(actual.variables["y"], IndexVariable)
    assert isinstance(actual.variables["x"], Variable)
    assert actual.xindexes["y"].equals(expected.xindexes["y"])
def test_expand_dims_error(self) -> None:
    """expand_dims rejects names that already exist, float sizes, and
    mixing dict and keyword arguments."""
    original = Dataset(
        {
            "x": ("a", np.random.randn(3)),
            "y": (["b", "a"], np.random.randn(4, 3)),
            "z": ("a", np.random.randn(3)),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    with pytest.raises(ValueError, match=r"already exists"):
        original.expand_dims(dim=["x"])
    # Make sure it raises true error also for non-dimensional coordinates
    # which has dimension.
    original = original.set_coords("z")
    with pytest.raises(ValueError, match=r"already exists"):
        original.expand_dims(dim=["z"])
    original = Dataset(
        {
            "x": ("a", np.random.randn(3)),
            "y": (["b", "a"], np.random.randn(4, 3)),
            "z": ("a", np.random.randn(3)),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    # a float is not a valid size for a new dimension
    with pytest.raises(TypeError, match=r"value of new dimension"):
        original.expand_dims({"d": 3.2})
    with pytest.raises(ValueError, match=r"both keyword and positional"):
        original.expand_dims({"d": 4}, e=4)
def test_expand_dims_int(self) -> None:
    """expand_dims with an explicit axis position (positive and negative)
    inserts a size-1 dim that squeeze() can undo."""
    original = Dataset(
        {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    actual = original.expand_dims(["z"], [1])
    expected = Dataset(
        {
            "x": original["x"].expand_dims("z", 1),
            "y": original["y"].expand_dims("z", 1),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    assert_identical(expected, actual)
    # make sure squeeze restores the original data set.
    roundtripped = actual.squeeze("z")
    assert_identical(original, roundtripped)
    # another test with a negative axis
    actual = original.expand_dims(["z"], [-1])
    expected = Dataset(
        {
            "x": original["x"].expand_dims("z", -1),
            "y": original["y"].expand_dims("z", -1),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    assert_identical(expected, actual)
    # make sure squeeze restores the original data set.
    roundtripped = actual.squeeze("z")
    assert_identical(original, roundtripped)
def test_expand_dims_coords(self) -> None:
    """expand_dims with a dict of coordinate values tiles the data along
    the new dimension, without mutating the original."""
    ds = Dataset({"x": ("a", np.array([1, 2, 3]))})
    result = ds.expand_dims(dict(b=[1, 2]))
    expected = Dataset(
        {"x": (("b", "a"), np.array([[1, 2, 3], [1, 2, 3]]))}, coords={"b": [1, 2]}
    )
    assert_identical(expected, result)
    assert "b" not in ds._coord_names
def test_expand_dims_existing_scalar_coord(self) -> None:
    """A scalar coordinate is promoted to a length-1 dimension."""
    ds = Dataset({"x": 1}, {"a": 2})
    result = ds.expand_dims("a")
    expected = Dataset({"x": (("a",), [1])}, {"a": [2]})
    assert_identical(expected, result)
def test_isel_expand_dims_roundtrip(self) -> None:
    """Selecting a scalar along a dim and re-expanding it restores the
    original dataset."""
    ds = Dataset({"x": (("a",), [1])}, {"a": [2]})
    roundtripped = ds.isel(a=0).expand_dims("a")
    assert_identical(roundtripped, ds)
def test_expand_dims_mixed_int_and_coords(self) -> None:
    """expand_dims with both a bare size ("d": 4, no coords/index) and a
    coordinate list ("e": [...]) in one call."""
    # Test expanding one dimension to have size > 1 that doesn't have
    # coordinates, and also expanding another dimension to have size > 1
    # that DOES have coordinates.
    original = Dataset(
        {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
    )
    actual = original.expand_dims({"d": 4, "e": ["l", "m", "n"]})
    # expected arrays are tiled copies; "d" gets no coordinate, hence the
    # drop_vars("d") on each expected DataArray
    expected = Dataset(
        {
            "x": xr.DataArray(
                original["x"].values * np.ones([4, 3, 3]),
                coords=dict(d=range(4), e=["l", "m", "n"], a=np.linspace(0, 1, 3)),
                dims=["d", "e", "a"],
            ).drop_vars("d"),
            "y": xr.DataArray(
                original["y"].values * np.ones([4, 3, 4, 3]),
                coords=dict(
                    d=range(4),
                    e=["l", "m", "n"],
                    b=np.linspace(0, 1, 4),
                    a=np.linspace(0, 1, 3),
                ),
                dims=["d", "e", "b", "a"],
            ).drop_vars("d"),
        },
        coords={"c": np.linspace(0, 1, 5)},
    )
    assert_identical(actual, expected)
def test_expand_dims_kwargs_python36plus(self) -> None:
    """expand_dims accepts keyword-argument form (e=[...]) equivalently to
    the dict form."""
    original = Dataset(
        {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    other_way = original.expand_dims(e=["l", "m", "n"])
    other_way_expected = Dataset(
        {
            "x": xr.DataArray(
                original["x"].values * np.ones([3, 3]),
                coords=dict(e=["l", "m", "n"], a=np.linspace(0, 1, 3)),
                dims=["e", "a"],
            ),
            "y": xr.DataArray(
                original["y"].values * np.ones([3, 4, 3]),
                coords=dict(
                    e=["l", "m", "n"],
                    b=np.linspace(0, 1, 4),
                    a=np.linspace(0, 1, 3),
                ),
                dims=["e", "b", "a"],
            ),
        },
        coords={"c": np.linspace(0, 1, 5)},
        attrs={"key": "entry"},
    )
    assert_identical(other_way_expected, other_way)
@pytest.mark.parametrize("create_index_for_new_dim_flag", [True, False])
def test_expand_dims_create_index_data_variable(
    self, create_index_for_new_dim_flag
):
    """Expanding a DATA variable never creates an index, regardless of
    create_index_for_new_dim; with True a warning is emitted."""
    # data variables should not gain an index ever
    ds = Dataset({"x": 0})
    if create_index_for_new_dim_flag:
        with pytest.warns(UserWarning, match="No index created"):
            expanded = ds.expand_dims(
                "x", create_index_for_new_dim=create_index_for_new_dim_flag
            )
    else:
        expanded = ds.expand_dims(
            "x", create_index_for_new_dim=create_index_for_new_dim_flag
        )
    # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959
    expected = Dataset({"x": ("x", [0])}).drop_indexes("x").reset_coords("x")
    assert_identical(expanded, expected, check_default_indexes=False)
    assert expanded.indexes == {}
def test_expand_dims_create_index_coordinate_variable(self):
    """Expanding a COORDINATE variable creates an index by default; with
    create_index_for_new_dim=False it stays index-free."""
    # coordinate variables should gain an index only if create_index_for_new_dim is True (the default)
    ds = Dataset(coords={"x": 0})
    expanded = ds.expand_dims("x")
    expected = Dataset({"x": ("x", [0])})
    assert_identical(expanded, expected)
    expanded_no_index = ds.expand_dims("x", create_index_for_new_dim=False)
    # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959
    expected = Dataset(coords={"x": ("x", [0])}).drop_indexes("x")
    assert_identical(expanded_no_index, expected, check_default_indexes=False)
    assert expanded_no_index.indexes == {}
def test_expand_dims_create_index_from_iterable(self):
    """expand_dims with an iterable of coordinate values creates an index
    by default; with create_index_for_new_dim=False the coordinate stays
    index-free."""
    ds = Dataset(coords={"x": 0})
    expanded = ds.expand_dims(x=[0, 1])
    expected = Dataset({"x": ("x", [0, 1])})
    assert_identical(expanded, expected)
    expanded_no_index = ds.expand_dims(x=[0, 1], create_index_for_new_dim=False)
    # TODO Can't just create the expected dataset directly using constructor because of GH issue 8959
    expected = Dataset(coords={"x": ("x", [0, 1])}).drop_indexes("x")
    # Bug fix: the no-index result must be the one compared here (it was
    # `expanded`, copy-pasted from the assertion above, which left the
    # create_index_for_new_dim=False result's contents unverified; compare
    # the sibling test_expand_dims_create_index_coordinate_variable).
    assert_identical(expanded_no_index, expected, check_default_indexes=False)
    assert expanded_no_index.indexes == {}
def test_expand_dims_non_nanosecond_conversion(self) -> None:
    """Non-nanosecond datetime values survive expand_dims with second
    resolution."""
    # Regression test for https://github.com/pydata/xarray/issues/7493#issuecomment-1953091000
    # todo: test still needed?
    expanded = Dataset().expand_dims({"time": [np.datetime64("2018-01-01", "m")]})
    assert expanded.time.dtype == np.dtype("datetime64[s]")
    def test_set_index(self) -> None:
        """set_index builds a multi-index from level coordinates, replaces
        pre-existing indexes, and rejects missing/mismatched/scalar variables."""
        expected = create_test_multiindex()
        mindex = expected["x"].to_index()
        indexes = [mindex.get_level_values(str(n)) for n in mindex.names]
        coords = {idx.name: ("x", idx) for idx in indexes}
        ds = Dataset({}, coords=coords)
        obj = ds.set_index(x=mindex.names)
        assert_identical(obj, expected)
        # ensure pre-existing indexes involved are removed
        # (level_2 should be a coordinate with no index)
        ds = create_test_multiindex()
        coords = {"x": coords["level_1"], "level_2": coords["level_2"]}
        expected = Dataset({}, coords=coords)
        obj = ds.set_index(x="level_1")
        assert_identical(obj, expected)
        # ensure set_index with no existing index and a single data var given
        # doesn't return multi-index
        ds = Dataset(data_vars={"x_var": ("x", [0, 1, 2])})
        expected = Dataset(coords={"x": [0, 1, 2]})
        assert_identical(ds.set_index(x="x_var"), expected)
        # error cases: nonexistent variable, dimension mismatch, scalar coord
        with pytest.raises(ValueError, match=r"bar variable\(s\) do not exist"):
            ds.set_index(foo="bar")
        with pytest.raises(ValueError, match=r"dimension mismatch.*"):
            ds.set_index(y="x_var")
        ds = Dataset(coords={"x": 1})
        with pytest.raises(
            ValueError, match=r".*cannot set a PandasIndex.*scalar variable.*"
        ):
            ds.set_index(x="x")
    def test_set_index_deindexed_coords(self) -> None:
        """Coordinates that lose their index via set_index become plain
        (base) variables instead of IndexVariables."""
        # test de-indexed coordinates are converted to base variable
        # https://github.com/pydata/xarray/issues/6969
        one = ["a", "a", "b", "b"]
        two = [1, 2, 1, 2]
        three = ["c", "c", "d", "d"]
        four = [3, 4, 3, 4]
        midx_12 = pd.MultiIndex.from_arrays([one, two], names=["one", "two"])
        midx_34 = pd.MultiIndex.from_arrays([three, four], names=["three", "four"])
        coords = Coordinates.from_pandas_multiindex(midx_12, "x")
        coords["three"] = ("x", three)
        coords["four"] = ("x", four)
        ds = xr.Dataset(coords=coords)
        # replacing the multi-index demotes the old levels to plain coords
        actual = ds.set_index(x=["three", "four"])
        coords_expected = Coordinates.from_pandas_multiindex(midx_34, "x")
        coords_expected["one"] = ("x", one)
        coords_expected["two"] = ("x", two)
        expected = xr.Dataset(coords=coords_expected)
        assert_identical(actual, expected)
    def test_reset_index(self) -> None:
        """reset_index removes the multi-index but keeps the level values as
        plain coordinates; resetting a non-indexed coordinate errors."""
        ds = create_test_multiindex()
        mindex = ds["x"].to_index()
        indexes = [mindex.get_level_values(str(n)) for n in mindex.names]
        coords = {idx.name: ("x", idx) for idx in indexes}
        expected = Dataset({}, coords=coords)
        obj = ds.reset_index("x")
        assert_identical(obj, expected, check_default_indexes=False)
        assert len(obj.xindexes) == 0
        ds = Dataset(coords={"y": ("x", [1, 2, 3])})
        with pytest.raises(ValueError, match=r".*not coordinates with an index"):
            ds.reset_index("y")
    def test_reset_index_keep_attrs(self) -> None:
        """Coordinate attrs survive reset_index even though the index is dropped."""
        coord_1 = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True})
        ds = Dataset({}, {"coord_1": coord_1})
        obj = ds.reset_index("coord_1")
        assert ds.coord_1.attrs == obj.coord_1.attrs
        assert len(obj.xindexes) == 0
def test_reset_index_drop_dims(self) -> None:
ds = Dataset(coords={"x": [1, 2]})
reset = ds.reset_index("x", drop=True)
assert len(reset.dims) == 0
    @pytest.mark.parametrize(
        ["arg", "drop", "dropped", "converted", "renamed"],
        [
            ("foo", False, [], [], {"bar": "x"}),
            ("foo", True, ["foo"], [], {"bar": "x"}),
            ("x", False, ["x"], ["foo", "bar"], {}),
            ("x", True, ["x", "foo", "bar"], [], {}),
            (["foo", "bar"], False, ["x"], ["foo", "bar"], {}),
            (["foo", "bar"], True, ["x", "foo", "bar"], [], {}),
            (["x", "foo"], False, ["x"], ["foo", "bar"], {}),
            (["foo", "x"], True, ["x", "foo", "bar"], [], {}),
        ],
    )
    def test_reset_index_drop_convert(
        self,
        arg: str | list[str],
        drop: bool,
        dropped: list[str],
        converted: list[str],
        renamed: dict[str, str],
    ) -> None:
        """For each (arg, drop) combination, verify which coordinates get
        dropped, converted to base variables, or renamed by reset_index."""
        # regressions https://github.com/pydata/xarray/issues/6946 and
        # https://github.com/pydata/xarray/issues/6989
        # check that multi-index dimension or level coordinates are dropped, converted
        # from IndexVariable to Variable or renamed to dimension as expected
        midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("foo", "bar"))
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        ds = xr.Dataset(coords=midx_coords)
        reset = ds.reset_index(arg, drop=drop)
        for name in dropped:
            assert name not in reset.variables
        for name in converted:
            assert_identical(reset[name].variable, ds[name].variable.to_base_variable())
        for old_name, new_name in renamed.items():
            assert_identical(ds[old_name].variable, reset[new_name].variable)
    def test_reorder_levels(self) -> None:
        """reorder_levels swaps multi-index level order, keeps attrs, and
        errors on dimensions that have no MultiIndex."""
        ds = create_test_multiindex()
        mindex = ds["x"].to_index()
        assert isinstance(mindex, pd.MultiIndex)
        midx = mindex.reorder_levels(["level_2", "level_1"])
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
        expected = Dataset({}, coords=midx_coords)
        # check attrs propagated
        ds["level_1"].attrs["foo"] = "bar"
        expected["level_1"].attrs["foo"] = "bar"
        reindexed = ds.reorder_levels(x=["level_2", "level_1"])
        assert_identical(reindexed, expected)
        ds = Dataset({}, coords={"x": [1, 2]})
        with pytest.raises(ValueError, match=r"has no MultiIndex"):
            ds.reorder_levels(x=["level_1", "level_2"])
    def test_set_xindex(self) -> None:
        """set_xindex builds single and multi-coordinate indexes, and rejects
        non-Index classes, missing coords, data vars, and already-indexed coords."""
        ds = Dataset(
            coords={"foo": ("x", ["a", "a", "b", "b"]), "bar": ("x", [0, 1, 2, 3])}
        )
        actual = ds.set_xindex("foo")
        expected = ds.set_index(x="foo").rename_vars(x="foo")
        assert_identical(actual, expected, check_default_indexes=False)
        actual_mindex = ds.set_xindex(["foo", "bar"])
        expected_mindex = ds.set_index(x=["foo", "bar"])
        assert_identical(actual_mindex, expected_mindex)
        class NotAnIndex: ...
        with pytest.raises(TypeError, match=r".*not a subclass of xarray.Index"):
            ds.set_xindex("foo", NotAnIndex)  # type: ignore[arg-type]
        with pytest.raises(ValueError, match="those variables don't exist"):
            ds.set_xindex("not_a_coordinate", PandasIndex)
        ds["data_var"] = ("x", [1, 2, 3, 4])
        with pytest.raises(ValueError, match="those variables are data variables"):
            ds.set_xindex("data_var", PandasIndex)
        ds2 = Dataset(coords={"x": ("x", [0, 1, 2, 3])})
        with pytest.raises(ValueError, match="those coordinates already have an index"):
            ds2.set_xindex("x", PandasIndex)
    def test_set_xindex_options(self) -> None:
        """Extra keyword options passed to set_xindex reach the custom Index's
        ``from_variables`` via the ``options`` dict."""
        ds = Dataset(coords={"foo": ("x", ["a", "a", "b", "b"])})
        class IndexWithOptions(Index):
            def __init__(self, opt):
                self.opt = opt
            @classmethod
            def from_variables(cls, variables, options):
                return cls(options["opt"])
        indexed = ds.set_xindex("foo", IndexWithOptions, opt=1)
        assert indexed.xindexes["foo"].opt == 1  # type: ignore[attr-defined]
    def test_stack(self) -> None:
        """stack builds a multi-index over the given dims, accepts ellipsis
        forms, preserves attrs, and honors the requested dimension order."""
        ds = Dataset(
            data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])},
            coords={"x": ("x", [0, 1]), "y": ["a", "b"]},
        )
        midx_expected = pd.MultiIndex.from_product(
            [[0, 1], ["a", "b"]], names=["x", "y"]
        )
        midx_coords_expected = Coordinates.from_pandas_multiindex(midx_expected, "z")
        expected = Dataset(
            data_vars={"b": ("z", [0, 1, 2, 3])}, coords=midx_coords_expected
        )
        # check attrs propagated
        ds["x"].attrs["foo"] = "bar"
        expected["x"].attrs["foo"] = "bar"
        actual = ds.stack(z=["x", "y"])
        assert_identical(expected, actual)
        assert list(actual.xindexes) == ["z", "x", "y"]
        actual = ds.stack(z=[...])
        assert_identical(expected, actual)
        # non list dims with ellipsis
        actual = ds.stack(z=(...,))
        assert_identical(expected, actual)
        # ellipsis with given dim
        actual = ds.stack(z=[..., "y"])
        assert_identical(expected, actual)
        # reversed dim order yields a different element ordering
        midx_expected = pd.MultiIndex.from_product(
            [["a", "b"], [0, 1]], names=["y", "x"]
        )
        midx_coords_expected = Coordinates.from_pandas_multiindex(midx_expected, "z")
        expected = Dataset(
            data_vars={"b": ("z", [0, 2, 1, 3])}, coords=midx_coords_expected
        )
        expected["x"].attrs["foo"] = "bar"
        actual = ds.stack(z=["y", "x"])
        assert_identical(expected, actual)
        assert list(actual.xindexes) == ["z", "y", "x"]
    @pytest.mark.parametrize(
        "create_index,expected_keys",
        [
            (True, ["z", "x", "y"]),
            (False, []),
            (None, ["z", "x", "y"]),
        ],
    )
    def test_stack_create_index(self, create_index, expected_keys) -> None:
        """stack's create_index flag controls whether the stacked dimension
        (and its levels) get indexes; None behaves like True here."""
        ds = Dataset(
            data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])},
            coords={"x": ("x", [0, 1]), "y": ["a", "b"]},
        )
        actual = ds.stack(z=["x", "y"], create_index=create_index)
        assert list(actual.xindexes) == expected_keys
        # TODO: benbovy (flexible indexes) - test error multiple indexes found
        # along dimension + create_index=True
    def test_stack_multi_index(self) -> None:
        """Stacking over a dim that already carries a multi-index works with
        create_index=False (index discarded) and errors with create_index=True."""
        # multi-index on a dimension to stack is discarded too
        midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("lvl1", "lvl2"))
        coords = Coordinates.from_pandas_multiindex(midx, "x")
        coords["y"] = [0, 1]
        ds = xr.Dataset(
            data_vars={"b": (("x", "y"), [[0, 1], [2, 3], [4, 5], [6, 7]])},
            coords=coords,
        )
        expected = Dataset(
            data_vars={"b": ("z", [0, 1, 2, 3, 4, 5, 6, 7])},
            coords={
                "x": ("z", np.repeat(midx.values, 2)),
                "lvl1": ("z", np.repeat(midx.get_level_values("lvl1"), 2)),
                "lvl2": ("z", np.repeat(midx.get_level_values("lvl2"), 2)),
                "y": ("z", [0, 1, 0, 1] * 2),
            },
        )
        actual = ds.stack(z=["x", "y"], create_index=False)
        assert_identical(expected, actual)
        assert len(actual.xindexes) == 0
        with pytest.raises(ValueError, match=r"cannot create.*wraps a multi-index"):
            ds.stack(z=["x", "y"], create_index=True)
    def test_stack_non_dim_coords(self) -> None:
        """When a stacked dimension's coordinate has a different name, the
        multi-index level takes the coordinate's name ("xx"), not the dim's."""
        ds = Dataset(
            data_vars={"b": (("x", "y"), [[0, 1], [2, 3]])},
            coords={"x": ("x", [0, 1]), "y": ["a", "b"]},
        ).rename_vars(x="xx")
        exp_index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["xx", "y"])
        exp_coords = Coordinates.from_pandas_multiindex(exp_index, "z")
        expected = Dataset(data_vars={"b": ("z", [0, 1, 2, 3])}, coords=exp_coords)
        actual = ds.stack(z=["x", "y"])
        assert_identical(expected, actual)
        assert list(actual.xindexes) == ["z", "xx", "y"]
    def test_unstack(self) -> None:
        """unstack expands a multi-indexed dim back into its levels, whether
        given as a name, a list, or None (all stacked dims); attrs survive."""
        index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["x", "y"])
        coords = Coordinates.from_pandas_multiindex(index, "z")
        ds = Dataset(data_vars={"b": ("z", [0, 1, 2, 3])}, coords=coords)
        expected = Dataset(
            {"b": (("x", "y"), [[0, 1], [2, 3]]), "x": [0, 1], "y": ["a", "b"]}
        )
        # check attrs propagated
        ds["x"].attrs["foo"] = "bar"
        expected["x"].attrs["foo"] = "bar"
        for dim in ["z", ["z"], None]:
            actual = ds.unstack(dim)
            assert_identical(actual, expected)
    def test_unstack_errors(self) -> None:
        """unstack errors on unknown dims, dims without a multi-index, and
        multi-indexes containing duplicate entries."""
        ds = Dataset({"x": [1, 2, 3]})
        with pytest.raises(
            ValueError,
            match=re.escape("Dimensions ('foo',) not found in data dimensions ('x',)"),
        ):
            ds.unstack("foo")
        with pytest.raises(ValueError, match=r".*do not have exactly one multi-index"):
            ds.unstack("x")
        # duplicate (y, z) pairs make the multi-index non-unstackable
        ds = Dataset({"da": [1, 2]}, coords={"y": ("x", [1, 1]), "z": ("x", [0, 0])})
        ds = ds.set_index(x=("y", "z"))
        with pytest.raises(
            ValueError, match="Cannot unstack MultiIndex containing duplicates"
        ):
            ds.unstack("x")
    def test_unstack_fill_value(self) -> None:
        """fill_value for unstack accepts a scalar, a full per-variable dict,
        and a partial dict (missing vars fall back to NaN)."""
        ds = xr.Dataset(
            {"var": (("x",), np.arange(6)), "other_var": (("x",), np.arange(3, 9))},
            coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)},
        )
        # make ds incomplete
        ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"])
        # test fill_value
        actual1 = ds.unstack("index", fill_value=-1)
        expected1 = ds.unstack("index").fillna(-1).astype(int)
        assert actual1["var"].dtype == int
        assert_equal(actual1, expected1)
        actual2 = ds["var"].unstack("index", fill_value=-1)
        expected2 = ds["var"].unstack("index").fillna(-1).astype(int)
        assert_equal(actual2, expected2)
        actual3 = ds.unstack("index", fill_value={"var": -1, "other_var": 1})
        expected3 = ds.unstack("index").fillna({"var": -1, "other_var": 1}).astype(int)
        assert_equal(actual3, expected3)
        actual4 = ds.unstack("index", fill_value={"var": -1})
        expected4 = ds.unstack("index").fillna({"var": -1, "other_var": np.nan})
        assert_equal(actual4, expected4)
    @requires_sparse
    def test_unstack_sparse(self) -> None:
        """unstack(sparse=True) produces sparse arrays that densify to the
        dense result, for both Dataset and DataArray, including fill_value."""
        ds = xr.Dataset(
            {"var": (("x",), np.arange(6))},
            coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)},
        )
        # make ds incomplete
        ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"])
        # test fill_value
        actual1 = ds.unstack("index", sparse=True)
        expected1 = ds.unstack("index")
        assert isinstance(actual1["var"].data, sparse_array_type)
        assert actual1["var"].variable._to_dense().equals(expected1["var"].variable)
        assert actual1["var"].data.density < 1.0
        actual2 = ds["var"].unstack("index", sparse=True)
        expected2 = ds["var"].unstack("index")
        assert isinstance(actual2.data, sparse_array_type)
        assert actual2.variable._to_dense().equals(expected2.variable)
        assert actual2.data.density < 1.0
        # diagonal multi-index: unstacking produces an identity-like pattern
        midx = pd.MultiIndex.from_arrays([np.arange(3), np.arange(3)], names=["a", "b"])
        coords = Coordinates.from_pandas_multiindex(midx, "z")
        coords["foo"] = np.arange(4)
        coords["bar"] = np.arange(5)
        ds_eye = Dataset(
            {"var": (("z", "foo", "bar"), np.ones((3, 4, 5)))}, coords=coords
        )
        actual3 = ds_eye.unstack(sparse=True, fill_value=0)
        assert isinstance(actual3["var"].data, sparse_array_type)
        expected3 = xr.Dataset(
            {
                "var": (
                    ("foo", "bar", "a", "b"),
                    np.broadcast_to(np.eye(3, 3), (4, 5, 3, 3)),
                )
            },
            coords={
                "foo": np.arange(4),
                "bar": np.arange(5),
                "a": np.arange(3),
                "b": np.arange(3),
            },
        )
        actual3["var"].data = actual3["var"].data.todense()
        assert_equal(expected3, actual3)
def test_stack_unstack_fast(self) -> None:
ds = Dataset(
{
"a": ("x", [0, 1]),
"b": (("x", "y"), [[0, 1], [2, 3]]),
"x": [0, 1],
"y": ["a", "b"],
}
)
actual = ds.stack(z=["x", "y"]).unstack("z")
assert actual.broadcast_equals(ds)
actual = ds[["b"]].stack(z=["x", "y"]).unstack("z")
assert actual.identical(ds[["b"]])
    def test_stack_unstack_slow(self) -> None:
        """Round-trip through stack/unstack when the stacked data is reversed
        in between, forcing the slow (reindexing) unstack path."""
        ds = Dataset(
            data_vars={
                "a": ("x", [0, 1]),
                "b": (("x", "y"), [[0, 1], [2, 3]]),
            },
            coords={"x": [0, 1], "y": ["a", "b"]},
        )
        stacked = ds.stack(z=["x", "y"])
        actual = stacked.isel(z=slice(None, None, -1)).unstack("z")
        assert actual.broadcast_equals(ds)
        stacked = ds[["b"]].stack(z=["x", "y"])
        actual = stacked.isel(z=slice(None, None, -1)).unstack("z")
        assert actual.identical(ds[["b"]])
    def test_to_stacked_array_invalid_sample_dims(self) -> None:
        """to_stacked_array errors if any variable lacks one of sample_dims."""
        data = xr.Dataset(
            data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])},
            coords={"y": ["u", "v", "w"]},
        )
        with pytest.raises(
            ValueError,
            match=r"Variables in the dataset must contain all ``sample_dims`` \(\['y'\]\) but 'b' misses \['y'\]",
        ):
            data.to_stacked_array("features", sample_dims=["y"])
def test_to_stacked_array_name(self) -> None:
name = "adf9d"
# make a two dimensional dataset
a, b = create_test_stacked_array()
D = xr.Dataset({"a": a, "b": b})
sample_dims = ["x"]
y = D.to_stacked_array("features", sample_dims, name=name)
assert y.name == name
    def test_to_stacked_array_dtype_dims(self) -> None:
        """The stacked multi-index preserves the source coordinate dtype and
        the result has (sample_dims..., new_dim) dimension order."""
        # make a two dimensional dataset
        a, b = create_test_stacked_array()
        D = xr.Dataset({"a": a, "b": b})
        sample_dims = ["x"]
        y = D.to_stacked_array("features", sample_dims)
        mindex = y.xindexes["features"].to_pandas_index()
        assert isinstance(mindex, pd.MultiIndex)
        assert mindex.levels[1].dtype == D.y.dtype
        assert y.dims == ("x", "features")
    def test_to_stacked_array_to_unstacked_dataset(self) -> None:
        """to_stacked_array followed by to_unstacked_dataset round-trips, in
        1-D, 2-D, and single-sample cases."""
        # single dimension: regression test for GH4049
        arr = xr.DataArray(np.arange(3), coords=[("x", [0, 1, 2])])
        data = xr.Dataset({"a": arr, "b": arr})
        stacked = data.to_stacked_array("y", sample_dims=["x"])
        unstacked = stacked.to_unstacked_dataset("y")
        assert_identical(unstacked, data)
        # make a two dimensional dataset
        a, b = create_test_stacked_array()
        D = xr.Dataset({"a": a, "b": b})
        sample_dims = ["x"]
        y = D.to_stacked_array("features", sample_dims).transpose("x", "features")
        x = y.to_unstacked_dataset("features")
        assert_identical(D, x)
        # test on just one sample
        x0 = y[0].to_unstacked_dataset("features")
        d0 = D.isel(x=0)
        assert_identical(d0, x0)
    def test_to_stacked_array_to_unstacked_dataset_different_dimension(self) -> None:
        """Round-trip works when variables have different dimensionality
        (here "b" is reduced to 1-D before stacking)."""
        # test when variables have different dimensionality
        a, b = create_test_stacked_array()
        sample_dims = ["x"]
        D = xr.Dataset({"a": a, "b": b.isel(y=0)})
        y = D.to_stacked_array("features", sample_dims)
        x = y.to_unstacked_dataset("features")
        assert_identical(D, x)
    def test_to_stacked_array_preserves_dtype(self) -> None:
        """The "variable" coordinate built from variable names keeps its
        string dtype (no object upcast)."""
        # regression test for bug found in https://github.com/pydata/xarray/pull/8872#issuecomment-2081218616
        ds = xr.Dataset(
            data_vars={
                "a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]),
                "b": ("x", [6, 7]),
            },
            coords={"y": ["u", "v", "w"]},
        )
        stacked = ds.to_stacked_array("z", sample_dims=["x"])
        # coordinate created from variables names should be of string dtype
        data = np.array(["a", "a", "a", "b"], dtype="<U1")
        expected_stacked_variable = DataArray(name="variable", data=data, dims="z")
        assert_identical(
            stacked.coords["variable"].drop_vars(["z", "variable", "y"]),
            expected_stacked_variable,
        )
    def test_to_stacked_array_transposed(self) -> None:
        """to_stacked_array must honor the dataset's current (possibly
        transposed) dimension order when flattening."""
        # test that to_stacked_array uses updated dim order after transposition
        ds = xr.Dataset(
            data_vars=dict(
                v1=(["d1", "d2"], np.arange(6).reshape((2, 3))),
            ),
            coords=dict(
                d1=(["d1"], np.arange(2)),
                d2=(["d2"], np.arange(3)),
            ),
        )
        da = ds.to_stacked_array(
            new_dim="new_dim",
            sample_dims=[],
            variable_dim="variable",
        )
        dsT = ds.transpose()
        daT = dsT.to_stacked_array(
            new_dim="new_dim",
            sample_dims=[],
            variable_dim="variable",
        )
        # row-major flattening of the original vs the transposed layout
        v1 = np.arange(6)
        v1T = np.arange(6).reshape((2, 3)).T.flatten()
        np.testing.assert_equal(da.to_numpy(), v1)
        np.testing.assert_equal(daT.to_numpy(), v1T)
    def test_update(self) -> None:
        """update mutates in place, is a no-op for identical data, and does
        NOT merge attrs from the other object."""
        data = create_test_data(seed=0)
        expected = data.copy()
        var2 = Variable("dim1", np.arange(8))
        actual = data
        # NOTE: `actual` aliases `data` here, so this also mutates `data`
        actual.update({"var2": var2})
        expected["var2"] = var2
        assert_identical(expected, actual)
        actual = data.copy()
        actual.update(data)
        assert_identical(expected, actual)
        other = Dataset(attrs={"new": "attr"})
        actual = data.copy()
        actual.update(other)
        assert_identical(expected, actual)
    def test_update_overwrite_coords(self) -> None:
        """Coordinates from the updating object overwrite existing ones, but a
        DataArray value's own coords do not clobber the dataset's coords."""
        data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
        data.update(Dataset(coords={"b": 4}))
        expected = Dataset({"a": ("x", [1, 2])}, {"b": 4})
        assert_identical(data, expected)
        data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
        data.update(Dataset({"c": 5}, coords={"b": 4}))
        expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 4})
        assert_identical(data, expected)
        data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
        data.update({"c": DataArray(5, coords={"b": 4})})
        expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 3})
        assert_identical(data, expected)
    def test_update_multiindex_level(self) -> None:
        """Updating a single multi-index level variable is rejected because it
        would corrupt the index."""
        data = create_test_multiindex()
        with pytest.raises(
            ValueError, match=r"cannot set or update variable.*corrupt.*index "
        ):
            data.update({"level_1": range(4)})
    def test_update_auto_align(self) -> None:
        """update aligns Dataset arguments to the target's indexes (filling
        with NaN), but raw dict values with mismatched sizes raise."""
        ds = Dataset({"x": ("t", [3, 4])}, {"t": [0, 1]})
        expected1 = Dataset(
            {"x": ("t", [3, 4]), "y": ("t", [np.nan, 5])}, {"t": [0, 1]}
        )
        actual1 = ds.copy()
        other1 = {"y": ("t", [5]), "t": [1]}
        # a plain dict is not aligned, so the size mismatch raises
        with pytest.raises(ValueError, match=r"conflicting sizes"):
            actual1.update(other1)
        actual1.update(Dataset(other1))
        assert_identical(expected1, actual1)
        actual2 = ds.copy()
        other2 = Dataset({"y": ("t", [5]), "t": [100]})
        actual2.update(other2)
        expected2 = Dataset(
            {"x": ("t", [3, 4]), "y": ("t", [np.nan] * 2)}, {"t": [0, 1]}
        )
        assert_identical(expected2, actual2)
    def test_getitem(self) -> None:
        """__getitem__ supports single names, lists of names, and dict
        (positional) indexing; tuples of names raise with a hint."""
        data = create_test_data()
        assert isinstance(data["var1"], DataArray)
        assert_equal(data["var1"].variable, data.variables["var1"])
        with pytest.raises(KeyError):
            data["notfound"]
        with pytest.raises(KeyError):
            data[["var1", "notfound"]]
        with pytest.raises(
            KeyError,
            match=r"Hint: use a list to select multiple variables, for example `ds\[\['var1', 'var2'\]\]`",
        ):
            data["var1", "var2"]
        actual1 = data[["var1", "var2"]]
        expected1 = Dataset({"var1": data["var1"], "var2": data["var2"]})
        assert_equal(expected1, actual1)
        actual2 = data["numbers"]
        expected2 = DataArray(
            data["numbers"].variable,
            {"dim3": data["dim3"], "numbers": data["numbers"]},
            dims="dim3",
            name="numbers",
        )
        assert_identical(expected2, actual2)
        # dict keys index positionally, equivalent to isel
        actual3 = data[dict(dim1=0)]
        expected3 = data.isel(dim1=0)
        assert_identical(expected3, actual3)
    def test_getitem_hashable(self) -> None:
        """Any hashable (e.g. a tuple of ints) can be a variable key, but a
        tuple of existing names is still rejected (ambiguous with selection)."""
        data = create_test_data()
        data[(3, 4)] = data["var1"] + 1
        expected = data["var1"] + 1
        expected.name = (3, 4)
        assert_identical(expected, data[(3, 4)])
        with pytest.raises(KeyError, match=r"('var1', 'var2')"):
            data[("var1", "var2")]
def test_getitem_multiple_dtype(self) -> None:
keys = ["foo", 1]
dataset = Dataset({key: ("dim0", range(1)) for key in keys})
assert_identical(dataset, dataset[keys])
    def test_getitem_extra_dim_index_coord(self) -> None:
        """A custom Index may attach a coord with an extra dim ("x_bnds") to a
        selected DataArray without that dim leaking into the array's dims."""
        class AnyIndex(Index):
            # always attach associated coords to extracted arrays
            def should_add_coord_to_array(self, name, var, dims):
                return True
        idx = AnyIndex()
        coords = Coordinates(
            coords={
                "x": ("x", [1, 2]),
                "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]),
            },
            indexes={"x": idx, "x_bounds": idx},
        )
        ds = Dataset({"foo": (("x"), [1.0, 2.0])}, coords=coords)
        actual = ds["foo"]
        assert_identical(actual.coords, coords, check_default_indexes=False)
        assert "x_bnds" not in actual.dims
    def test_virtual_variables_default_coords(self) -> None:
        """A dimension without an explicit coordinate yields a default range
        coordinate when accessed by name."""
        dataset = Dataset({"foo": ("x", range(10))})
        expected1 = DataArray(range(10), dims="x", name="x")
        actual1 = dataset["x"]
        assert_identical(expected1, actual1)
        assert isinstance(actual1.variable, IndexVariable)
        actual2 = dataset[["x", "foo"]]
        expected2 = dataset.assign_coords(x=range(10))
        assert_identical(expected2, actual2)
    def test_virtual_variables_time(self) -> None:
        """Datetime components ("time.month", "time.season", ...) are exposed
        as virtual variables, support math, and become coordinates."""
        # access virtual variables
        data = create_test_data()
        index = data.variables["time"].to_index()
        assert isinstance(index, pd.DatetimeIndex)
        assert_array_equal(data["time.month"].values, index.month)
        assert_array_equal(data["time.season"].values, "DJF")
        # test virtual variable math
        assert_array_equal(data["time.dayofyear"] + 1, 2 + np.arange(20))
        assert_array_equal(np.sin(data["time.dayofyear"]), np.sin(1 + np.arange(20)))
        # ensure they become coordinates
        expected = Dataset({}, {"dayofyear": data["time.dayofyear"]})
        actual = data[["time.dayofyear"]]
        assert_equal(expected, actual)
        # non-coordinate variables
        ds = Dataset({"t": ("x", pd.date_range("2000-01-01", periods=3))})
        assert (ds["t.year"] == 2000).all()
    def test_virtual_variable_same_name(self) -> None:
        """Accessing "time.time" works even though the component name matches
        the variable name (regression test for GH367)."""
        # regression test for GH367
        times = pd.date_range("2000-01-01", freq="h", periods=5)
        data = Dataset({"time": times})
        actual = data["time.time"]
        expected = DataArray(times.time, [("time", times)], name="time")
        assert_identical(actual, expected)
def test_time_season(self) -> None:
time = xr.date_range("2000-01-01", periods=12, freq="ME", use_cftime=False)
ds = Dataset({"t": time})
seas = ["DJF"] * 2 + ["MAM"] * 3 + ["JJA"] * 3 + ["SON"] * 3 + ["DJF"]
assert_array_equal(seas, ds["t.season"])
    def test_slice_virtual_variable(self) -> None:
        """Virtual variables support positional slicing and scalar indexing."""
        data = create_test_data()
        assert_equal(
            data["time.dayofyear"][:10].variable, Variable(["time"], 1 + np.arange(10))
        )
        assert_equal(data["time.dayofyear"][0].variable, Variable([], 1))
    def test_setitem(self) -> None:
        """Exercise Dataset.__setitem__: Variable/DataArray/array/scalar
        assignment, dimension-conflict errors, and dict-based (positional and
        label) assignment with its alignment/validation errors.

        NOTE: the statements are order-dependent (later checks reuse the
        datasets mutated by earlier ones), so do not reorder.
        """
        # assign a variable
        var = Variable(["dim1"], np.random.randn(8))
        data1 = create_test_data()
        data1["A"] = var
        data2 = data1.copy()
        data2["A"] = var
        assert_identical(data1, data2)
        # assign a dataset array
        dv = 2 * data2["A"]
        data1["B"] = dv.variable
        data2["B"] = dv
        assert_identical(data1, data2)
        # can't assign an ND array without dimensions
        with pytest.raises(ValueError, match=r"without explicit dimension names"):
            data2["C"] = var.values.reshape(2, 4)
        # but can assign a 1D array
        data1["C"] = var.values
        data2["C"] = ("C", var.values)
        assert_identical(data1, data2)
        # can assign a scalar
        data1["scalar"] = 0
        data2["scalar"] = ([], 0)
        assert_identical(data1, data2)
        # can't use the same dimension name as a scalar var
        with pytest.raises(ValueError, match=r"already exists as a scalar"):
            data1["newvar"] = ("scalar", [3, 4, 5])
        # can't resize a used dimension
        with pytest.raises(ValueError, match=r"conflicting dimension sizes"):
            data1["dim1"] = data1["dim1"][:5]
        # override an existing value
        data1["A"] = 3 * data2["A"]
        assert_equal(data1["A"], 3 * data2["A"])
        # can't assign a dataset to a single key
        with pytest.raises(TypeError, match="Cannot assign a Dataset to a single key"):
            data1["D"] = xr.Dataset()
        # test assignment with positional and label-based indexing
        data3 = data1[["var1", "var2"]]
        data3["var3"] = data3.var1.isel(dim1=0)
        data4 = data3.copy()
        err_msg = (
            "can only set locations defined by dictionaries from Dataset.loc. Got: a"
        )
        with pytest.raises(TypeError, match=err_msg):
            data1.loc["a"] = 0
        err_msg = r"Variables \['A', 'B', 'scalar'\] in new values not available in original dataset:"
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": 1}] = data1[{"dim2": 2}]
        err_msg = "Variable 'var3': indexer {'dim2': 0} not available"
        with pytest.raises(ValueError, match=err_msg):
            data1[{"dim2": 0}] = 0.0
        err_msg = "Variable 'var1': indexer {'dim2': 10} not available"
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": 10}] = data3[{"dim2": 2}]
        err_msg = "Variable 'var1': dimension 'dim2' appears in new values"
        with pytest.raises(KeyError, match=err_msg):
            data4[{"dim2": 2}] = data3[{"dim2": [2]}]
        err_msg = (
            "Variable 'var2': dimension order differs between original and new data"
        )
        # temporarily transpose var2 to trigger the dim-order error, then undo
        data3["var2"] = data3["var2"].T
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3]}]
        data3["var2"] = data3["var2"].T
        err_msg = r"cannot align objects.*not equal along these coordinates.*"
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3, 4]}]
        err_msg = "Dataset assignment only accepts DataArrays, Datasets, and scalars."
        with pytest.raises(TypeError, match=err_msg):
            data4[{"dim2": [2, 3]}] = data3["var1"][{"dim2": [3, 4]}].values
        data5 = data4.astype(str)
        data5["var4"] = data4["var1"]
        # convert to `np.str_('a')` once `numpy<2.0` has been dropped
        err_msg = "could not convert string to float: .*'a'.*"
        with pytest.raises(ValueError, match=err_msg):
            data5[{"dim2": 1}] = "a"
        # successful positional and label-based assignment
        data4[{"dim2": 0}] = 0.0
        data4[{"dim2": 1}] = data3[{"dim2": 2}]
        data4.loc[{"dim2": 1.5}] = 1.0
        data4.loc[{"dim2": 2.0}] = data3.loc[{"dim2": 2.5}]
        for v, dat3 in data3.items():
            dat4 = data4[v]
            assert_array_equal(dat4[{"dim2": 0}], 0.0)
            assert_array_equal(dat4[{"dim2": 1}], dat3[{"dim2": 2}])
            assert_array_equal(dat4.loc[{"dim2": 1.5}], 1.0)
            assert_array_equal(dat4.loc[{"dim2": 2.0}], dat3.loc[{"dim2": 2.5}])
            unchanged = [1.0, 2.5, 3.0, 3.5, 4.0]
            assert_identical(
                dat4.loc[{"dim2": unchanged}], dat3.loc[{"dim2": unchanged}]
            )
    def test_setitem_pandas(self) -> None:
        """Assigning a pandas object (from .to_pandas()) is equivalent to
        assigning the original DataArray."""
        ds = self.make_example_math_dataset()
        ds["x"] = np.arange(3)
        ds_copy = ds.copy()
        ds_copy["bar"] = ds["bar"].to_pandas()
        assert_equal(ds, ds_copy)
    def test_setitem_auto_align(self) -> None:
        """__setitem__ aligns DataArray values to the dataset's existing
        indexes: missing labels become NaN, extra labels are dropped.

        NOTE: each assignment builds on the previous state of ``ds``.
        """
        ds = Dataset()
        ds["x"] = ("y", range(3))
        ds["y"] = 1 + np.arange(3)
        expected = Dataset({"x": ("y", range(3)), "y": 1 + np.arange(3)})
        assert_identical(ds, expected)
        ds["y"] = DataArray(range(3), dims="y")
        expected = Dataset({"x": ("y", range(3))}, {"y": range(3)})
        assert_identical(ds, expected)
        ds["x"] = DataArray([1, 2], coords=[("y", [0, 1])])
        expected = Dataset({"x": ("y", [1, 2, np.nan])}, {"y": range(3)})
        assert_identical(ds, expected)
        ds["x"] = 42
        expected = Dataset({"x": 42, "y": range(3)})
        assert_identical(ds, expected)
        ds["x"] = DataArray([4, 5, 6, 7], coords=[("y", [0, 1, 2, 3])])
        expected = Dataset({"x": ("y", [4, 5, 6])}, {"y": range(3)})
        assert_identical(ds, expected)
def test_setitem_dimension_override(self) -> None:
# regression test for GH-3377
ds = xr.Dataset({"x": [0, 1, 2]})
ds["x"] = ds["x"][:2]
expected = Dataset({"x": [0, 1]})
assert_identical(ds, expected)
ds = xr.Dataset({"x": [0, 1, 2]})
ds["x"] = np.array([0, 1])
assert_identical(ds, expected)
ds = xr.Dataset({"x": [0, 1, 2]})
ds.coords["x"] = [0, 1]
assert_identical(ds, expected)
    def test_setitem_with_coords(self) -> None:
        """Assigning a DataArray with its own coords must not mutate the
        source object, and conflicting coords are dropped while new ones are
        added (regression tests for GH2068 and GH2099)."""
        # Regression test for GH:2068
        ds = create_test_data()
        other = DataArray(
            np.arange(10), dims="dim3", coords={"numbers": ("dim3", np.arange(10))}
        )
        expected = ds.copy()
        expected["var3"] = other.drop_vars("numbers")
        actual = ds.copy()
        actual["var3"] = other
        assert_identical(expected, actual)
        assert "numbers" in other.coords  # should not change other
        # with alignment
        other = ds["var3"].isel(dim3=slice(1, -1))
        other["numbers"] = ("dim3", np.arange(8))
        actual = ds.copy()
        actual["var3"] = other
        assert "numbers" in other.coords  # should not change other
        expected = ds.copy()
        expected["var3"] = ds["var3"].isel(dim3=slice(1, -1))
        assert_identical(expected, actual)
        # with non-duplicate coords
        other = ds["var3"].isel(dim3=slice(1, -1))
        other["numbers"] = ("dim3", np.arange(8))
        other["position"] = ("dim3", np.arange(8))
        actual = ds.copy()
        actual["var3"] = other
        assert "position" in actual
        assert "position" in other.coords
        # assigning a coordinate-only dataarray
        actual = ds.copy()
        other = actual["numbers"]
        other[0] = 10
        actual["numbers"] = other
        assert actual["numbers"][0] == 10
        # GH: 2099
        ds = Dataset(
            {"var": ("x", [1, 2, 3])},
            coords={"x": [0, 1, 2], "z1": ("x", [1, 2, 3]), "z2": ("x", [1, 2, 3])},
        )
        ds["var"] = ds["var"] * 2
        assert np.allclose(ds["var"], [2, 4, 6])
    def test_setitem_align_new_indexes(self) -> None:
        """A new variable with a differently-labeled index is aligned to the
        dataset's existing index (missing labels become NaN)."""
        ds = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
        ds["bar"] = DataArray([2, 3, 4], [("x", [1, 2, 3])])
        expected = Dataset(
            {"foo": ("x", [1, 2, 3]), "bar": ("x", [np.nan, 2, 3])}, {"x": [0, 1, 2]}
        )
        assert_identical(ds, expected)
    def test_setitem_vectorized(self) -> None:
        """Vectorized (outer-product) assignment works for both positional
        (`ds[...]`) and label-based (`ds.loc[...]`) indexers (GH7030)."""
        # Regression test for GH:7030
        # Positional indexing
        da = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"])
        ds = xr.Dataset({"da": da})
        b = xr.DataArray([[0, 0], [1, 0]], dims=["u", "v"])
        c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"])
        w = xr.DataArray([-1, -2], dims=["u"])
        index = dict(b=b, c=c)
        ds[index] = xr.Dataset({"da": w})
        assert (ds[index]["da"] == w).all()
        # Indexing with coordinates
        da = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"])
        ds = xr.Dataset({"da": da})
        ds.coords["b"] = [2, 4, 6]
        b = xr.DataArray([[2, 2], [4, 2]], dims=["u", "v"])
        c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"])
        w = xr.DataArray([-1, -2], dims=["u"])
        index = dict(b=b, c=c)
        ds.loc[index] = xr.Dataset({"da": w}, coords={"b": ds.coords["b"]})
        assert (ds.loc[index]["da"] == w).all()
    @pytest.mark.parametrize("dtype", [str, bytes])
    def test_setitem_str_dtype(self, dtype) -> None:
        """Adding a data variable must not change a str/bytes coord's dtype."""
        ds = xr.Dataset(coords={"x": np.array(["x", "y"], dtype=dtype)})
        # test Dataset update
        ds["foo"] = xr.DataArray(np.array([0, 0]), dims=["x"])
        assert np.issubdtype(ds.x.dtype, dtype)
    def test_setitem_using_list(self) -> None:
        """A list of names on the left accepts a matching list of values
        (Variables or DataArrays) on the right."""
        # assign a list of variables
        var1 = Variable(["dim1"], np.random.randn(8))
        var2 = Variable(["dim1"], np.random.randn(8))
        actual = create_test_data()
        expected = actual.copy()
        expected["A"] = var1
        expected["B"] = var2
        actual[["A", "B"]] = [var1, var2]
        assert_identical(actual, expected)
        # assign a list of dataset arrays
        dv = 2 * expected[["A", "B"]]
        actual[["C", "D"]] = [d.variable for d in dv.data_vars.values()]
        expected[["C", "D"]] = dv
        assert_identical(actual, expected)
    @pytest.mark.parametrize(
        "var_list, data, error_regex",
        [
            (
                ["A", "B"],
                [Variable(["dim1"], np.random.randn(8))],
                r"Different lengths",
            ),
            ([], [Variable(["dim1"], np.random.randn(8))], r"Empty list of variables"),
            (["A", "B"], xr.DataArray([1, 2]), r"assign single DataArray"),
        ],
    )
    def test_setitem_using_list_errors(self, var_list, data, error_regex) -> None:
        """List-key assignment errors on length mismatch, an empty key list,
        and a single DataArray value for multiple keys."""
        actual = create_test_data()
        with pytest.raises(ValueError, match=error_regex):
            actual[var_list] = data
    def test_assign(self) -> None:
        """assign returns a new dataset (original untouched) and supports
        callable values evaluated against the intermediate dataset."""
        ds = Dataset()
        actual = ds.assign(x=[0, 1, 2], y=2)
        expected = Dataset({"x": [0, 1, 2], "y": 2})
        assert_identical(actual, expected)
        assert list(actual.variables) == ["x", "y"]
        assert_identical(ds, Dataset())
        # callables receive the dataset being assigned to
        actual = actual.assign(y=lambda ds: ds.x**2)
        expected = Dataset({"y": ("x", [0, 1, 4]), "x": [0, 1, 2]})
        assert_identical(actual, expected)
        actual = actual.assign_coords(z=2)
        expected = Dataset({"y": ("x", [0, 1, 4])}, {"z": 2, "x": [0, 1, 2]})
        assert_identical(actual, expected)
    def test_assign_coords(self) -> None:
        """assign_coords replaces coordinate values via kwargs or a dict."""
        ds = Dataset()
        actual = ds.assign(x=[0, 1, 2], y=2)
        actual = actual.assign_coords(x=list("abc"))
        expected = Dataset({"x": list("abc"), "y": 2})
        assert_identical(actual, expected)
        actual = ds.assign(x=[0, 1, 2], y=[2, 3])
        actual = actual.assign_coords({"y": [2.0, 3.0]})
        expected = ds.assign(x=[0, 1, 2], y=[2.0, 3.0])
        assert_identical(actual, expected)
    def test_assign_attrs(self) -> None:
        """`assign_attrs` merges attrs into a copy, leaving the source unchanged."""
        expected = Dataset(attrs=dict(a=1, b=2))
        new = Dataset()
        actual = new.assign_attrs(a=1, b=2)
        assert_identical(actual, expected)
        assert new.attrs == {}
        expected.attrs["c"] = 3
        new_actual = actual.assign_attrs({"c": 3})
        assert_identical(new_actual, expected)
        # intermediate result keeps only its own attrs
        assert actual.attrs == dict(a=1, b=2)
def test_drop_attrs(self) -> None:
# Simple example
ds = Dataset().assign_attrs(a=1, b=2)
original = ds.copy()
expected = Dataset()
result = ds.drop_attrs()
assert_identical(result, expected)
# Doesn't change original
assert_identical(ds, original)
# Example with variables and coords with attrs, and a multiindex. (arguably
# should have used a canonical dataset with all the features we're should
# support...)
var = Variable("x", [1, 2, 3], attrs=dict(x=1, y=2))
idx = IndexVariable("y", [1, 2, 3], attrs=dict(c=1, d=2))
mx = xr.Coordinates.from_pandas_multiindex(
pd.MultiIndex.from_tuples([(1, 2), (3, 4)], names=["d", "e"]), "z"
)
ds = Dataset(dict(var1=var), coords=dict(y=idx, z=mx)).assign_attrs(a=1, b=2)
assert ds.attrs != {}
assert ds["var1"].attrs != {}
assert ds["y"].attrs != {}
assert ds.coords["y"].attrs != {}
original = ds.copy(deep=True)
result = ds.drop_attrs()
assert result.attrs == {}
assert result["var1"].attrs == {}
assert result["y"].attrs == {}
assert list(result.data_vars) == list(ds.data_vars)
assert list(result.coords) == list(ds.coords)
# Doesn't change original
assert_identical(ds, original)
# Specifically test that the attrs on the coords are still there. (The index
# can't currently contain `attrs`, so we can't test those.)
assert ds.coords["y"].attrs != {}
# Test for deep=False
result_shallow = ds.drop_attrs(deep=False)
assert result_shallow.attrs == {}
assert result_shallow["var1"].attrs != {}
assert result_shallow["y"].attrs != {}
assert list(result.data_vars) == list(ds.data_vars)
assert list(result.coords) == list(ds.coords)
    def test_assign_multiindex_level(self) -> None:
        """Assigning a multi-index level as a data variable is rejected."""
        data = create_test_multiindex()
        with pytest.raises(ValueError, match=r"cannot drop or update.*corrupt.*index "):
            data.assign(level_1=range(4))
            data.assign_coords(level_1=range(4))
    def test_assign_new_multiindex(self) -> None:
        """Assigning a raw pandas.MultiIndex still works but warns about deprecation."""
        midx = pd.MultiIndex.from_arrays([["a", "a", "b", "b"], [0, 1, 0, 1]])
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")

        ds = Dataset(coords={"x": [1, 2]})
        expected = Dataset(coords=midx_coords)

        with pytest.warns(
            FutureWarning,
            match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
        ):
            actual = ds.assign(x=midx)
        assert_identical(actual, expected)
    @pytest.mark.parametrize("orig_coords", [{}, {"x": range(4)}])
    def test_assign_coords_new_multiindex(self, orig_coords) -> None:
        """Raw MultiIndex via assign_coords warns; Coordinates objects do not."""
        ds = Dataset(coords=orig_coords)
        midx = pd.MultiIndex.from_arrays(
            [["a", "a", "b", "b"], [0, 1, 0, 1]], names=("one", "two")
        )
        midx_coords = Coordinates.from_pandas_multiindex(midx, "x")

        expected = Dataset(coords=midx_coords)

        with pytest.warns(
            FutureWarning,
            match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
        ):
            actual = ds.assign_coords({"x": midx})
        assert_identical(actual, expected)

        # passing an explicit Coordinates object is the non-deprecated path
        actual = ds.assign_coords(midx_coords)
        assert_identical(actual, expected)
    def test_assign_coords_existing_multiindex(self) -> None:
        """Overwriting a multi-index dimension coordinate warns about inconsistency."""
        data = create_test_multiindex()
        with pytest.warns(
            FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent"
        ):
            updated = data.assign_coords(x=range(4))
        # https://github.com/pydata/xarray/issues/7097 (coord names updated)
        assert len(updated.coords) == 1

        with pytest.warns(
            FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent"
        ):
            updated = data.assign(x=range(4))
        # https://github.com/pydata/xarray/issues/7097 (coord names updated)
        assert len(updated.coords) == 1
    def test_assign_all_multiindex_coords(self) -> None:
        """Replacing every multi-index coord at once drops the shared multi-index."""
        data = create_test_multiindex()
        actual = data.assign(x=range(4), level_1=range(4), level_2=range(4))
        # no error but multi-index dropped in favor of single indexes for each level
        assert (
            actual.xindexes["x"]
            is not actual.xindexes["level_1"]
            is not actual.xindexes["level_2"]
        )
    def test_assign_coords_custom_index_side_effect(self) -> None:
        # test that assigning new coordinates do not reset other dimension coord indexes
        # to default (pandas) index (https://github.com/pydata/xarray/issues/7346)
        class CustomIndex(PandasIndex):
            pass

        ds = (
            Dataset(coords={"x": [1, 2, 3]})
            .drop_indexes("x")
            .set_xindex("x", CustomIndex)
        )
        actual = ds.assign_coords(y=[4, 5, 6])
        # the pre-existing custom index on "x" must survive the assignment
        assert isinstance(actual.xindexes["x"], CustomIndex)
    def test_assign_coords_custom_index(self) -> None:
        """Custom Index objects carried by Coordinates are preserved by assign_coords."""
        class CustomIndex(Index):
            pass

        coords = Coordinates(
            coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()}
        )
        ds = Dataset()
        actual = ds.assign_coords(coords)
        assert isinstance(actual.xindexes["x"], CustomIndex)
    def test_assign_coords_no_default_index(self) -> None:
        """Coordinates created with indexes={} stay index-free after assign_coords."""
        coords = Coordinates({"y": [1, 2, 3]}, indexes={})
        ds = Dataset()
        actual = ds.assign_coords(coords)
        expected = coords.to_dataset()
        assert_identical(expected, actual, check_default_indexes=False)
        assert "y" not in actual.xindexes
    def test_merge_multiindex_level(self) -> None:
        """Merging data that conflicts with a multi-index level raises; coords are ignored."""
        data = create_test_multiindex()

        other = Dataset({"level_1": ("x", [0, 1])})
        with pytest.raises(ValueError, match=r".*conflicting dimension sizes.*"):
            data.merge(other)

        other = Dataset({"level_1": ("x", range(4))})
        with pytest.raises(
            ValueError, match=r"unable to determine.*coordinates or not.*"
        ):
            data.merge(other)

        # `other` Dataset coordinates are ignored (bug or feature?)
        other = Dataset(coords={"level_1": ("x", range(4))})
        assert_identical(data.merge(other), data)
    def test_setitem_original_non_unique_index(self) -> None:
        # regression test for GH943: replacing a non-unique index coordinate
        # with unique values must succeed through all assignment spellings
        original = Dataset({"data": ("x", np.arange(5))}, coords={"x": [0, 1, 2, 0, 1]})
        expected = Dataset({"data": ("x", np.arange(5))}, {"x": range(5)})

        actual = original.copy()
        actual["x"] = list(range(5))
        assert_identical(actual, expected)

        actual = original.copy()
        actual["x"] = ("x", list(range(5)))
        assert_identical(actual, expected)

        actual = original.copy()
        actual.coords["x"] = list(range(5))
        assert_identical(actual, expected)
    def test_setitem_both_non_unique_index(self) -> None:
        # regression test for GH956: setting a variable when both source and
        # target share the same non-unique index must not raise
        names = ["joaquin", "manolo", "joaquin"]
        values = np.random.randint(0, 256, (3, 4, 4))
        array = DataArray(
            values, dims=["name", "row", "column"], coords=[names, range(4), range(4)]
        )
        expected = Dataset({"first": array, "second": array})
        actual = array.rename("first").to_dataset()
        actual["second"] = array
        assert_identical(expected, actual)
    def test_setitem_multiindex_level(self) -> None:
        """Setting a multi-index level variable directly is rejected."""
        data = create_test_multiindex()
        with pytest.raises(
            ValueError, match=r"cannot set or update variable.*corrupt.*index "
        ):
            data["level_1"] = range(4)
    def test_delitem(self) -> None:
        """`del ds[name]` removes variables (and coord entries) in place."""
        data = create_test_data()
        all_items = set(data.variables)
        assert set(data.variables) == all_items
        del data["var1"]
        assert set(data.variables) == all_items - {"var1"}
        del data["numbers"]
        assert set(data.variables) == all_items - {"var1", "numbers"}
        assert "numbers" not in data.coords

        # deleting the only data variable leaves an empty dataset
        expected = Dataset()
        actual = Dataset({"y": ("x", [1, 2])})
        del actual["y"]
        assert_identical(expected, actual)
    def test_delitem_multiindex_level(self) -> None:
        """Deleting a single multi-index level coordinate is rejected."""
        data = create_test_multiindex()
        with pytest.raises(
            ValueError, match=r"cannot remove coordinate.*corrupt.*index "
        ):
            del data["level_1"]
    def test_squeeze(self) -> None:
        """`squeeze` drops size-1 dims, per-variable, matching manual squeezing."""
        data = Dataset({"foo": (["x", "y", "z"], [[[1], [2]]])})
        test_args: list[list] = [[], [["x"]], [["x", "z"]]]
        for args in test_args:

            def get_args(args, v):
                # only squeeze dims that this variable actually has
                return [set(args[0]) & set(v.dims)] if args else []

            expected = Dataset(
                {k: v.squeeze(*get_args(args, v)) for k, v in data.variables.items()}
            )
            expected = expected.set_coords(data.coords)
            assert_identical(expected, data.squeeze(*args))
        # invalid squeeze
        with pytest.raises(ValueError, match=r"cannot select a dimension"):
            data.squeeze("y")
    def test_squeeze_drop(self) -> None:
        """`squeeze(drop=...)` controls whether squeezed scalar coords are kept."""
        data = Dataset({"foo": ("x", [1])}, {"x": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)

        expected = Dataset({"foo": 1}, {"x": 0})
        selected = data.squeeze(drop=False)
        assert_identical(expected, selected)

        data = Dataset({"foo": (("x", "y"), [[1]])}, {"x": [0], "y": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)

        expected = Dataset({"foo": ("x", [1])}, {"x": [0]})
        selected = data.squeeze(dim="y", drop=True)
        assert_identical(expected, selected)

        # dims of size 0 are not squeezable, so nothing changes
        data = Dataset({"foo": (("x",), [])}, {"x": []})
        selected = data.squeeze(drop=True)
        assert_identical(data, selected)
    def test_to_dataarray(self) -> None:
        """`to_dataarray` stacks data vars along a new dim, broadcasting scalars."""
        ds = Dataset(
            {"a": 1, "b": ("x", [1, 2, 3])},
            coords={"c": 42},
            attrs={"Conventions": "None"},
        )
        data = [[1, 1, 1], [1, 2, 3]]
        coords = {"c": 42, "variable": ["a", "b"]}
        dims = ("variable", "x")
        expected = DataArray(data, coords, dims, attrs=ds.attrs)
        actual = ds.to_dataarray()
        assert_identical(expected, actual)

        # custom stacking dim name and array name
        actual = ds.to_dataarray("abc", name="foo")
        expected = expected.rename({"variable": "abc"}).rename("foo")
        assert_identical(expected, actual)
    def test_to_and_from_dataframe(self) -> None:
        """Round-trip Dataset <-> pandas DataFrame: flat index, MultiIndex,
        dim_order reordering, categorical dtypes, and pathological edge cases.

        NOTE: `ds` is rebuilt/mutated several times below; the assertions are
        order-dependent.
        """
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        cat = pd.Categorical(["a", "b"] * 5)
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t), "cat": ("t", cat)})
        expected = pd.DataFrame(
            np.array([x, y]).T, columns=["a", "b"], index=pd.Index(t, name="t")
        )
        expected["cat"] = cat
        actual = ds.to_dataframe()
        # use the .equals method to check all DataFrame metadata
        assert expected.equals(actual), (expected, actual)

        # verify coords are included
        actual = ds.set_coords("b").to_dataframe()
        assert expected.equals(actual), (expected, actual)

        # check roundtrip
        assert_identical(ds, Dataset.from_dataframe(actual))
        assert isinstance(ds["cat"].variable.data.dtype, pd.CategoricalDtype)
        # test a case with a MultiIndex
        w = np.random.randn(2, 3)
        cat = pd.Categorical(["a", "a", "c"])
        ds = Dataset({"w": (("x", "y"), w), "cat": ("y", cat)})
        ds["y"] = ("y", list("abc"))
        exp_index = pd.MultiIndex.from_arrays(
            [[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"]
        )
        expected = pd.DataFrame(
            {"w": w.reshape(-1), "cat": pd.Categorical(["a", "a", "c", "a", "a", "c"])},
            index=exp_index,
        )
        actual = ds.to_dataframe()
        assert expected.equals(actual)

        # check roundtrip
        # from_dataframe attempts to broadcast across because it doesn't know better, so cat must be converted
        ds["cat"] = (("x", "y"), np.stack((ds["cat"].to_numpy(), ds["cat"].to_numpy())))
        assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual))

        # Check multiindex reordering
        new_order = ["x", "y"]
        # revert broadcasting fix above for 1d arrays
        ds["cat"] = ("y", cat)
        actual = ds.to_dataframe(dim_order=new_order)
        assert expected.equals(actual)

        new_order = ["y", "x"]
        exp_index = pd.MultiIndex.from_arrays(
            [["a", "a", "b", "b", "c", "c"], [0, 1, 0, 1, 0, 1]], names=["y", "x"]
        )
        expected = pd.DataFrame(
            {
                "w": w.transpose().reshape(-1),
                "cat": pd.Categorical(["a", "a", "a", "a", "c", "c"]),
            },
            index=exp_index,
        )
        actual = ds.to_dataframe(dim_order=new_order)
        assert expected.equals(actual)

        # dim_order must cover exactly the dataset's dimensions
        invalid_order = ["x"]
        with pytest.raises(
            ValueError, match="does not match the set of dimensions of this"
        ):
            ds.to_dataframe(dim_order=invalid_order)

        invalid_order = ["x", "z"]
        with pytest.raises(
            ValueError, match="does not match the set of dimensions of this"
        ):
            ds.to_dataframe(dim_order=invalid_order)

        # test a case with a MultiIndex along a single dimension
        data_dict = dict(
            x=[1, 2, 1, 2, 1], y=["a", "a", "b", "b", "b"], z=[5, 10, 15, 20, 25]
        )
        data_dict_w_dims = {k: ("single_dim", v) for k, v in data_dict.items()}
        # Dataset multi-indexed along "single_dim" by "x" and "y"
        ds = Dataset(data_dict_w_dims).set_coords(["x", "y"]).set_xindex(["x", "y"])
        expected = pd.DataFrame(data_dict).set_index(["x", "y"])
        actual = ds.to_dataframe()
        assert expected.equals(actual)
        # should be possible to reset index, as there should be no duplication
        # between index and columns, and dataframes should still be equal
        assert expected.reset_index().equals(actual.reset_index())

        # MultiIndex deduplication should not affect other coordinates.
        mindex_single = pd.MultiIndex.from_product(
            [list(range(6)), list("ab")], names=["A", "B"]
        )
        ds = DataArray(
            range(12), [("MI", mindex_single)], dims="MI", name="test"
        )._to_dataset_whole()
        ds.coords["C"] = "a single value"
        ds.coords["D"] = ds.coords["A"] ** 2
        expected = pd.DataFrame(
            dict(
                test=range(12),
                C="a single value",
                D=[0, 0, 1, 1, 4, 4, 9, 9, 16, 16, 25, 25],
            )
        ).set_index(mindex_single)
        actual = ds.to_dataframe()
        assert expected.equals(actual)
        assert expected.reset_index().equals(actual.reset_index())

        # check pathological cases
        df = pd.DataFrame([1])
        actual_ds = Dataset.from_dataframe(df)
        expected_ds = Dataset({0: ("index", [1])}, {"index": [0]})
        assert_identical(expected_ds, actual_ds)

        df = pd.DataFrame()
        actual_ds = Dataset.from_dataframe(df)
        expected_ds = Dataset(coords={"index": []})
        assert_identical(expected_ds, actual_ds)

        # GH697
        df = pd.DataFrame({"A": []})
        actual_ds = Dataset.from_dataframe(df)
        expected_ds = Dataset({"A": DataArray([], dims=("index",))}, {"index": []})
        assert_identical(expected_ds, actual_ds)

        # regression test for GH278
        # use int64 to ensure consistent results for the pandas .equals method
        # on windows (which requires the same dtype)
        ds = Dataset({"x": pd.Index(["bar"]), "a": ("y", np.array([1], "int64"))}).isel(
            x=0
        )
        # use .loc to ensure consistent results on Python 3
        actual = ds.to_dataframe().loc[:, ["a", "x"]]
        expected = pd.DataFrame(
            [[1, "bar"]], index=pd.Index([0], name="y"), columns=["a", "x"]
        )
        assert expected.equals(actual), (expected, actual)

        ds = Dataset({"x": np.array([0], "int64"), "y": np.array([1], "int64")})
        actual = ds.to_dataframe()
        idx = pd.MultiIndex.from_arrays([[0], [1]], names=["x", "y"])
        expected = pd.DataFrame([[]], index=idx)
        assert expected.equals(actual), (expected, actual)
    def test_from_dataframe_categorical_dtype_index(self) -> None:
        """A CategoricalIndex survives a DataFrame -> Dataset -> DataFrame round trip."""
        cat = pd.CategoricalIndex(list("abcd"))
        df = pd.DataFrame({"f": [0, 1, 2, 3]}, index=cat)
        ds = df.to_xarray()
        restored = ds.to_dataframe()
        df.index.name = (
            "index"  # restored gets the name because it has the coord with the name
        )
        pd.testing.assert_frame_equal(df, restored)
    def test_from_dataframe_categorical_index(self) -> None:
        """Categorical index columns keep only the observed categories per level."""
        cat = pd.CategoricalDtype(
            categories=["foo", "bar", "baz", "qux", "quux", "corge"]
        )
        i1 = pd.Series(["foo", "bar", "foo"], dtype=cat)
        i2 = pd.Series(["bar", "bar", "baz"], dtype=cat)

        df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2, 3]})
        ds = df.set_index("i1").to_xarray()
        assert len(ds["i1"]) == 3

        ds = df.set_index(["i1", "i2"]).to_xarray()
        assert len(ds["i1"]) == 2
        assert len(ds["i2"]) == 2
    def test_from_dataframe_categorical_index_string_categories(self) -> None:
        """A categorical index with pandas `string` categories keeps its dtype."""
        cat = pd.CategoricalIndex(
            pd.Categorical.from_codes(
                np.array([1, 1, 0, 2], dtype=np.int64),  # type: ignore[arg-type]
                categories=pd.Index(["foo", "bar", "baz"], dtype="string"),
            )
        )
        ser = pd.Series(1, index=cat)
        ds = ser.to_xarray()
        assert ds.coords.dtypes["index"] == ser.index.dtype
    @requires_sparse
    def test_from_dataframe_sparse(self) -> None:
        """`from_dataframe(sparse=True)` yields sparse.COO data equal to the dense path."""
        import sparse

        df_base = pd.DataFrame(
            {"x": range(10), "y": list("abcdefghij"), "z": np.arange(0, 100, 10)}
        )

        ds_sparse = Dataset.from_dataframe(df_base.set_index("x"), sparse=True)
        ds_dense = Dataset.from_dataframe(df_base.set_index("x"), sparse=False)
        assert isinstance(ds_sparse["y"].data, sparse.COO)
        assert isinstance(ds_sparse["z"].data, sparse.COO)
        # densify so values can be compared against the dense construction
        ds_sparse["y"].data = ds_sparse["y"].data.todense()
        ds_sparse["z"].data = ds_sparse["z"].data.todense()
        assert_identical(ds_dense, ds_sparse)

        ds_sparse = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=True)
        ds_dense = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=False)
        assert isinstance(ds_sparse["z"].data, sparse.COO)
        ds_sparse["z"].data = ds_sparse["z"].data.todense()
        assert_identical(ds_dense, ds_sparse)
    def test_to_and_from_empty_dataframe(self) -> None:
        # GH697: empty DataFrames must round-trip without error
        expected = pd.DataFrame({"foo": []})
        ds = Dataset.from_dataframe(expected)
        assert len(ds["foo"]) == 0
        actual = ds.to_dataframe()
        assert len(actual) == 0
        assert expected.equals(actual)
    def test_from_dataframe_multiindex(self) -> None:
        """MultiIndex DataFrames unstack to nd arrays; missing cells become NaN."""
        index = pd.MultiIndex.from_product([["a", "b"], [1, 2, 3]], names=["x", "y"])
        df = pd.DataFrame({"z": np.arange(6)}, index=index)

        expected = Dataset(
            {"z": (("x", "y"), [[0, 1, 2], [3, 4, 5]])},
            coords={"x": ["a", "b"], "y": [1, 2, 3]},
        )
        actual = Dataset.from_dataframe(df)
        assert_identical(actual, expected)

        # row order does not matter
        df2 = df.iloc[[3, 2, 1, 0, 4, 5], :]
        actual = Dataset.from_dataframe(df2)
        assert_identical(actual, expected)

        # an incomplete product fills the gaps with NaN
        df3 = df.iloc[:4, :]
        expected3 = Dataset(
            {"z": (("x", "y"), [[0, 1, 2], [3, np.nan, np.nan]])},
            coords={"x": ["a", "b"], "y": [1, 2, 3]},
        )
        actual = Dataset.from_dataframe(df3)
        assert_identical(actual, expected3)

        df_nonunique = df.iloc[[0, 0], :]
        with pytest.raises(ValueError, match=r"non-unique MultiIndex"):
            Dataset.from_dataframe(df_nonunique)
    def test_from_dataframe_unsorted_levels(self) -> None:
        # regression test for GH-4186: MultiIndex levels in non-sorted order
        # must keep that order in the resulting coordinates
        index = pd.MultiIndex(
            levels=[["b", "a"], ["foo"]], codes=[[0, 1], [0, 0]], names=["lev1", "lev2"]
        )
        df = pd.DataFrame({"c1": [0, 2], "c2": [1, 3]}, index=index)
        expected = Dataset(
            {
                "c1": (("lev1", "lev2"), [[0], [2]]),
                "c2": (("lev1", "lev2"), [[1], [3]]),
            },
            coords={"lev1": ["b", "a"], "lev2": ["foo"]},
        )
        actual = Dataset.from_dataframe(df)
        assert_identical(actual, expected)
    def test_from_dataframe_non_unique_columns(self) -> None:
        # regression test for GH449: duplicate column names must be rejected
        df = pd.DataFrame(np.zeros((2, 2)))
        df.columns = ["foo", "foo"]  # type: ignore[assignment,list-item,unused-ignore]
        with pytest.raises(ValueError, match=r"non-unique columns"):
            Dataset.from_dataframe(df)
    def test_convert_dataframe_with_many_types_and_multiindex(self) -> None:
        # regression test for GH737: heterogeneous dtypes + MultiIndex round trip
        df = pd.DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="America/New_York"),
            }
        )
        df.index = pd.MultiIndex.from_product([["a"], range(3)], names=["one", "two"])
        roundtripped = Dataset.from_dataframe(df).to_dataframe()
        # we can't do perfectly, but we should be at least as faithful as
        # np.asarray
        expected = df.apply(np.asarray)
        assert roundtripped.equals(expected)
    @pytest.mark.parametrize("encoding", [True, False])
    @pytest.mark.parametrize("data", [True, "list", "array"])
    def test_to_and_from_dict(
        self, encoding: bool, data: bool | Literal["list", "array"]
    ) -> None:
        """Round-trip Dataset <-> dict, with/without encoding and with data=False."""
        # <xarray.Dataset>
        # Dimensions:  (t: 10)
        # Coordinates:
        #   * t        (t) <U1 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
        # Data variables:
        #     a        (t) float64 0.6916 -1.056 -1.163 0.9792 -0.7865 ...
        #     b        (t) float64 1.32 0.1954 1.91 1.39 0.519 -0.2772 ...
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        expected: dict[str, dict[str, Any]] = {
            "coords": {"t": {"dims": ("t",), "data": t, "attrs": {}}},
            "attrs": {},
            "dims": {"t": 10},
            "data_vars": {
                "a": {"dims": ("t",), "data": x.tolist(), "attrs": {}},
                "b": {"dims": ("t",), "data": y.tolist(), "attrs": {}},
            },
        }
        if encoding:
            ds.t.encoding.update({"foo": "bar"})
            expected["encoding"] = {}
            expected["coords"]["t"]["encoding"] = ds.t.encoding
            for vvs in ["a", "b"]:
                expected["data_vars"][vvs]["encoding"] = {}

        actual = ds.to_dict(data=data, encoding=encoding)

        # check that they are identical
        np.testing.assert_equal(expected, actual)

        # check roundtrip
        ds_rt = Dataset.from_dict(actual)
        assert_identical(ds, ds_rt)
        if encoding:
            assert set(ds_rt.variables) == set(ds.variables)
            for vv in ds.variables:
                np.testing.assert_equal(ds_rt[vv].encoding, ds[vv].encoding)

        # check the data=False option: data keys replaced by dtype/shape metadata
        expected_no_data = expected.copy()
        del expected_no_data["coords"]["t"]["data"]
        del expected_no_data["data_vars"]["a"]["data"]
        del expected_no_data["data_vars"]["b"]["data"]
        endiantype = "<U1" if sys.byteorder == "little" else ">U1"
        expected_no_data["coords"]["t"].update({"dtype": endiantype, "shape": (10,)})
        expected_no_data["data_vars"]["a"].update({"dtype": "float64", "shape": (10,)})
        expected_no_data["data_vars"]["b"].update({"dtype": "float64", "shape": (10,)})
        actual_no_data = ds.to_dict(data=False, encoding=encoding)
        assert expected_no_data == actual_no_data

        # verify coords are included roundtrip
        expected_ds = ds.set_coords("b")
        actual2 = Dataset.from_dict(expected_ds.to_dict(data=data, encoding=encoding))

        assert_identical(expected_ds, actual2)
        if encoding:
            assert set(expected_ds.variables) == set(actual2.variables)
            for vv in ds.variables:
                np.testing.assert_equal(expected_ds[vv].encoding, actual2[vv].encoding)

        # test some incomplete dicts:
        # this one has no attrs field, the dims are strings, and x, y are
        # np.arrays

        d = {
            "coords": {"t": {"dims": "t", "data": t}},
            "dims": "t",
            "data_vars": {"a": {"dims": "t", "data": x}, "b": {"dims": "t", "data": y}},
        }
        assert_identical(ds, Dataset.from_dict(d))

        # this is kind of a flattened version with no coords, or data_vars
        d = {
            "a": {"dims": "t", "data": x},
            "t": {"data": t, "dims": "t"},
            "b": {"dims": "t", "data": y},
        }
        assert_identical(ds, Dataset.from_dict(d))

        # this one is missing some necessary information
        d = {
            "a": {"data": x},
            "t": {"data": t, "dims": "t"},
            "b": {"dims": "t", "data": y},
        }
        with pytest.raises(
            ValueError, match=r"cannot convert dict without the key 'dims'"
        ):
            Dataset.from_dict(d)
    def test_to_and_from_dict_with_time_dim(self) -> None:
        """Datetime coordinates survive a to_dict/from_dict round trip."""
        x = np.random.randn(10, 3)
        y = np.random.randn(10, 3)
        t = pd.date_range("20130101", periods=10)
        lat = [77.7, 83.2, 76]
        ds = Dataset(
            {
                "a": (["t", "lat"], x),
                "b": (["t", "lat"], y),
                "t": ("t", t),
                "lat": ("lat", lat),
            }
        )
        roundtripped = Dataset.from_dict(ds.to_dict())
        assert_identical(ds, roundtripped)
    @pytest.mark.parametrize("data", [True, "list", "array"])
    def test_to_and_from_dict_with_nan_nat(
        self, data: bool | Literal["list", "array"]
    ) -> None:
        """NaN floats and NaT timestamps survive a to_dict/from_dict round trip."""
        x = np.random.randn(10, 3)
        y = np.random.randn(10, 3)
        y[2] = np.nan
        t = pd.Series(pd.date_range("20130101", periods=10))
        t[2] = np.nan  # becomes NaT in the datetime series

        lat = [77.7, 83.2, 76]
        ds = Dataset(
            {
                "a": (["t", "lat"], x),
                "b": (["t", "lat"], y),
                "t": ("t", t),
                "lat": ("lat", lat),
            }
        )
        roundtripped = Dataset.from_dict(ds.to_dict(data=data))
        assert_identical(ds, roundtripped)
    def test_to_dict_with_numpy_attrs(self) -> None:
        # this doesn't need to roundtrip: to_dict converts numpy scalar/array
        # attrs to plain Python values
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        attrs = {
            "created": np.float64(1998),
            "coords": np.array([37, -110.1, 100]),
            "maintainer": "bar",
        }
        ds = Dataset({"a": ("t", x, attrs), "b": ("t", y, attrs), "t": ("t", t)})
        expected_attrs = {
            "created": attrs["created"].item(),  # type: ignore[attr-defined]
            "coords": attrs["coords"].tolist(),  # type: ignore[attr-defined]
            "maintainer": "bar",
        }
        actual = ds.to_dict()

        # check that they are identical
        assert expected_attrs == actual["data_vars"]["a"]["attrs"]
    def test_pickle(self) -> None:
        """Datasets pickle and unpickle to an identical object."""
        data = create_test_data()
        roundtripped = pickle.loads(pickle.dumps(data))
        assert_identical(data, roundtripped)
        # regression test for #167: sizes must survive pickling too
        assert data.sizes == roundtripped.sizes
    def test_lazy_load(self) -> None:
        """Indexing a lazily-backed dataset must not trigger data access."""
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)

        for decode_cf in [True, False]:
            ds = open_dataset(store, decode_cf=decode_cf)
            with pytest.raises(UnexpectedDataAccess):
                ds.load()
            with pytest.raises(UnexpectedDataAccess):
                _ = ds["var1"].values

            # these should not raise UnexpectedDataAccess:
            ds.isel(time=10)
            ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
    def test_lazy_load_duck_array(self) -> None:
        """Duck-array-backed stores stay lazy and keep their array type on load."""
        store = AccessibleAsDuckArrayDataStore()
        create_test_data().dump_to_store(store)

        for decode_cf in [True, False]:
            ds = open_dataset(store, decode_cf=decode_cf)
            with pytest.raises(UnexpectedDataAccess):
                _ = ds["var1"].values

            # these should not raise UnexpectedDataAccess:
            _ = ds.var1.data
            ds.isel(time=10)
            ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
            repr(ds)

            # preserve the duck array type and don't cast to array
            assert isinstance(ds["var1"].load().data, DuckArrayWrapper)
            assert isinstance(
                ds["var1"].isel(dim2=0, dim1=0).load().data, DuckArrayWrapper
            )

            ds.close()
def test_dropna(self) -> None:
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
y = np.random.randn(4)
y[-1] = np.nan
ds = Dataset({"foo": (("a", "b"), x), "bar": (("b", y))})
expected = ds.isel(a=slice(1, None, 2))
actual = ds.dropna("a")
assert_identical(actual, expected)
expected = ds.isel(b=slice(1, 3))
actual = ds.dropna("b")
assert_identical(actual, expected)
actual = ds.dropna("b", subset=["foo", "bar"])
assert_identical(actual, expected)
expected = ds.isel(b=slice(1, None))
actual = ds.dropna("b", subset=["foo"])
assert_identical(actual, expected)
expected = ds.isel(b=slice(3))
actual = ds.dropna("b", subset=["bar"])
assert_identical(actual, expected)
actual = ds.dropna("a", subset=[])
assert_identical(actual, ds)
actual = ds.dropna("a", subset=["bar"])
assert_identical(actual, ds)
actual = ds.dropna("a", how="all")
assert_identical(actual, ds)
actual = ds.dropna("b", how="all", subset=["bar"])
expected = ds.isel(b=[0, 1, 2])
assert_identical(actual, expected)
actual = ds.dropna("b", thresh=1, subset=["bar"])
assert_identical(actual, expected)
actual = ds.dropna("b", thresh=2)
assert_identical(actual, ds)
actual = ds.dropna("b", thresh=4)
expected = ds.isel(b=[1, 2, 3])
assert_identical(actual, expected)
actual = ds.dropna("a", thresh=3)
expected = ds.isel(a=[1, 3])
assert_identical(actual, ds)
with pytest.raises(
ValueError,
match=r"'foo' not found in data dimensions \('a', 'b'\)",
):
ds.dropna("foo")
with pytest.raises(ValueError, match=r"invalid how"):
ds.dropna("a", how="somehow") # type: ignore[arg-type]
with pytest.raises(TypeError, match=r"must specify how or thresh"):
ds.dropna("a", how=None) # type: ignore[arg-type]
    def test_fillna(self) -> None:
        """`fillna` accepts scalars, dicts, DataArrays and Datasets as fill values."""
        ds = Dataset({"a": ("x", [np.nan, 1, np.nan, 3])}, {"x": [0, 1, 2, 3]})

        # fill with -1
        actual1 = ds.fillna(-1)
        expected = Dataset({"a": ("x", [-1, 1, -1, 3])}, {"x": [0, 1, 2, 3]})
        assert_identical(expected, actual1)

        actual2 = ds.fillna({"a": -1})
        assert_identical(expected, actual2)

        other = Dataset({"a": -1})
        actual3 = ds.fillna(other)
        assert_identical(expected, actual3)

        actual4 = ds.fillna({"a": other.a})
        assert_identical(expected, actual4)

        # fill with range(4)
        b = DataArray(range(4), coords=[("x", range(4))])
        actual5 = ds.fillna(b)
        expected = b.rename("a").to_dataset()
        assert_identical(expected, actual5)

        actual6 = ds.fillna(expected)
        assert_identical(expected, actual6)

        actual7 = ds.fillna(np.arange(4))
        assert_identical(expected, actual7)

        # a shorter fill array still aligns by coordinate
        actual8 = ds.fillna(b[:3])
        assert_identical(expected, actual8)

        # okay to only include some data variables
        ds["b"] = np.nan
        actual9 = ds.fillna({"a": -1})
        expected = Dataset(
            {"a": ("x", [-1, 1, -1, 3]), "b": np.nan}, {"x": [0, 1, 2, 3]}
        )
        assert_identical(expected, actual9)

        # but new data variables is not okay
        with pytest.raises(ValueError, match=r"must be contained"):
            ds.fillna({"x": 0})

        # empty argument should be OK
        result1 = ds.fillna({})
        assert_identical(ds, result1)

        result2 = ds.fillna(Dataset(coords={"c": 42}))
        expected = ds.assign_coords(c=42)
        assert_identical(expected, result2)

        # attrs and name are preserved through fillna
        da = DataArray(range(5), name="a", attrs={"attr": "da"})
        actual10 = da.fillna(1)
        assert actual10.name == "a"
        assert actual10.attrs == da.attrs

        ds = Dataset({"a": da}, attrs={"attr": "ds"})
        actual11 = ds.fillna({"a": 1})
        assert actual11.attrs == ds.attrs
        assert actual11.a.name == "a"
        assert actual11.a.attrs == ds.a.attrs
@pytest.mark.parametrize(
"func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs]
)
def test_propagate_attrs(self, func) -> None:
da = DataArray(range(5), name="a", attrs={"attr": "da"})
ds = Dataset({"a": da}, attrs={"attr": "ds"})
# test defaults
assert func(ds).attrs == ds.attrs
with set_options(keep_attrs=False):
assert func(ds).attrs != ds.attrs
assert func(ds).a.attrs != ds.a.attrs
with set_options(keep_attrs=False):
assert func(ds).attrs != ds.attrs
assert func(ds).a.attrs != ds.a.attrs
with set_options(keep_attrs=True):
assert func(ds).attrs == ds.attrs
assert func(ds).a.attrs == ds.a.attrs
    def test_where(self) -> None:
        """`where` masks with Dataset/DataArray/ndarray/bool/callable conditions."""
        ds = Dataset({"a": ("x", range(5))})
        expected1 = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])})
        actual1 = ds.where(ds > 1)
        assert_identical(expected1, actual1)

        actual2 = ds.where(ds.a > 1)
        assert_identical(expected1, actual2)

        actual3 = ds.where(ds.a.values > 1)
        assert_identical(expected1, actual3)

        actual4 = ds.where(True)
        assert_identical(ds, actual4)

        expected5 = ds.copy(deep=True)
        expected5["a"].values = np.array([np.nan] * 5)
        actual5 = ds.where(False)
        assert_identical(expected5, actual5)

        # 2d
        ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])})
        expected6 = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])})
        actual6 = ds.where(ds > 0)
        assert_identical(expected6, actual6)

        # attrs are preserved through where
        da = DataArray(range(5), name="a", attrs={"attr": "da"})
        actual7 = da.where(da.values > 1)
        assert actual7.name == "a"
        assert actual7.attrs == da.attrs

        ds = Dataset({"a": da}, attrs={"attr": "ds"})
        actual8 = ds.where(ds > 0)
        assert actual8.attrs == ds.attrs
        assert actual8.a.name == "a"
        assert actual8.a.attrs == ds.a.attrs

        # lambda
        ds = Dataset({"a": ("x", range(5))})
        expected9 = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])})
        actual9 = ds.where(lambda x: x > 1)
        assert_identical(expected9, actual9)
    def test_where_other(self) -> None:
        """`where(cond, other)` substitutes `other`, keeping the original dtype."""
        ds = Dataset({"a": ("x", range(5))}, {"x": range(5)})
        expected = Dataset({"a": ("x", [-1, -1, 2, 3, 4])}, {"x": range(5)})
        actual = ds.where(ds > 1, -1)
        assert_equal(expected, actual)
        # no NaN introduced, so dtype stays int
        assert actual.a.dtype == int

        actual = ds.where(lambda x: x > 1, -1)
        assert_equal(expected, actual)

        actual = ds.where(ds > 1, other=-1, drop=True)
        expected_nodrop = ds.where(ds > 1, -1)
        _, expected = xr.align(actual, expected_nodrop, join="left")
        assert_equal(actual, expected)
        assert actual.a.dtype == int

        with pytest.raises(ValueError, match=r"cannot align .* are not equal"):
            ds.where(ds > 1, ds.isel(x=slice(3)))

        with pytest.raises(ValueError, match=r"exact match required"):
            ds.where(ds > 1, ds.assign(b=2))
    def test_where_drop(self) -> None:
        """`where(cond, drop=True)` trims all-NaN slices along every dimension."""
        # if drop=True

        # 1d
        # data array case
        array = DataArray(range(5), coords=[range(5)], dims=["x"])
        expected1 = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=["x"])
        actual1 = array.where(array > 1, drop=True)
        assert_identical(expected1, actual1)

        # dataset case
        ds = Dataset({"a": array})
        expected2 = Dataset({"a": expected1})

        actual2 = ds.where(ds > 1, drop=True)
        assert_identical(expected2, actual2)

        actual3 = ds.where(ds.a > 1, drop=True)
        assert_identical(expected2, actual3)

        # cond must be a Dataset/DataArray when dropping
        with pytest.raises(TypeError, match=r"must be a"):
            ds.where(np.arange(5) > 1, drop=True)

        # 1d with odd coordinates
        array = DataArray(
            np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=["x"]
        )
        expected4 = DataArray(
            np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=["x"]
        )
        actual4 = array.where(array > 2, drop=True)
        assert_identical(expected4, actual4)

        # 1d multiple variables
        ds = Dataset({"a": (("x"), [0, 1, 2, 3]), "b": (("x"), [4, 5, 6, 7])})
        expected5 = Dataset(
            {"a": (("x"), [np.nan, 1, 2, 3]), "b": (("x"), [4, 5, 6, np.nan])}
        )
        actual5 = ds.where((ds > 0) & (ds < 7), drop=True)
        assert_identical(expected5, actual5)

        # 2d
        ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])})
        expected6 = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])})
        actual6 = ds.where(ds > 0, drop=True)
        assert_identical(expected6, actual6)

        # 2d with odd coordinates
        ds = Dataset(
            {"a": (("x", "y"), [[0, 1], [2, 3]])},
            coords={
                "x": [4, 3],
                "y": [1, 2],
                "z": (["x", "y"], [[np.exp(1), np.pi], [np.pi * np.exp(1), np.pi * 3]]),
            },
        )
        expected7 = Dataset(
            {"a": (("x", "y"), [[3]])},
            coords={"x": [3], "y": [2], "z": (["x", "y"], [[np.pi * 3]])},
        )
        actual7 = ds.where(ds > 2, drop=True)
        assert_identical(expected7, actual7)

        # 2d multiple variables
        ds = Dataset(
            {"a": (("x", "y"), [[0, 1], [2, 3]]), "b": (("x", "y"), [[4, 5], [6, 7]])}
        )
        expected8 = Dataset(
            {
                "a": (("x", "y"), [[np.nan, 1], [2, 3]]),
                "b": (("x", "y"), [[4, 5], [6, 7]]),
            }
        )
        actual8 = ds.where(ds > 0, drop=True)
        assert_identical(expected8, actual8)

        # mixed dimensions: PR#6690, Issue#6227
        ds = xr.Dataset(
            {
                "a": ("x", [1, 2, 3]),
                "b": ("y", [2, 3, 4]),
                "c": (("x", "y"), np.arange(9).reshape((3, 3))),
            }
        )
        expected9 = xr.Dataset(
            {
                "a": ("x", [np.nan, 3]),
                "b": ("y", [np.nan, 3, 4]),
                "c": (("x", "y"), np.arange(3.0, 9.0).reshape((2, 3))),
            }
        )
        actual9 = ds.where(ds > 2, drop=True)
        assert actual9.sizes["x"] == 2
        assert_identical(expected9, actual9)
def test_where_drop_empty(self) -> None:
# regression test for GH1341
array = DataArray(np.random.rand(100, 10), dims=["nCells", "nVertLevels"])
mask = DataArray(np.zeros((100,), dtype="bool"), dims="nCells")
actual = array.where(mask, drop=True)
expected = DataArray(np.zeros((0, 10)), dims=["nCells", "nVertLevels"])
assert_identical(expected, actual)
def test_where_drop_no_indexes(self) -> None:
ds = Dataset({"foo": ("x", [0.0, 1.0])})
expected = Dataset({"foo": ("x", [1.0])})
actual = ds.where(ds == 1, drop=True)
assert_identical(expected, actual)
    def test_reduce(self) -> None:
        """Basic reductions: coord dropping, per-variable results, the accepted
        forms of the ``dim`` argument, and rejection of numpy-style ``axis``."""
        data = create_test_data()
        # a full reduction drops all coordinates
        assert len(data.mean().coords) == 0
        actual = data.max()
        expected = Dataset({k: v.max() for k, v in data.data_vars.items()})
        assert_equal(expected, actual)
        # a single dim may be given as a string or a one-element list
        assert_equal(data.min(dim=["dim1"]), data.min(dim="dim1"))
        for reduct, expected_dims in [
            ("dim2", ["dim3", "time", "dim1"]),
            (["dim2", "time"], ["dim3", "dim1"]),
            (("dim2", "time"), ["dim3", "dim1"]),
            ((), ["dim2", "dim3", "time", "dim1"]),
        ]:
            actual_dims = list(data.min(dim=reduct).dims)
            assert actual_dims == expected_dims
        # reducing over an empty list of dims is a no-op
        assert_equal(data.mean(dim=[]), data)
        # numpy-style axis argument is not supported on Dataset reductions
        with pytest.raises(ValueError):
            data.mean(axis=0)
def test_reduce_coords(self) -> None:
# regression test for GH1470
data = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"b": 4})
expected = xr.Dataset({"a": 2}, coords={"b": 4})
actual = data.mean("x")
assert_identical(actual, expected)
# should be consistent
actual = data["a"].mean("x").to_dataset()
assert_identical(actual, expected)
    def test_mean_uint_dtype(self) -> None:
        """mean with skipna works on unsigned-integer data alongside float data."""
        data = xr.Dataset(
            {
                "a": (("x", "y"), np.arange(6).reshape(3, 2).astype("uint")),
                "b": (("x",), np.array([0.1, 0.2, np.nan])),
            }
        )
        actual = data.mean("x", skipna=True)
        # each variable should reduce exactly as it would on its own
        expected = xr.Dataset(
            {"a": data["a"].mean("x"), "b": data["b"].mean("x", skipna=True)}
        )
        assert_identical(actual, expected)
def test_reduce_bad_dim(self) -> None:
data = create_test_data()
with pytest.raises(
ValueError,
match=re.escape("Dimension(s) 'bad_dim' do not exist"),
):
data.mean(dim="bad_dim")
    def test_reduce_cumsum(self) -> None:
        """cumsum over one dim vs. over all dims; NaNs are skipped (treated as 0)."""
        data = xr.Dataset(
            {"a": 1, "b": ("x", [1, 2]), "c": (("x", "y"), [[np.nan, 3], [0, 4]])}
        )
        # for this data, cumsum along "y" happens to equal fillna(0):
        # variables without a "y" dim are untouched and NaNs become 0
        assert_identical(data.fillna(0), data.cumsum("y"))
        expected = xr.Dataset(
            {"a": 1, "b": ("x", [1, 3]), "c": (("x", "y"), [[0, 3], [0, 7]])}
        )
        assert_identical(expected, data.cumsum())
    @pytest.mark.parametrize(
        "reduct, expected",
        [
            ("dim1", ["dim2", "dim3", "time", "dim1"]),
            ("dim2", ["dim3", "time", "dim1", "dim2"]),
            ("dim3", ["dim2", "time", "dim1", "dim3"]),
            ("time", ["dim2", "dim3", "dim1"]),
        ],
    )
    @pytest.mark.parametrize("func", ["cumsum", "cumprod"])
    def test_reduce_cumsum_test_dims(self, reduct, expected, func) -> None:
        """cumsum/cumprod keep all dims (the reduced dim moves last) and
        reject unknown dims with a clear error."""
        data = create_test_data()
        with pytest.raises(
            ValueError,
            match=re.escape("Dimension(s) 'bad_dim' do not exist"),
        ):
            getattr(data, func)(dim="bad_dim")
        # ensure dimensions are correct
        actual = getattr(data, func)(dim=reduct).dims
        assert list(actual) == expected
    def test_reduce_non_numeric(self) -> None:
        """Reductions drop string and extension-array variables, but keep
        variables that do not involve the reduced dimension."""
        data1 = create_test_data(seed=44, use_extension_array=True)
        data2 = create_test_data(seed=44)
        # add string-valued variables on top of the numeric test data
        add_vars = {"var6": ["dim1", "dim2"], "var7": ["dim1"]}
        for v, dims in sorted(add_vars.items()):
            size = tuple(data1.sizes[d] for d in dims)
            data = np.random.randint(0, 100, size=size).astype(np.str_)
            data1[v] = (dims, data, {"foo": "variable"})
        # var4 and var5 are extension arrays and should be dropped
        assert (
            "var4" not in data1.mean()
            and "var5" not in data1.mean()
            and "var6" not in data1.mean()
            and "var7" not in data1.mean()
        )
        assert_equal(data1.mean(), data2.mean())
        assert_equal(data1.mean(dim="dim1"), data2.mean(dim="dim1"))
        # var7 only depends on dim1, so a dim2 reduction leaves it untouched
        assert "var6" not in data1.mean(dim="dim2") and "var7" in data1.mean(dim="dim2")
    @pytest.mark.filterwarnings(
        "ignore:Once the behaviour of DataArray:DeprecationWarning"
    )
    def test_reduce_strings(self) -> None:
        """min/max/argmin/argmax/idxmin/idxmax work on string-valued variables,
        for object, bytes (S1), and unicode (U1) dtypes."""
        expected = Dataset({"x": "a"})
        ds = Dataset({"x": ("y", ["a", "b"])})
        ds.coords["y"] = [-10, 10]
        actual = ds.min()
        assert_identical(expected, actual)
        expected = Dataset({"x": "b"})
        actual = ds.max()
        assert_identical(expected, actual)
        expected = Dataset({"x": 0})
        actual = ds.argmin()
        assert_identical(expected, actual)
        expected = Dataset({"x": 1})
        actual = ds.argmax()
        assert_identical(expected, actual)
        # idx* return the coordinate label, not the position
        expected = Dataset({"x": -10})
        actual = ds.idxmin()
        assert_identical(expected, actual)
        expected = Dataset({"x": 10})
        actual = ds.idxmax()
        assert_identical(expected, actual)
        expected = Dataset({"x": b"a"})
        ds = Dataset({"x": ("y", np.array(["a", "b"], "S1"))})
        actual = ds.min()
        assert_identical(expected, actual)
        expected = Dataset({"x": "a"})
        ds = Dataset({"x": ("y", np.array(["a", "b"], "U1"))})
        actual = ds.min()
        assert_identical(expected, actual)
    def test_reduce_dtypes(self) -> None:
        """Reductions handle bool, unsigned-int, and complex dtypes."""
        # regression test for GH342
        expected = Dataset({"x": 1})
        actual = Dataset({"x": True}).sum()
        assert_identical(expected, actual)
        # regression test for GH505
        expected = Dataset({"x": 3})
        actual = Dataset({"x": ("y", np.array([1, 2], "uint16"))}).sum()
        assert_identical(expected, actual)
        expected = Dataset({"x": 1 + 1j})
        actual = Dataset({"x": ("y", [1, 1j])}).sum()
        assert_identical(expected, actual)
    def test_reduce_keep_attrs(self) -> None:
        """keep_attrs controls attrs of both the Dataset and its variables."""
        data = create_test_data()
        _attrs = {"attr1": "value1", "attr2": 2929}
        attrs = dict(_attrs)
        data.attrs = attrs
        # Test default behavior (keeps attrs for reduction operations)
        ds = data.mean()
        assert ds.attrs == attrs
        for k, v in ds.data_vars.items():
            assert v.attrs == data[k].attrs
        # Test explicitly keeping attrs
        ds = data.mean(keep_attrs=True)
        assert ds.attrs == attrs
        for k, v in ds.data_vars.items():
            assert v.attrs == data[k].attrs
        # Test explicitly dropping attrs
        ds = data.mean(keep_attrs=False)
        assert ds.attrs == {}
        for v in ds.data_vars.values():
            assert v.attrs == {}
    @pytest.mark.filterwarnings(
        "ignore:Once the behaviour of DataArray:DeprecationWarning"
    )
    def test_reduce_argmin(self) -> None:
        """argmin gives the same scalar result with and without a dim."""
        # regression test for #205
        ds = Dataset({"a": ("x", [0, 1])})
        expected = Dataset({"a": ([], 0)})
        actual = ds.argmin()
        assert_identical(expected, actual)
        actual = ds.argmin("x")
        assert_identical(expected, actual)
def test_reduce_scalars(self) -> None:
ds = Dataset({"x": ("a", [2, 2]), "y": 2, "z": ("b", [2])})
expected = Dataset({"x": 0, "y": 0, "z": 0})
actual = ds.var()
assert_identical(expected, actual)
expected = Dataset({"x": 0, "y": 0, "z": ("b", [0])})
actual = ds.var("a")
assert_identical(expected, actual)
    def test_reduce_only_one_axis(self) -> None:
        """Dataset.reduce passes a single integer axis to functions that
        only accept one, and fails clearly when no axis can be supplied."""
        def mean_only_one_axis(x, axis):
            # deliberately rejects tuple/None axes to probe reduce's dispatch
            if not isinstance(axis, integer_types):
                raise TypeError("non-integer axis")
            return x.mean(axis)
        ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])})
        expected = Dataset({"a": ("x", [2])})
        actual = ds.reduce(mean_only_one_axis, "y")
        assert_identical(expected, actual)
        with pytest.raises(
            TypeError, match=r"missing 1 required positional argument: 'axis'"
        ):
            ds.reduce(mean_only_one_axis)
    def test_reduce_no_axis(self) -> None:
        """Dataset.reduce works with functions that take no axis argument,
        and fails clearly when a dim is requested anyway."""
        def total_sum(x):
            return np.sum(x.flatten())
        ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])})
        expected = Dataset({"a": ((), 10)})
        actual = ds.reduce(total_sum)
        assert_identical(expected, actual)
        with pytest.raises(TypeError, match=r"unexpected keyword argument 'axis'"):
            ds.reduce(total_sum, dim="x")
    def test_reduce_keepdims(self) -> None:
        """keepdims=True preserves reduced dims at size 1, like numpy, while
        dropping coordinates that were involved in the reduction."""
        ds = Dataset(
            {"a": (["x", "y"], [[0, 1, 2, 3, 4]])},
            coords={
                "y": [0, 1, 2, 3, 4],
                "x": [0],
                "lat": (["x", "y"], [[0, 1, 2, 3, 4]]),
                "c": -999.0,
            },
        )
        # Shape should match behaviour of numpy reductions with keepdims=True
        # Coordinates involved in the reduction should be removed
        actual = ds.mean(keepdims=True)
        expected = Dataset(
            {"a": (["x", "y"], np.mean(ds.a, keepdims=True).data)}, coords={"c": ds.c}
        )
        assert_identical(expected, actual)
        # partial reduction: "y" was not reduced, so its coordinate survives
        actual = ds.mean("x", keepdims=True)
        expected = Dataset(
            {"a": (["x", "y"], np.mean(ds.a, axis=0, keepdims=True).data)},
            coords={"y": ds.y, "c": ds.c},
        )
        assert_identical(expected, actual)
    @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True)
    @pytest.mark.parametrize("skipna", [True, False, None])
    @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
    def test_quantile(self, q, skipna, compute_backend) -> None:
        """Dataset.quantile matches per-variable DataArray.quantile and only
        adds a "quantile" dim for non-scalar q."""
        ds = create_test_data(seed=123)
        # inject a NaN so that skipna actually matters
        ds.var1.data[0, 0] = np.nan
        for dim in [None, "dim1", ["dim1"]]:
            ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)
            if is_scalar(q):
                assert "quantile" not in ds_quantile.dims
            else:
                assert "quantile" in ds_quantile.dims
            for var, dar in ds.data_vars.items():
                assert var in ds_quantile
                assert_identical(
                    ds_quantile[var], dar.quantile(q, dim=dim, skipna=skipna)
                )
        # reducing over multiple dims removes them all but keeps the others
        dim = ["dim1", "dim2"]
        ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)
        assert "dim3" in ds_quantile.dims
        assert all(d not in ds_quantile.dims for d in dim)
    @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True)
    @pytest.mark.parametrize("skipna", [True, False])
    def test_quantile_skipna(self, skipna, compute_backend) -> None:
        """skipna=False propagates NaN through quantile; skipna=True ignores it."""
        q = 0.1
        dim = "time"
        ds = Dataset({"a": ([dim], np.arange(0, 11))})
        # where() introduces a NaN at index 0
        ds = ds.where(ds >= 1)
        result = ds.quantile(q=q, dim=dim, skipna=skipna)
        value = 1.9 if skipna else np.nan
        expected = Dataset({"a": value}, coords={"quantile": q})
        assert_identical(result, expected)
@pytest.mark.parametrize("method", ["midpoint", "lower"])
def test_quantile_method(self, method) -> None:
ds = create_test_data(seed=123)
q = [0.25, 0.5, 0.75]
result = ds.quantile(q, method=method)
assert_identical(result.var1, ds.var1.quantile(q, method=method))
assert_identical(result.var2, ds.var2.quantile(q, method=method))
assert_identical(result.var3, ds.var3.quantile(q, method=method))
    @pytest.mark.filterwarnings(
        "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning"
    )
    @pytest.mark.parametrize("method", ["midpoint", "lower"])
    def test_quantile_interpolation_deprecated(self, method) -> None:
        """The legacy ``interpolation`` kwarg warns, and combining it with
        ``method`` is an error."""
        ds = create_test_data(seed=123)
        q = [0.25, 0.5, 0.75]
        with pytest.warns(
            FutureWarning,
            match="`interpolation` argument to quantile was renamed to `method`",
        ):
            ds.quantile(q, interpolation=method)
        with warnings.catch_warnings(record=True):
            with pytest.raises(TypeError, match="interpolation and method keywords"):
                ds.quantile(q, method=method, interpolation=method)
    @requires_bottleneck
    def test_rank(self) -> None:
        """rank() keeps only variables with the ranked dim, matches the
        DataArray version, preserves coords, and rejects unknown dims."""
        ds = create_test_data(seed=1234)
        # only ds.var3 depends on dim3
        z = ds.rank("dim3")
        assert ["var3"] == list(z.data_vars)
        # same as dataarray version
        x = z.var3
        y = ds.var3.rank("dim3")
        assert_equal(x, y)
        # coordinates stick
        assert list(z.coords) == list(ds.coords)
        assert list(x.coords) == list(y.coords)
        # invalid dim
        with pytest.raises(
            ValueError,
            match=re.escape(
                "Dimension 'invalid_dim' not found in data dimensions ('dim3', 'dim1')"
            ),
        ):
            x.rank("invalid_dim")
def test_rank_use_bottleneck(self) -> None:
ds = Dataset({"a": ("x", [0, np.nan, 2]), "b": ("y", [4, 6, 3, 4])})
with xr.set_options(use_bottleneck=False):
with pytest.raises(RuntimeError):
ds.rank("x")
def test_count(self) -> None:
ds = Dataset({"x": ("a", [np.nan, 1]), "y": 0, "z": np.nan})
expected = Dataset({"x": 1, "y": 1, "z": 0})
actual = ds.count()
assert_identical(expected, actual)
    def test_map(self) -> None:
        """map applies a function per data variable, forwards kwargs, and
        drops coordinates unused by any data variable."""
        data = create_test_data()
        data.attrs["foo"] = "bar"
        # data.map keeps all attrs by default
        assert_identical(data.map(np.mean), data.mean())
        expected = data.mean(keep_attrs=True)
        actual = data.map(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
        assert_identical(expected, actual)
        assert_identical(data.map(lambda x: x, keep_attrs=True), data.drop_vars("time"))
        def scale(x, multiple=1):
            # extra kwargs must be forwarded to the mapped function
            return multiple * x
        actual = data.map(scale, multiple=2)
        assert_equal(actual["var1"], 2 * data["var1"])
        assert_identical(actual["numbers"], data["numbers"])
        actual = data.map(np.asarray)
        expected = data.drop_vars("time")  # time is not used on a data var
        assert_equal(expected, actual)
    def test_map_coords_attrs(self) -> None:
        """map(keep_attrs=True) preserves coordinate attrs even when the
        mapped function strips attrs from its result."""
        ds = xr.Dataset(
            {
                "a": (
                    ["x", "y", "z"],
                    np.arange(24).reshape(3, 4, 2),
                    {"attr1": "value1"},
                ),
                "b": ("y", np.arange(4), {"attr2": "value2"}),
            },
            coords={
                "x": ("x", np.array([-1, 0, 1]), {"attr3": "value3"}),
                "z": ("z", list("ab"), {"attr4": "value4"}),
            },
        )
        def func(arr):
            if "y" not in arr.dims:
                return arr
            # drop attrs from coords
            return arr.mean(dim="y").drop_attrs()
        expected = ds.mean(dim="y", keep_attrs=True)
        actual = ds.map(func, keep_attrs=True)
        assert_identical(actual, expected)
        assert actual["x"].attrs
        # mutating the input's coord attrs must not leak into the result
        ds["x"].attrs["y"] = "x"
        assert ds["x"].attrs != actual["x"].attrs
    def test_map_non_dataarray_outputs(self) -> None:
        # Test that map handles non-DataArray outputs by converting them
        # Regression test for GH10835
        ds = xr.Dataset({"foo": ("x", [1, 2, 3]), "bar": ("y", [4, 5])})
        # Scalar output
        result = ds.map(lambda x: 1)
        expected = xr.Dataset({"foo": 1, "bar": 1})
        assert_identical(result, expected)
        # Numpy array output with same shape
        result = ds.map(lambda x: x.values)
        expected = ds.copy()
        assert_identical(result, expected)
        # Mixed: some return scalars, some return arrays
        def mixed_func(x):
            if "x" in x.dims:
                return 42
            return x
        result = ds.map(mixed_func)
        expected = xr.Dataset({"foo": 42, "bar": ("y", [4, 5])})
        assert_identical(result, expected)
def test_apply_pending_deprecated_map(self) -> None:
data = create_test_data()
data.attrs["foo"] = "bar"
with pytest.warns(PendingDeprecationWarning):
# data.apply keeps all attrs by default
assert_identical(data.apply(np.mean), data.mean())
    def make_example_math_dataset(self):
        """Build a small Dataset for the arithmetic tests below: int "bar" on
        x, float "foo" on (x, y) with one NaN, plus a string coord."""
        variables = {
            "bar": ("x", np.arange(100, 400, 100)),
            "foo": (("x", "y"), 1.0 * np.arange(12).reshape(3, 4)),
        }
        coords = {"abc": ("x", ["a", "b", "c"]), "y": 10 * np.arange(4)}
        ds = Dataset(variables, coords)
        # a NaN so skipna / missing-value behavior is exercised
        ds["foo"][0, 0] = np.nan
        return ds
def test_dataset_number_math(self) -> None:
ds = self.make_example_math_dataset()
assert_identical(ds, +ds)
assert_identical(ds, ds + 0)
assert_identical(ds, 0 + ds)
assert_identical(ds, ds + np.array(0))
assert_identical(ds, np.array(0) + ds)
actual = ds.copy(deep=True)
actual += 0
assert_identical(ds, actual)
    # casting nan warns
    @pytest.mark.filterwarnings("ignore:invalid value encountered in cast")
    def test_unary_ops(self) -> None:
        """Unary/elementwise operators match mapping the same function over
        each variable; ndarray-only methods are not exposed."""
        ds = self.make_example_math_dataset()
        assert_identical(ds.map(abs), abs(ds))
        assert_identical(ds.map(lambda x: x + 4), ds + 4)
        for func in [
            lambda x: x.isnull(),
            lambda x: x.round(),
            lambda x: x.astype(int),
        ]:
            assert_identical(ds.map(func), func(ds))
        assert_identical(ds.isnull(), ~ds.notnull())
        # don't actually patch these methods in
        with pytest.raises(AttributeError):
            _ = ds.item
        with pytest.raises(AttributeError):
            _ = ds.searchsorted
    def test_dataset_array_math(self) -> None:
        """Dataset-vs-DataArray/Variable arithmetic broadcasts the array
        against every data variable, including reflected and in-place ops."""
        ds = self.make_example_math_dataset()
        expected = ds.map(lambda x: x - ds["foo"])
        assert_identical(expected, ds - ds["foo"])
        assert_identical(expected, -ds["foo"] + ds)
        assert_identical(expected, ds - ds["foo"].variable)
        assert_identical(expected, -ds["foo"].variable + ds)
        actual = ds.copy(deep=True)
        actual -= ds["foo"]
        assert_identical(expected, actual)
        expected = ds.map(lambda x: x + ds["bar"])
        assert_identical(expected, ds + ds["bar"])
        actual = ds.copy(deep=True)
        actual += ds["bar"]
        assert_identical(expected, actual)
        # plain ndarrays broadcast too
        expected = Dataset({"bar": ds["bar"] + np.arange(3)})
        assert_identical(expected, ds[["bar"]] + np.arange(3))
        assert_identical(expected, np.arange(3) + ds[["bar"]])
    def test_dataset_dataset_math(self) -> None:
        """Dataset-vs-Dataset arithmetic: mapping over variables, dict-like
        operands, in-place identity preservation, and alignment."""
        ds = self.make_example_math_dataset()
        assert_identical(ds, ds + 0 * ds)
        assert_identical(ds, ds + {"foo": 0, "bar": 0})
        expected = ds.map(lambda x: 2 * x)
        assert_identical(expected, 2 * ds)
        assert_identical(expected, ds + ds)
        assert_identical(expected, ds + ds.data_vars)
        assert_identical(expected, ds + dict(ds.data_vars))
        actual = ds.copy(deep=True)
        expected_id = id(actual)
        actual += ds
        assert_identical(expected, actual)
        # in-place op must mutate the same object, not rebind
        assert expected_id == id(actual)
        assert_identical(ds == ds, ds.notnull())
        subsampled = ds.isel(y=slice(2))
        expected = 2 * subsampled
        assert_identical(expected, subsampled + ds)
        assert_identical(expected, ds + subsampled)
    def test_dataset_math_auto_align(self) -> None:
        """Binary ops automatically align on the intersection of indexes."""
        ds = self.make_example_math_dataset()
        subset = ds.isel(y=[1, 3])
        expected = 2 * subset
        actual = ds + subset
        assert_identical(expected, actual)
        # disjoint indexes intersect to an empty result
        actual = ds.isel(y=slice(1)) + ds.isel(y=slice(1, None))
        expected = 2 * ds.drop_sel(y=ds.y)
        assert_equal(actual, expected)
        actual = ds + ds[["bar"]]
        expected = (2 * ds[["bar"]]).merge(ds.coords, compat="override")
        assert_identical(expected, actual)
        assert_identical(ds + Dataset(), ds.coords.to_dataset())
        assert_identical(Dataset() + Dataset(), Dataset())
        ds2 = Dataset(coords={"bar": 42})
        assert_identical(ds + ds2, ds.coords.merge(ds2))
        # maybe unary arithmetic with empty datasets should raise instead?
        assert_identical(Dataset() + 1, Dataset())
        # in-place ops reindex the other operand to self
        actual = ds.copy(deep=True)
        other = ds.isel(y=slice(2))
        actual += other
        expected = ds + other.reindex_like(ds)
        assert_identical(expected, actual)
    def test_dataset_math_errors(self) -> None:
        """Invalid in-place arithmetic raises, and a failed in-place op must
        leave the Dataset unchanged (rollback)."""
        ds = self.make_example_math_dataset()
        with pytest.raises(TypeError):
            ds["foo"] += ds
        with pytest.raises(TypeError):
            ds["foo"].variable += ds
        with pytest.raises(ValueError, match=r"must have the same"):
            ds += ds[["bar"]]
        # verify we can rollback in-place operations if something goes wrong
        # nb. inplace datetime64 math actually will work with an integer array
        # but not floats thanks to numpy's inconsistent handling
        other = DataArray(np.datetime64("2000-01-01"), coords={"c": 2})
        actual = ds.copy(deep=True)
        with pytest.raises(TypeError):
            actual += other
        assert_identical(actual, ds)
    def test_dataset_transpose(self) -> None:
        """transpose(): full reverse, ellipsis forms, per-variable dim
        ordering, and missing_dims handling."""
        ds = Dataset(
            {
                "a": (("x", "y"), np.random.randn(3, 4)),
                "b": (("y", "x"), np.random.randn(4, 3)),
            },
            coords={
                "x": range(3),
                "y": range(4),
                "xy": (("x", "y"), np.random.randn(3, 4)),
            },
        )
        # no arguments reverses every variable's dims
        actual = ds.transpose()
        expected = Dataset(
            {"a": (("y", "x"), ds.a.values.T), "b": (("x", "y"), ds.b.values.T)},
            coords={
                "x": ds.x.values,
                "y": ds.y.values,
                "xy": (("y", "x"), ds.xy.values.T),
            },
        )
        assert_identical(expected, actual)
        # a bare ellipsis is a no-op
        actual = ds.transpose(...)
        expected = ds
        assert_identical(expected, actual)
        actual = ds.transpose("x", "y")
        expected = ds.map(lambda x: x.transpose("x", "y", transpose_coords=True))
        assert_identical(expected, actual)
        ds = create_test_data()
        actual = ds.transpose()
        for k in ds.variables:
            assert actual[k].dims[::-1] == ds[k].dims
        new_order = ("dim2", "dim3", "dim1", "time")
        actual = ds.transpose(*new_order)
        for k in ds.variables:
            # each variable keeps only the dims it actually has, reordered
            expected_dims = tuple(d for d in new_order if d in ds[k].dims)
            assert actual[k].dims == expected_dims
        # same as above but with ellipsis
        new_order = ("dim2", "dim3", "dim1", "time")
        actual = ds.transpose("dim2", "dim3", ...)
        for k in ds.variables:
            expected_dims = tuple(d for d in new_order if d in ds[k].dims)
            assert actual[k].dims == expected_dims
        # test missing dimension, raise error
        with pytest.raises(ValueError):
            ds.transpose(..., "not_a_dim")
        # test missing dimension, ignore error
        actual = ds.transpose(..., "not_a_dim", missing_dims="ignore")
        expected_ell = ds.transpose(...)
        assert_identical(expected_ell, actual)
        # test missing dimension, raise warning
        with pytest.warns(UserWarning):
            actual = ds.transpose(..., "not_a_dim", missing_dims="warn")
            assert_identical(expected_ell, actual)
        # Dataset deliberately has no ndarray-style .T shortcut
        assert "T" not in dir(ds)
def test_dataset_ellipsis_transpose_different_ordered_vars(self) -> None:
# https://github.com/pydata/xarray/issues/1081#issuecomment-544350457
ds = Dataset(
dict(
a=(("w", "x", "y", "z"), np.ones((2, 3, 4, 5))),
b=(("x", "w", "y", "z"), np.zeros((3, 2, 4, 5))),
)
)
result = ds.transpose(..., "z", "y")
assert list(result["a"].dims) == list("wxzy")
assert list(result["b"].dims) == list("xwzy")
def test_dataset_retains_period_index_on_transpose(self) -> None:
ds = create_test_data()
ds["time"] = pd.period_range("2000-01-01", periods=20)
transposed = ds.transpose()
assert isinstance(transposed.time.to_index(), pd.PeriodIndex)
def test_dataset_diff_n1_simple(self) -> None:
ds = Dataset({"foo": ("x", [5, 5, 6, 6])})
actual = ds.diff("x")
expected = Dataset({"foo": ("x", [0, 1, 0])})
assert_equal(expected, actual)
def test_dataset_diff_n1_label(self) -> None:
ds = Dataset({"foo": ("x", [5, 5, 6, 6])}, {"x": [0, 1, 2, 3]})
actual = ds.diff("x", label="lower")
expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [0, 1, 2]})
assert_equal(expected, actual)
actual = ds.diff("x", label="upper")
expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [1, 2, 3]})
assert_equal(expected, actual)
    def test_dataset_diff_n1(self) -> None:
        """diff over one dim matches np.diff per variable; variables without
        that dim and the other coordinates pass through untouched."""
        ds = create_test_data(seed=1)
        actual = ds.diff("dim2")
        expected_dict = {}
        expected_dict["var1"] = DataArray(
            np.diff(ds["var1"].values, axis=1),
            {"dim2": ds["dim2"].values[1:]},
            ["dim1", "dim2"],
        )
        expected_dict["var2"] = DataArray(
            np.diff(ds["var2"].values, axis=1),
            {"dim2": ds["dim2"].values[1:]},
            ["dim1", "dim2"],
        )
        # var3 has no dim2, so it is unchanged
        expected_dict["var3"] = ds["var3"]
        expected = Dataset(expected_dict, coords={"time": ds["time"].values})
        expected.coords["numbers"] = ("dim3", ds["numbers"].values)
        assert_equal(expected, actual)
    def test_dataset_diff_n2(self) -> None:
        """Second-order diff (n=2) matches np.diff(..., n=2); the dim loses
        two labels instead of one."""
        ds = create_test_data(seed=1)
        actual = ds.diff("dim2", n=2)
        expected_dict = {}
        expected_dict["var1"] = DataArray(
            np.diff(ds["var1"].values, axis=1, n=2),
            {"dim2": ds["dim2"].values[2:]},
            ["dim1", "dim2"],
        )
        expected_dict["var2"] = DataArray(
            np.diff(ds["var2"].values, axis=1, n=2),
            {"dim2": ds["dim2"].values[2:]},
            ["dim1", "dim2"],
        )
        # var3 has no dim2, so it is unchanged
        expected_dict["var3"] = ds["var3"]
        expected = Dataset(expected_dict, coords={"time": ds["time"].values})
        expected.coords["numbers"] = ("dim3", ds["numbers"].values)
        assert_equal(expected, actual)
def test_dataset_diff_exception_n_neg(self) -> None:
ds = create_test_data(seed=1)
with pytest.raises(ValueError, match=r"must be non-negative"):
ds.diff("dim2", n=-1)
def test_dataset_diff_exception_label_str(self) -> None:
ds = create_test_data(seed=1)
with pytest.raises(ValueError, match=r"'label' argument has to"):
ds.diff("dim2", label="raise_me") # type: ignore[arg-type]
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": -10}])
    def test_shift(self, fill_value) -> None:
        """shift() fills vacated positions with fill_value (scalar, default
        NA, or per-variable dict) and rejects unknown dims."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.shift(x=1, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value = np.nan
        elif isinstance(fill_value, dict):
            fill_value = fill_value.get("foo", np.nan)
        expected = Dataset({"foo": ("x", [fill_value, 1, 2])}, coords, attrs)
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.shift(foo=123)
    def test_roll_coords(self) -> None:
        """roll(roll_coords=True) rotates data and coordinates together."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.roll(x=1, roll_coords=True)
        ex_coords = {"bar": ("x", list("cab")), "x": [2, -4, 3]}
        expected = Dataset({"foo": ("x", [3, 1, 2])}, ex_coords, attrs)
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.roll(foo=123, roll_coords=True)
    def test_roll_no_coords(self) -> None:
        """roll() without roll_coords rotates data but leaves coords fixed."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.roll(x=1)
        expected = Dataset({"foo": ("x", [3, 1, 2])}, coords, attrs)
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.roll(abc=321)
def test_roll_multidim(self) -> None:
# regression test for 2445
arr = xr.DataArray(
[[1, 2, 3], [4, 5, 6]],
coords={"x": range(3), "y": range(2)},
dims=("y", "x"),
)
actual = arr.roll(x=1, roll_coords=True)
expected = xr.DataArray(
[[3, 1, 2], [6, 4, 5]], coords=[("y", [0, 1]), ("x", [2, 0, 1])]
)
assert_identical(expected, actual)
def test_real_and_imag(self) -> None:
attrs = {"foo": "bar"}
ds = Dataset({"x": ((), 1 + 2j, attrs)}, attrs=attrs)
expected_re = Dataset({"x": ((), 1, attrs)}, attrs=attrs)
assert_identical(ds.real, expected_re)
expected_im = Dataset({"x": ((), 2, attrs)}, attrs=attrs)
assert_identical(ds.imag, expected_im)
def test_setattr_raises(self) -> None:
ds = Dataset({}, coords={"scalar": 1}, attrs={"foo": "bar"})
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.scalar = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.foo = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.other = 2
def test_filter_by_attrs(self) -> None:
precip = dict(standard_name="convective_precipitation_flux")
temp0 = dict(standard_name="air_potential_temperature", height="0 m")
temp10 = dict(standard_name="air_potential_temperature", height="10 m")
ds = Dataset(
{
"temperature_0": (["t"], [0], temp0),
"temperature_10": (["t"], [0], temp10),
"precipitation": (["t"], [0], precip),
},
coords={"time": (["t"], [0], dict(axis="T", long_name="time_in_seconds"))},
)
# Test return empty Dataset.
ds.filter_by_attrs(standard_name="invalid_standard_name")
new_ds = ds.filter_by_attrs(standard_name="invalid_standard_name")
assert not bool(new_ds.data_vars)
# Test return one DataArray.
new_ds = ds.filter_by_attrs(standard_name="convective_precipitation_flux")
assert new_ds["precipitation"].standard_name == "convective_precipitation_flux"
assert_equal(new_ds["precipitation"], ds["precipitation"])
# Test filter coordinates
new_ds = ds.filter_by_attrs(long_name="time_in_seconds")
assert new_ds["time"].long_name == "time_in_seconds"
assert not bool(new_ds.data_vars)
# Test return more than one DataArray.
new_ds = ds.filter_by_attrs(standard_name="air_potential_temperature")
assert len(new_ds.data_vars) == 2
for var in new_ds.data_vars:
assert new_ds[var].standard_name == "air_potential_temperature"
# Test callable.
new_ds = ds.filter_by_attrs(height=lambda v: v is not None)
assert len(new_ds.data_vars) == 2
for var in new_ds.data_vars:
assert new_ds[var].standard_name == "air_potential_temperature"
new_ds = ds.filter_by_attrs(height="10 m")
assert len(new_ds.data_vars) == 1
for var in new_ds.data_vars:
assert new_ds[var].height == "10 m"
# Test return empty Dataset due to conflicting filters
new_ds = ds.filter_by_attrs(
standard_name="convective_precipitation_flux", height="0 m"
)
assert not bool(new_ds.data_vars)
# Test return one DataArray with two filter conditions
new_ds = ds.filter_by_attrs(
standard_name="air_potential_temperature", height="0 m"
)
for var in new_ds.data_vars:
assert new_ds[var].standard_name == "air_potential_temperature"
assert new_ds[var].height == "0 m"
assert new_ds[var].height != "10 m"
# Test return empty Dataset due to conflicting callables
new_ds = ds.filter_by_attrs(
standard_name=lambda v: False, height=lambda v: True
)
assert not bool(new_ds.data_vars)
def test_binary_op_propagate_indexes(self) -> None:
ds = Dataset(
{"d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]})}
)
expected = ds.xindexes["x"]
actual = (ds * 2).xindexes["x"]
assert expected is actual
    def test_binary_op_join_setting(self) -> None:
        """set_options(arithmetic_join=...) controls index AND data_vars
        alignment in binary ops (inner by default)."""
        # arithmetic_join applies to data array coordinates
        missing_2 = xr.Dataset({"x": [0, 1]})
        missing_0 = xr.Dataset({"x": [1, 2]})
        with xr.set_options(arithmetic_join="outer"):
            actual = missing_2 + missing_0
        expected = xr.Dataset({"x": [0, 1, 2]})
        assert_equal(actual, expected)
        # arithmetic join also applies to data_vars
        ds1 = xr.Dataset({"foo": 1, "bar": 2})
        ds2 = xr.Dataset({"bar": 2, "baz": 3})
        expected = xr.Dataset({"bar": 4})  # default is inner joining
        actual = ds1 + ds2
        assert_equal(actual, expected)
        with xr.set_options(arithmetic_join="outer"):
            expected = xr.Dataset({"foo": np.nan, "bar": 4, "baz": np.nan})
            actual = ds1 + ds2
            assert_equal(actual, expected)
        with xr.set_options(arithmetic_join="left"):
            expected = xr.Dataset({"foo": np.nan, "bar": 4})
            actual = ds1 + ds2
            assert_equal(actual, expected)
        with xr.set_options(arithmetic_join="right"):
            expected = xr.Dataset({"bar": 4, "baz": np.nan})
            actual = ds1 + ds2
            assert_equal(actual, expected)
    @pytest.mark.parametrize(
        ["keep_attrs", "expected"],
        (
            pytest.param(False, {}, id="False"),
            pytest.param(
                True, {"foo": "a", "bar": "b", "baz": "c"}, id="True"
            ),  # drop_conflicts combines non-conflicting attrs
        ),
    )
    def test_binary_ops_keep_attrs(self, keep_attrs, expected) -> None:
        """keep_attrs option: False drops all attrs, True merges the two
        operands' non-conflicting attrs."""
        ds1 = xr.Dataset({"a": 1}, attrs={"foo": "a", "bar": "b"})
        ds2 = xr.Dataset({"a": 1}, attrs={"foo": "a", "baz": "c"})
        with xr.set_options(keep_attrs=keep_attrs):
            ds_result = ds1 + ds2
        assert ds_result.attrs == expected
    def test_binary_ops_attrs_drop_conflicts(self) -> None:
        # Test that binary operations combine attrs with drop_conflicts behavior
        attrs1 = {"units": "meters", "long_name": "distance", "source": "sensor_a"}
        attrs2 = {"units": "feet", "resolution": "high", "source": "sensor_b"}
        ds1 = xr.Dataset({"a": 1}, attrs=attrs1)
        ds2 = xr.Dataset({"a": 2}, attrs=attrs2)
        # With keep_attrs=True (default), should combine attrs dropping conflicts
        result = ds1 + ds2
        # "units" and "source" conflict, so they're dropped
        # "long_name" only in ds1, "resolution" only in ds2, so they're kept
        assert result.attrs == {"long_name": "distance", "resolution": "high"}
        # Test with identical values for some attrs
        attrs3 = {"units": "meters", "type": "data", "source": "sensor_c"}
        ds3 = xr.Dataset({"a": 3}, attrs=attrs3)
        result2 = ds1 + ds3
        # "units" has same value, so kept; "source" conflicts, so dropped
        # "long_name" from ds1, "type" from ds3
        assert result2.attrs == {
            "units": "meters",
            "long_name": "distance",
            "type": "data",
        }
        # With keep_attrs=False, attrs should be empty
        with xr.set_options(keep_attrs=False):
            result3 = ds1 + ds2
        assert result3.attrs == {}
    def test_full_like(self) -> None:
        """full_like with mismatched variable dtypes, scalar/dict fill
        values, and scalar/dict dtype overrides."""
        # For more thorough tests, see test_variable.py
        # Note: testing data_vars with mismatched dtypes
        ds = Dataset(
            {
                "d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]}),
                "d2": DataArray([1.1, 2.2, 3.3], dims=["y"]),
            },
            attrs={"foo": "bar"},
        )
        actual = full_like(ds, 2)
        expected = ds.copy(deep=True)
        # https://github.com/python/mypy/issues/3004
        expected["d1"].values = [2, 2, 2]  # type: ignore[assignment,unused-ignore]
        expected["d2"].values = [2.0, 2.0, 2.0]  # type: ignore[assignment,unused-ignore]
        assert expected["d1"].dtype == int
        assert expected["d2"].dtype == float
        assert_identical(expected, actual)
        # override dtype
        actual = full_like(ds, fill_value=True, dtype=bool)
        expected = ds.copy(deep=True)
        expected["d1"].values = [True, True, True]  # type: ignore[assignment,unused-ignore]
        expected["d2"].values = [True, True, True]  # type: ignore[assignment,unused-ignore]
        assert expected["d1"].dtype == bool
        assert expected["d2"].dtype == bool
        assert_identical(expected, actual)
        # with multiple fill values
        actual = full_like(ds, {"d1": 1, "d2": 2.3})
        expected = ds.assign(d1=("x", [1, 1, 1]), d2=("y", [2.3, 2.3, 2.3]))
        assert expected["d1"].dtype == int
        assert expected["d2"].dtype == float
        assert_identical(expected, actual)
        # override multiple dtypes
        actual = full_like(ds, fill_value={"d1": 1, "d2": 2.3}, dtype={"d1": bool})
        expected = ds.assign(d1=("x", [True, True, True]), d2=("y", [2.3, 2.3, 2.3]))
        assert expected["d1"].dtype == bool
        assert expected["d2"].dtype == float
        assert_identical(expected, actual)
    def test_combine_first(self) -> None:
        """combine_first outer-joins on indexes, preferring self's values;
        equivalent to xr.merge([self, other], join="outer")."""
        dsx0 = DataArray([0, 0], [("x", ["a", "b"])]).to_dataset(name="dsx0")
        dsx1 = DataArray([1, 1], [("x", ["b", "c"])]).to_dataset(name="dsx1")
        actual = dsx0.combine_first(dsx1)
        expected = Dataset(
            {"dsx0": ("x", [0, 0, np.nan]), "dsx1": ("x", [np.nan, 1, 1])},
            coords={"x": ["a", "b", "c"]},
        )
        assert_equal(actual, expected)
        assert_equal(actual, xr.merge([dsx0, dsx1], join="outer"))
        # works just like xr.merge([self, other])
        dsy2 = DataArray([2, 2, 2], [("x", ["b", "c", "d"])]).to_dataset(name="dsy2")
        actual = dsx0.combine_first(dsy2)
        expected = xr.merge([dsy2, dsx0], join="outer")
        assert_equal(actual, expected)
def test_sortby(self) -> None:
    """Sorting by DataArrays, coordinate names, lexsort keys, and a MultiIndex."""
    ds = Dataset(
        {
            "A": DataArray(
                [[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])]
            ),
            "B": DataArray([[5, 6], [7, 8], [9, 10]], dims=["x", "y"]),
        }
    )
    sorted1d = Dataset(
        {
            "A": DataArray(
                [[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])]
            ),
            "B": DataArray([[9, 10], [7, 8], [5, 6]], dims=["x", "y"]),
        }
    )
    sorted2d = Dataset(
        {
            "A": DataArray(
                [[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])]
            ),
            "B": DataArray([[10, 9], [8, 7], [6, 5]], dims=["x", "y"]),
        }
    )
    expected = sorted1d
    dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])])
    actual = ds.sortby(dax)
    assert_equal(actual, expected)
    # test descending order sort
    actual = ds.sortby(dax, ascending=False)
    assert_equal(actual, ds)
    # test alignment (fills in nan for 'c')
    dax_short = DataArray([98, 97], [("x", ["b", "a"])])
    actual = ds.sortby(dax_short)
    assert_equal(actual, expected)
    # test 1-D lexsort
    # dax0 is sorted first to give indices of [1, 2, 0]
    # and then dax1 would be used to move index 2 ahead of 1
    dax0 = DataArray([100, 95, 95], [("x", ["c", "b", "a"])])
    dax1 = DataArray([0, 1, 0], [("x", ["c", "b", "a"])])
    actual = ds.sortby([dax0, dax1])  # lexsort underneath gives [2, 1, 0]
    assert_equal(actual, expected)
    expected = sorted2d
    # test multi-dim sort by 1D dataarray values
    day = DataArray([90, 80], [("y", [1, 0])])
    actual = ds.sortby([day, dax])
    assert_equal(actual, expected)
    # test exception-raising
    with pytest.raises(KeyError):
        actual = ds.sortby("z")
    with pytest.raises(ValueError) as excinfo:
        actual = ds.sortby(ds["A"])
    assert "DataArray is not 1-D" in str(excinfo.value)
    expected = sorted1d
    actual = ds.sortby("x")
    assert_equal(actual, expected)
    # test pandas.MultiIndex
    indices = (("b", 1), ("b", 0), ("a", 1), ("a", 0))
    midx = pd.MultiIndex.from_tuples(indices, names=["one", "two"])
    ds_midx = Dataset(
        {
            "A": DataArray(
                [[1, 2], [3, 4], [5, 6], [7, 8]], [("x", midx), ("y", [1, 0])]
            ),
            "B": DataArray([[5, 6], [7, 8], [9, 10], [11, 12]], dims=["x", "y"]),
        }
    )
    actual = ds_midx.sortby("x")
    midx_reversed = pd.MultiIndex.from_tuples(
        tuple(reversed(indices)), names=["one", "two"]
    )
    expected = Dataset(
        {
            "A": DataArray(
                [[7, 8], [5, 6], [3, 4], [1, 2]],
                [("x", midx_reversed), ("y", [1, 0])],
            ),
            "B": DataArray([[11, 12], [9, 10], [7, 8], [5, 6]], dims=["x", "y"]),
        }
    )
    assert_equal(actual, expected)
    # multi-dim sort by coordinate objects
    expected = sorted2d
    actual = ds.sortby(["x", "y"])
    assert_equal(actual, expected)
    # test descending order sort
    actual = ds.sortby(["x", "y"], ascending=False)
    assert_equal(actual, ds)
def test_attribute_access(self) -> None:
    """Variables, coords, and attrs are reachable as attributes and via ``dir()``."""
    ds = create_test_data(seed=1)
    for name in ("var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"):
        assert_equal(ds[name], getattr(ds, name))
        assert name in dir(ds)
    for name in ("dim3", "dim1", "numbers"):
        assert_equal(ds["var3"][name], getattr(ds.var3, name))
        assert name in dir(ds["var3"])
    # attrs
    assert ds["var3"].attrs["foo"] == ds.var3.foo
    assert "foo" in dir(ds["var3"])
def test_ipython_key_completion(self) -> None:
    """IPython key completion lists for Dataset, DataArray, coords, and data_vars."""
    ds = create_test_data(seed=1)
    actual = ds._ipython_key_completions_()
    expected = ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"]
    for item in actual:
        ds[item]  # should not raise
    assert sorted(actual) == sorted(expected)
    # for dataarray
    actual = ds["var3"]._ipython_key_completions_()
    expected = ["dim3", "dim1", "numbers"]
    for item in actual:
        ds["var3"][item]  # should not raise
    assert sorted(actual) == sorted(expected)
    # MultiIndex
    ds_midx = ds.stack(dim12=["dim2", "dim3"])
    actual = ds_midx._ipython_key_completions_()
    expected = [
        "var1",
        "var2",
        "var3",
        "time",
        "dim1",
        "dim2",
        "dim3",
        "numbers",
        "dim12",
    ]
    for item in actual:
        ds_midx[item]  # should not raise
    assert sorted(actual) == sorted(expected)
    # coords
    actual = ds.coords._ipython_key_completions_()
    expected = ["time", "dim1", "dim2", "dim3", "numbers"]
    for item in actual:
        ds.coords[item]  # should not raise
    assert sorted(actual) == sorted(expected)
    actual = ds["var3"].coords._ipython_key_completions_()
    expected = ["dim1", "dim3", "numbers"]
    for item in actual:
        ds["var3"].coords[item]  # should not raise
    assert sorted(actual) == sorted(expected)
    coords = Coordinates(ds.coords)
    actual = coords._ipython_key_completions_()
    expected = ["time", "dim2", "dim3", "numbers"]
    for item in actual:
        coords[item]  # should not raise
    assert sorted(actual) == sorted(expected)
    # data_vars
    actual = ds.data_vars._ipython_key_completions_()
    expected = ["var1", "var2", "var3", "dim1"]
    for item in actual:
        ds.data_vars[item]  # should not raise
    assert sorted(actual) == sorted(expected)
def test_polyfit_output(self) -> None:
    """polyfit emits coefficient variables, plus diagnostics when ``full=True``."""
    ds = create_test_data(seed=1)

    result = ds.polyfit("dim2", 2, full=False)
    assert "var1_polyfit_coefficients" in result

    result = ds.polyfit("dim1", 2, full=True)
    assert "var1_polyfit_coefficients" in result
    assert "dim1_matrix_rank" in result

    # fitting along "time" yields no coefficient variables for this dataset
    result = ds.polyfit("time", 2)
    assert len(result.data_vars) == 0
def test_polyfit_weighted(self) -> None:
    """Uniform weights match the unweighted fit and do not mutate the input."""
    ds = create_test_data(seed=1)
    ds = ds.broadcast_like(ds)  # test more than 2 dimensions (issue #9972)
    ds_copy = ds.copy(deep=True)
    expected = ds.polyfit("dim2", 2)
    actual = ds.polyfit("dim2", 2, w=np.ones(ds.sizes["dim2"]))
    xr.testing.assert_identical(expected, actual)
    # Make sure weighted polyfit does not change the original object (issue #5644)
    xr.testing.assert_identical(ds, ds_copy)
def test_polyfit_coord(self) -> None:
    """Fitting along a non-dimension coordinate works and drops unrelated dims."""
    # Make sure polyfit works when given a non-dimension coordinate.
    ds = create_test_data(seed=1)
    out = ds.polyfit("numbers", 2, full=False)
    assert "var3_polyfit_coefficients" in out
    assert "dim1" in out.dims
    assert "dim2" not in out
    assert "dim3" not in out
def test_polyfit_coord_output(self) -> None:
    """A fit against a non-dimension coordinate yields the expected coefficients."""
    da = xr.DataArray(
        [1, 3, 2], dims=["x"], coords=dict(x=["a", "b", "c"], y=("x", [0, 1, 2]))
    )
    coeffs = da.polyfit("y", deg=1)["polyfit_coefficients"]
    for degree, value in {0: 1.5, 1: 0.5}.items():
        assert coeffs.sel(degree=degree).item() == pytest.approx(value)
def test_polyfit_warnings(self) -> None:
    """An over-degree fit raises RankWarning unless ``full=True`` is passed."""
    ds = create_test_data(seed=1)
    with warnings.catch_warnings(record=True) as ws:
        ds.var1.polyfit("dim2", 10, full=False)
        assert len(ws) == 1
        assert ws[0].category == RankWarning
        # full=True must not add a second warning
        ds.var1.polyfit("dim2", 10, full=True)
        assert len(ws) == 1
def test_polyfit_polyval(self) -> None:
    """polyval applied to polyfit coefficients round-trips the original data."""
    da = xr.DataArray(
        np.arange(1, 10).astype(np.float64), dims=["x"], coords=dict(x=np.arange(9))
    )
    out = da.polyfit("x", 3, full=False)
    da_fitval = xr.polyval(da.x, out.polyfit_coefficients)
    # polyval introduces very small errors (1e-16 here)
    xr.testing.assert_allclose(da_fitval, da)
    # same round-trip with a datetime coordinate (looser tolerance)
    da = da.assign_coords(x=xr.date_range("2001-01-01", periods=9, freq="YS"))
    out = da.polyfit("x", 3, full=False)
    da_fitval = xr.polyval(da.x, out.polyfit_coefficients)
    xr.testing.assert_allclose(da_fitval, da, rtol=1e-3)
@requires_cftime
def test_polyfit_polyval_cftime(self) -> None:
    """polyfit/polyval round-trip with a cftime (noleap calendar) coordinate."""
    da = xr.DataArray(
        np.arange(1, 10).astype(np.float64),
        dims=["x"],
        coords=dict(
            x=xr.date_range("2001-01-01", periods=9, freq="YS", calendar="noleap")
        ),
    )
    out = da.polyfit("x", 3, full=False)
    da_fitval = xr.polyval(da.x, out.polyfit_coefficients)
    np.testing.assert_allclose(da_fitval, da)
@staticmethod
def _test_data_var_interior(
    original_data_var, padded_data_var, padded_dim_name, expected_pad_values
):
    """Assert both pad edges hold ``expected_pad_values`` and the interior is unchanged."""
    # edges: first and last slice along the padded dim must only contain the fill
    np.testing.assert_equal(
        np.unique(padded_data_var.isel({padded_dim_name: [0, -1]})),
        expected_pad_values,
    )
    # interior: everything between the pads equals the original data
    np.testing.assert_array_equal(
        padded_data_var.isel({padded_dim_name: slice(1, -1)}), original_data_var
    )
@pytest.mark.parametrize("padded_dim_name", ["dim1", "dim2", "dim3", "time"])
@pytest.mark.parametrize(
    ["constant_values"],
    [
        pytest.param(None, id="default"),
        pytest.param(42, id="scalar"),
        pytest.param((42, 43), id="tuple"),
        pytest.param({"dim1": 42, "dim2": 43}, id="per dim scalar"),
        pytest.param({"dim1": (42, 43), "dim2": (43, 44)}, id="per dim tuple"),
        pytest.param({"var1": 42, "var2": (42, 43)}, id="per var"),
        pytest.param({"var1": 42, "dim1": (42, 43)}, id="mixed"),
    ],
)
def test_pad(self, padded_dim_name, constant_values) -> None:
    """Pad one dim by (1, 1) and check sizes, coord NaNs, and fill values."""
    ds = create_test_data(seed=1)
    padded = ds.pad({padded_dim_name: (1, 1)}, constant_values=constant_values)
    # test padded dim values and size
    for ds_dim_name, ds_dim in ds.sizes.items():
        if ds_dim_name == padded_dim_name:
            np.testing.assert_equal(padded.sizes[ds_dim_name], ds_dim + 2)
            if ds_dim_name in padded.coords:
                assert padded[ds_dim_name][[0, -1]].isnull().all()
        else:
            np.testing.assert_equal(padded.sizes[ds_dim_name], ds_dim)
    # check if coord "numbers" with dimension dim3 is padded correctly
    if padded_dim_name == "dim3":
        assert padded["numbers"][[0, -1]].isnull().all()
        # warning: passes but dtype changes from int to float
        np.testing.assert_array_equal(padded["numbers"][1:-1], ds["numbers"])
    # test if data_vars are padded with correct values
    for data_var_name, data_var in padded.data_vars.items():
        if padded_dim_name in data_var.dims:
            if utils.is_dict_like(constant_values):
                # per-variable entry wins over per-dimension entry
                if (
                    expected := constant_values.get(data_var_name, None)
                ) is not None or (
                    expected := constant_values.get(padded_dim_name, None)
                ) is not None:
                    self._test_data_var_interior(
                        ds[data_var_name], data_var, padded_dim_name, expected
                    )
                else:
                    self._test_data_var_interior(
                        ds[data_var_name], data_var, padded_dim_name, 0
                    )
            elif constant_values:
                self._test_data_var_interior(
                    ds[data_var_name], data_var, padded_dim_name, constant_values
                )
            else:
                self._test_data_var_interior(
                    ds[data_var_name], data_var, padded_dim_name, np.nan
                )
        else:
            assert_array_equal(data_var, ds[data_var_name])
@pytest.mark.parametrize(
    ["keep_attrs", "attrs", "expected"],
    [
        pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"),
        pytest.param(False, {"a": 1, "b": 2}, {}, id="False"),
        pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"),
    ],
)
def test_pad_keep_attrs(self, keep_attrs, attrs, expected) -> None:
    """``keep_attrs`` is honored both via ``set_options`` and the keyword argument."""
    ds = xr.Dataset(
        {"a": ("x", [1, 2], attrs), "b": ("y", [1, 2], attrs)},
        coords={"c": ("x", [-1, 1], attrs), "d": ("y", [-1, 1], attrs)},
        attrs=attrs,
    )
    # only objects touched by the pad ("a", "c", global) use `expected` attrs
    expected = xr.Dataset(
        {"a": ("x", [0, 1, 2, 0], expected), "b": ("y", [1, 2], attrs)},
        coords={
            "c": ("x", [np.nan, -1, 1, np.nan], expected),
            "d": ("y", [-1, 1], attrs),
        },
        attrs=expected,
    )
    keep_attrs_ = "default" if keep_attrs is None else keep_attrs
    with set_options(keep_attrs=keep_attrs_):
        actual = ds.pad({"x": (1, 1)}, mode="constant", constant_values=0)
        xr.testing.assert_identical(actual, expected)
    actual = ds.pad(
        {"x": (1, 1)}, mode="constant", constant_values=0, keep_attrs=keep_attrs
    )
    xr.testing.assert_identical(actual, expected)
def test_astype_attrs(self) -> None:
    """``astype`` keeps attrs by default and drops them with ``keep_attrs=False``."""
    data = create_test_data(seed=123)
    data.attrs["foo"] = "bar"

    kept = data.astype(float)
    assert data.attrs == kept.attrs
    assert data.var1.attrs == kept.var1.attrs

    dropped = data.astype(float, keep_attrs=False)
    assert not dropped.attrs
    assert not dropped.var1.attrs
@pytest.mark.parametrize("parser", ["pandas", "python"])
@pytest.mark.parametrize(
    "engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])]
)
@pytest.mark.parametrize(
    "backend", ["numpy", pytest.param("dask", marks=[requires_dask])]
)
def test_query(self, backend, engine, parser) -> None:
    """Test querying a dataset across parsers, engines, and array backends.

    Every query result is checked against the equivalent ``isel`` with a
    boolean mask computed directly on the raw numpy arrays; for dask the
    query must not trigger computation.
    """
    # setup test data
    np.random.seed(42)
    a = np.arange(0, 10, 1)
    b = np.random.randint(0, 100, size=10)
    c = np.linspace(0, 1, 20)
    d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype(
        object
    )
    e = np.arange(0, 10 * 20).reshape(10, 20)
    f = np.random.normal(0, 1, size=(10, 20, 30))
    if backend == "numpy":
        ds = Dataset(
            {
                "a": ("x", a),
                "b": ("x", b),
                "c": ("y", c),
                "d": ("z", d),
                "e": (("x", "y"), e),
                "f": (("x", "y", "z"), f),
            },
            coords={
                "a2": ("x", a),
                "b2": ("x", b),
                "c2": ("y", c),
                "d2": ("z", d),
                "e2": (("x", "y"), e),
                "f2": (("x", "y", "z"), f),
            },
        )
    elif backend == "dask":
        ds = Dataset(
            {
                "a": ("x", da.from_array(a, chunks=3)),
                "b": ("x", da.from_array(b, chunks=3)),
                "c": ("y", da.from_array(c, chunks=7)),
                "d": ("z", da.from_array(d, chunks=12)),
                "e": (("x", "y"), da.from_array(e, chunks=(3, 7))),
                "f": (("x", "y", "z"), da.from_array(f, chunks=(3, 7, 12))),
            },
            coords={
                "a2": ("x", a),
                "b2": ("x", b),
                "c2": ("y", c),
                "d2": ("z", d),
                "e2": (("x", "y"), e),
                "f2": (("x", "y", "z"), f),
            },
        )
    # query single dim, single variable
    with raise_if_dask_computes():
        actual = ds.query(x="a2 > 5", engine=engine, parser=parser)
    expect = ds.isel(x=(a > 5))
    assert_identical(expect, actual)
    # query single dim, single variable, via dict
    with raise_if_dask_computes():
        actual = ds.query(dict(x="a2 > 5"), engine=engine, parser=parser)
    expect = ds.isel(dict(x=(a > 5)))
    assert_identical(expect, actual)
    # query single dim, single variable
    with raise_if_dask_computes():
        actual = ds.query(x="b2 > 50", engine=engine, parser=parser)
    expect = ds.isel(x=(b > 50))
    assert_identical(expect, actual)
    # query single dim, single variable
    with raise_if_dask_computes():
        actual = ds.query(y="c2 < .5", engine=engine, parser=parser)
    expect = ds.isel(y=(c < 0.5))
    assert_identical(expect, actual)
    # query single dim, single string variable
    if parser == "pandas":
        # N.B., this query currently only works with the pandas parser
        # xref https://github.com/pandas-dev/pandas/issues/40436
        with raise_if_dask_computes():
            actual = ds.query(z='d2 == "bar"', engine=engine, parser=parser)
        expect = ds.isel(z=(d == "bar"))
        assert_identical(expect, actual)
    # query single dim, multiple variables
    with raise_if_dask_computes():
        actual = ds.query(x="(a2 > 5) & (b2 > 50)", engine=engine, parser=parser)
    expect = ds.isel(x=((a > 5) & (b > 50)))
    assert_identical(expect, actual)
    # query single dim, multiple variables with computation
    with raise_if_dask_computes():
        actual = ds.query(x="(a2 * b2) > 250", engine=engine, parser=parser)
    expect = ds.isel(x=(a * b) > 250)
    assert_identical(expect, actual)
    # check pandas query syntax is supported
    if parser == "pandas":
        with raise_if_dask_computes():
            actual = ds.query(
                x="(a2 > 5) and (b2 > 50)", engine=engine, parser=parser
            )
        expect = ds.isel(x=((a > 5) & (b > 50)))
        assert_identical(expect, actual)
    # query multiple dims via kwargs
    with raise_if_dask_computes():
        actual = ds.query(x="a2 > 5", y="c2 < .5", engine=engine, parser=parser)
    expect = ds.isel(x=(a > 5), y=(c < 0.5))
    assert_identical(expect, actual)
    # query multiple dims via kwargs
    if parser == "pandas":
        with raise_if_dask_computes():
            actual = ds.query(
                x="a2 > 5",
                y="c2 < .5",
                z="d2 == 'bar'",
                engine=engine,
                parser=parser,
            )
        expect = ds.isel(x=(a > 5), y=(c < 0.5), z=(d == "bar"))
        assert_identical(expect, actual)
    # query multiple dims via dict
    with raise_if_dask_computes():
        actual = ds.query(
            dict(x="a2 > 5", y="c2 < .5"), engine=engine, parser=parser
        )
    expect = ds.isel(dict(x=(a > 5), y=(c < 0.5)))
    assert_identical(expect, actual)
    # query multiple dims via dict
    if parser == "pandas":
        with raise_if_dask_computes():
            actual = ds.query(
                dict(x="a2 > 5", y="c2 < .5", z="d2 == 'bar'"),
                engine=engine,
                parser=parser,
            )
        expect = ds.isel(dict(x=(a > 5), y=(c < 0.5), z=(d == "bar")))
        assert_identical(expect, actual)
    # test error handling
    with pytest.raises(ValueError):
        ds.query("a > 5")  # type: ignore[arg-type] # must be dict or kwargs
    with pytest.raises(ValueError):
        ds.query(x=(a > 5))
    with pytest.raises(IndexError):
        ds.query(y="a > 5")  # wrong length dimension
    with pytest.raises(IndexError):
        ds.query(x="c < .5")  # wrong length dimension
    with pytest.raises(IndexError):
        ds.query(x="e > 100")  # wrong number of dimensions
    with pytest.raises(UndefinedVariableError):
        ds.query(x="spam > 50")  # name not present
# pytest tests — new tests should go here, rather than in the class.
@pytest.mark.parametrize("parser", ["pandas", "python"])
def test_eval(ds, parser) -> None:
    """Currently much more minimal testing than `query` above, and much of the setup
    isn't used. But the risks are fairly low — `query` shares much of the code, and
    the method is currently experimental."""
    actual = ds.eval("z1 + 5", parser=parser)
    expect = ds["z1"] + 5
    assert_identical(expect, actual)
    # check pandas query syntax is supported
    if parser == "pandas":
        actual = ds.eval("(z1 > 5) and (z2 > 0)", parser=parser)
        expect = (ds["z1"] > 5) & (ds["z2"] > 0)
        assert_identical(expect, actual)
@pytest.mark.parametrize("test_elements", ([1, 2], np.array([1, 2]), DataArray([1, 2])))
def test_isin(test_elements, backend) -> None:
    """``isin`` builds a boolean mask from list, ndarray, and DataArray inputs."""
    expected = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 1]),
            "var3": (("dim1",), [0, 1]),
        }
    ).astype("bool")
    if backend == "dask":
        expected = expected.chunk()
    result = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 2]),
            "var3": (("dim1",), [0, 1]),
        }
    ).isin(test_elements)
    assert_equal(result, expected)
def test_isin_dataset() -> None:
    """``Dataset.isin`` must reject another Dataset as its test-elements argument."""
    dataset = Dataset({"x": [1, 2]})
    with pytest.raises(TypeError):
        dataset.isin(dataset)
@pytest.mark.parametrize(
    "unaligned_coords",
    (
        {"x": [2, 1, 0]},
        {"x": (["x"], np.asarray([2, 1, 0]))},
        {"x": (["x"], np.asarray([1, 2, 0]))},
        {"x": pd.Index([2, 1, 0])},
        {"x": Variable(dims="x", data=[0, 2, 1])},
        {"x": IndexVariable(dims="x", data=[0, 1, 2])},
        {"y": 42},
        {"y": ("x", [2, 1, 0])},
        {"y": ("x", np.asarray([2, 1, 0]))},
        {"y": (["x"], np.asarray([2, 1, 0]))},
    ),
)
@pytest.mark.parametrize("coords", ({"x": ("x", [0, 1, 2])}, {"x": [0, 1, 2]}))
def test_dataset_constructor_aligns_to_explicit_coords(
    unaligned_coords, coords
) -> None:
    """Constructing via ``Dataset({...}, coords=...)`` aligns like item assignment."""
    a = xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords)
    expected = xr.Dataset(coords=coords)
    expected["a"] = a
    result = xr.Dataset({"a": a}, coords=coords)
    assert_equal(expected, result)
def test_error_message_on_set_supplied() -> None:
    """Passing a ``set`` as variable data raises a TypeError naming the type."""
    with pytest.raises(TypeError, match="has invalid type <class 'set'>"):
        xr.Dataset(dict(date=[1, 2, 3], sec={4}))
@pytest.mark.parametrize("unaligned_coords", ({"y": ("b", np.asarray([2, 1, 0]))},))
def test_constructor_raises_with_invalid_coords(unaligned_coords) -> None:
    """A coordinate on a dimension the DataArray lacks raises ValueError."""
    with pytest.raises(ValueError, match="not a subset of the DataArray dimensions"):
        xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords)
@pytest.mark.parametrize("ds", [3], indirect=True)
def test_dir_expected_attrs(ds) -> None:
    """``dir()`` exposes methods, data variables, and coordinate names."""
    some_expected_attrs = {"pipe", "mean", "isnull", "var1", "dim2", "numbers"}
    result = dir(ds)
    assert set(result) >= some_expected_attrs
def test_dir_non_string(ds) -> None:
    """Non-string keys must not break ``dir()`` and are excluded from it."""
    # add a numbered key to ensure this doesn't break dir
    ds[5] = "foo"
    result = dir(ds)
    assert 5 not in result
    # GH2172
    sample_data = np.random.uniform(size=[2, 2000, 10000])
    x = xr.Dataset({"sample_data": (sample_data.shape, sample_data)})
    x2 = x["sample_data"]
    dir(x2)
def test_dir_unicode(ds) -> None:
    """A string-keyed variable shows up in ``dir()``."""
    ds["unicode"] = "uni"
    assert "unicode" in dir(ds)
def test_raise_no_warning_for_nan_in_binary_ops() -> None:
    """Comparing NaN-containing data must not emit runtime warnings."""
    with assert_no_warnings():
        _ = Dataset(data_vars={"x": ("y", [1, 2, np.nan])}) > 0
@pytest.mark.filterwarnings("error")
@pytest.mark.parametrize("ds", (2,), indirect=True)
def test_raise_no_warning_assert_close(ds) -> None:
    """``assert_allclose`` must not emit warnings (escalated to errors here)."""
    assert_allclose(ds, ds)
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("edge_order", [1, 2])
def test_differentiate(dask, edge_order) -> None:
    """``differentiate`` matches ``np.gradient`` along each dim; 2-D coords raise."""
    rs = np.random.default_rng(42)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]

    da = xr.DataArray(
        rs.random((8, 6)),
        dims=["x", "y"],
        coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.random((8, 6)))},
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})

    ds = xr.Dataset({"var": da})

    # along x
    actual = da.differentiate("x", edge_order)
    expected_x = xr.DataArray(
        np.gradient(da, da["x"], axis=0, edge_order=edge_order),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_x, actual)
    assert_equal(
        ds["var"].differentiate("x", edge_order=edge_order),
        ds.differentiate("x", edge_order=edge_order)["var"],
    )
    # coordinate should not change
    assert_equal(da["x"], actual["x"])

    # along y
    actual = da.differentiate("y", edge_order)
    expected_y = xr.DataArray(
        np.gradient(da, da["y"], axis=1, edge_order=edge_order),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_y, actual)
    assert_equal(actual, ds.differentiate("y", edge_order=edge_order)["var"])
    assert_equal(
        ds["var"].differentiate("y", edge_order=edge_order),
        ds.differentiate("y", edge_order=edge_order)["var"],
    )

    # differentiating along a 2-D coordinate is not supported
    with pytest.raises(ValueError):
        da.differentiate("x2d")
@pytest.mark.parametrize("dask", [True, False])
def test_differentiate_datetime(dask) -> None:
    """``differentiate`` over a datetime64 coordinate honors ``datetime_unit``."""
    rs = np.random.default_rng(42)
    coord = np.array(
        [
            "2004-07-13",
            "2006-01-13",
            "2010-08-13",
            "2010-09-13",
            "2010-10-11",
            "2010-12-13",
            "2011-02-13",
            "2012-08-13",
        ],
        dtype="datetime64",
    )

    da = xr.DataArray(
        rs.random((8, 6)),
        dims=["x", "y"],
        coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.random((8, 6)))},
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})

    # along x
    actual = da.differentiate("x", edge_order=1, datetime_unit="D")
    expected_x = xr.DataArray(
        np.gradient(
            da, da["x"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1
        ),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_x, actual)

    # hours vs days: derivative scales by 24
    actual2 = da.differentiate("x", edge_order=1, datetime_unit="h")
    assert np.allclose(actual, actual2 * 24)

    # for datetime variable
    actual = da["x"].differentiate("x", edge_order=1, datetime_unit="D")
    assert np.allclose(actual, 1.0)

    # with different date unit
    da = xr.DataArray(coord.astype("datetime64[ms]"), dims=["x"], coords={"x": coord})
    actual = da.differentiate("x", edge_order=1)
    assert np.allclose(actual, 1.0)
@requires_cftime
@pytest.mark.parametrize("dask", [True, False])
def test_differentiate_cftime(dask) -> None:
    """``differentiate`` over a cftime coordinate matches ``np.gradient``."""
    rs = np.random.default_rng(42)
    coord = xr.date_range("2000", periods=8, freq="2ME", use_cftime=True)

    da = xr.DataArray(
        rs.random((8, 6)),
        coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.random((8, 6)))},
        dims=["time", "y"],
    )
    if dask and has_dask:
        da = da.chunk({"time": 4})

    actual = da.differentiate("time", edge_order=1, datetime_unit="D")
    expected_data = np.gradient(
        da, da["time"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1
    )
    expected = xr.DataArray(expected_data, coords=da.coords, dims=da.dims)
    assert_equal(expected, actual)

    # hours vs days: derivative scales by 24
    actual2 = da.differentiate("time", edge_order=1, datetime_unit="h")
    assert_allclose(actual, actual2 * 24)

    # Test the differentiation of datetimes themselves
    actual = da["time"].differentiate("time", edge_order=1, datetime_unit="D")
    assert_allclose(actual, xr.ones_like(da["time"]).astype(float))
@pytest.mark.parametrize("dask", [True, False])
def test_integrate(dask) -> None:
    """``integrate`` matches ``trapezoid`` and drops coords on the integrated dim."""
    rs = np.random.default_rng(42)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]

    da = xr.DataArray(
        rs.random((8, 6)),
        dims=["x", "y"],
        coords={
            "x": coord,
            "x2": (("x",), rs.random(8)),
            "z": 3,
            "x2d": (("x", "y"), rs.random((8, 6))),
        },
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})

    ds = xr.Dataset({"var": da})

    # along x
    actual = da.integrate("x")
    # coordinate that contains x should be dropped.
    expected_x = xr.DataArray(
        trapezoid(da.compute(), da["x"], axis=0),
        dims=["y"],
        coords={k: v for k, v in da.coords.items() if "x" not in v.dims},
    )
    assert_allclose(expected_x, actual.compute())
    assert_equal(ds["var"].integrate("x"), ds.integrate("x")["var"])

    # make sure result is also a dask array (if the source is dask array)
    assert isinstance(actual.data, type(da.data))

    # along y
    actual = da.integrate("y")
    expected_y = xr.DataArray(
        trapezoid(da, da["y"], axis=1),
        dims=["x"],
        coords={k: v for k, v in da.coords.items() if "y" not in v.dims},
    )
    assert_allclose(expected_y, actual.compute())
    assert_equal(actual, ds.integrate("y")["var"])
    assert_equal(ds["var"].integrate("y"), ds.integrate("y")["var"])

    # along x and y
    actual = da.integrate(("y", "x"))
    assert actual.ndim == 0

    # integrating along a 2-D coordinate is not supported
    with pytest.raises(ValueError):
        da.integrate("x2d")
@requires_scipy
@pytest.mark.parametrize("dask", [True, False])
def test_cumulative_integrate(dask) -> None:
    """``cumulative_integrate`` matches scipy's ``cumulative_trapezoid``."""
    rs = np.random.default_rng(43)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]

    da = xr.DataArray(
        rs.random((8, 6)),
        dims=["x", "y"],
        coords={
            "x": coord,
            "x2": (("x",), rs.random(8)),
            "z": 3,
            "x2d": (("x", "y"), rs.random((8, 6))),
        },
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})

    ds = xr.Dataset({"var": da})

    # along x
    actual = da.cumulative_integrate("x")

    from scipy.integrate import cumulative_trapezoid

    expected_x = xr.DataArray(
        cumulative_trapezoid(da.compute(), da["x"], axis=0, initial=0.0),  # type: ignore[call-overload,unused-ignore]
        dims=["x", "y"],
        coords=da.coords,
    )
    assert_allclose(expected_x, actual.compute())
    assert_equal(
        ds["var"].cumulative_integrate("x"),
        ds.cumulative_integrate("x")["var"],
    )

    # make sure result is also a dask array (if the source is dask array)
    assert isinstance(actual.data, type(da.data))

    # along y
    actual = da.cumulative_integrate("y")
    expected_y = xr.DataArray(
        cumulative_trapezoid(da, da["y"], axis=1, initial=0.0),  # type: ignore[call-overload,unused-ignore]
        dims=["x", "y"],
        coords=da.coords,
    )
    assert_allclose(expected_y, actual.compute())
    assert_equal(actual, ds.cumulative_integrate("y")["var"])
    assert_equal(
        ds["var"].cumulative_integrate("y"),
        ds.cumulative_integrate("y")["var"],
    )

    # along x and y
    actual = da.cumulative_integrate(("y", "x"))
    assert actual.ndim == 2

    # integrating along a 2-D coordinate is not supported
    with pytest.raises(ValueError):
        da.cumulative_integrate("x2d")
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("which_datetime", ["np", "cftime"])
def test_trapezoid_datetime(dask, which_datetime) -> None:
    """``integrate`` over datetime64/cftime coordinates honors ``datetime_unit``."""
    rs = np.random.default_rng(42)
    coord: ArrayLike
    if which_datetime == "np":
        coord = np.array(
            [
                "2004-07-13",
                "2006-01-13",
                "2010-08-13",
                "2010-09-13",
                "2010-10-11",
                "2010-12-13",
                "2011-02-13",
                "2012-08-13",
            ],
            dtype="datetime64",
        )
    else:
        if not has_cftime:
            pytest.skip("Test requires cftime.")
        coord = xr.date_range("2000", periods=8, freq="2D", use_cftime=True)

    da = xr.DataArray(
        rs.random((8, 6)),
        coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.random((8, 6)))},
        dims=["time", "y"],
    )
    if dask and has_dask:
        da = da.chunk({"time": 4})

    actual = da.integrate("time", datetime_unit="D")
    expected_data = trapezoid(
        da.compute().data,
        duck_array_ops.datetime_to_numeric(da["time"].data, datetime_unit="D"),
        axis=0,
    )
    expected = xr.DataArray(
        expected_data,
        dims=["y"],
        coords={k: v for k, v in da.coords.items() if "time" not in v.dims},
    )
    assert_allclose(expected, actual.compute())

    # make sure result is also a dask array (if the source is dask array)
    assert isinstance(actual.data, type(da.data))

    # hours vs days: integral scales by 1/24
    actual2 = da.integrate("time", datetime_unit="h")
    assert_allclose(actual, actual2 / 24.0)
def test_no_dict() -> None:
    """Dataset instances expose no ``__dict__`` (slotted class)."""
    dataset = Dataset()
    with pytest.raises(AttributeError):
        _ = dataset.__dict__
def test_subclass_slots() -> None:
    """Test that Dataset subclasses must explicitly define ``__slots__``.

    .. note::
        As of 0.13.0, this is actually mitigated into a FutureWarning for any class
        defined outside of the xarray package.
    """
    with pytest.raises(AttributeError) as e:
        # the error is raised at class-definition time
        class MyDS(Dataset):
            pass

    assert str(e.value) == "MyDS must explicitly define __slots__"
def test_weakref() -> None:
    """Dataset instances can be weak-referenced.

    Classes with __slots__ only support the weakref module when
    ``__weakref__`` is listed among their slots.
    """
    from weakref import ref

    dataset = Dataset()
    weak = ref(dataset)
    assert weak() is dataset
def test_deepcopy_obj_array() -> None:
    """``deepcopy`` duplicates the elements of object-dtype arrays."""
    original = Dataset(dict(foo=DataArray(np.array([object()]))))
    duplicate = deepcopy(original)
    assert original["foo"].values[0] is not duplicate["foo"].values[0]
def test_deepcopy_recursive() -> None:
    """Deep copies survive self-referential attrs without RecursionError."""
    # GH:issue:7111

    # direct recursion
    ds = xr.Dataset({"a": (["x"], [1, 2])})
    ds.attrs["other"] = ds

    # TODO: cannot use assert_identical on recursive Vars yet...
    # lets just ensure that deep copy works without RecursionError
    ds.copy(deep=True)

    # indirect recursion
    ds2 = xr.Dataset({"b": (["y"], [3, 4])})
    ds.attrs["other"] = ds2
    ds2.attrs["other"] = ds

    # TODO: cannot use assert_identical on recursive Vars yet...
    # lets just ensure that deep copy works without RecursionError
    ds.copy(deep=True)
    ds2.copy(deep=True)
def test_clip(ds) -> None:
    """``clip`` bounds every variable by scalar and per-variable min/max."""
    result = ds.clip(min=0.5)
    assert all((result.min(...) >= 0.5).values())
    result = ds.clip(max=0.5)
    assert all((result.max(...) <= 0.5).values())
    result = ds.clip(min=0.25, max=0.75)
    assert all((result.min(...) >= 0.25).values())
    assert all((result.max(...) <= 0.75).values())
    # clipping both bounds to the same Dataset keeps the original shape
    result = ds.clip(min=ds.mean("y"), max=ds.mean("y"))
    assert result.sizes == ds.sizes
| TestDataset |
python | langchain-ai__langchain | libs/core/tests/unit_tests/indexing/test_indexing.py | {
"start": 770,
"end": 80987
class ToyLoader(BaseLoader):
    """Toy loader that always returns the same documents.

    The class name must be ``ToyLoader``: the tests below instantiate it by
    that name (e.g. ``ToyLoader(documents=[...])``), so the ``____``
    placeholder broke every test in this module.
    """

    def __init__(self, documents: Sequence[Document]) -> None:
        """Initialize with the documents to return."""
        self.documents = documents

    def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Yield the stored documents one at a time."""
        yield from self.documents

    async def alazy_load(
        self,
    ) -> AsyncIterator[Document]:
        """Asynchronously yield the stored documents one at a time."""
        for document in self.documents:
            yield document
@pytest.fixture
def record_manager() -> InMemoryRecordManager:
    """In-memory record manager fixture with its schema already created."""
    record_manager = InMemoryRecordManager(namespace="hello")
    record_manager.create_schema()
    return record_manager
@pytest_asyncio.fixture
async def arecord_manager() -> InMemoryRecordManager:
    """Async variant: in-memory record manager with its schema already created."""
    record_manager = InMemoryRecordManager(namespace="hello")
    await record_manager.acreate_schema()
    return record_manager
@pytest.fixture
def vector_store() -> InMemoryVectorStore:
    """In-memory vector store backed by deterministic fake embeddings."""
    embeddings = DeterministicFakeEmbedding(size=5)
    return InMemoryVectorStore(embeddings)
@pytest.fixture
def upserting_vector_store() -> InMemoryVectorStore:
    """Second in-memory vector store fixture (same setup as ``vector_store``)."""
    embeddings = DeterministicFakeEmbedding(size=5)
    return InMemoryVectorStore(embeddings)
def test_indexing_same_content(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Indexing some content to confirm it gets added only once."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.",
            ),
            Document(
                page_content="This is another document.",
            ),
        ]
    )

    assert index(loader, record_manager, vector_store, key_encoder="sha256") == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }

    # both documents ended up in the store
    assert len(list(vector_store.store)) == 2

    for _ in range(2):
        # Run the indexing again: re-indexing identical content is idempotent
        assert index(loader, record_manager, vector_store, key_encoder="sha256") == {
            "num_added": 0,
            "num_deleted": 0,
            "num_skipped": 2,
            "num_updated": 0,
        }
async def test_aindexing_same_content(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Indexing some content to confirm it gets added only once (async API)."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.",
            ),
            Document(
                page_content="This is another document.",
            ),
        ]
    )

    assert await aindex(
        loader,
        arecord_manager,
        vector_store,
        key_encoder="sha256",
    ) == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }

    # both documents ended up in the store
    assert len(list(vector_store.store)) == 2

    for _ in range(2):
        # Run the indexing again: re-indexing identical content is idempotent
        assert await aindex(
            loader,
            arecord_manager,
            vector_store,
            key_encoder="sha256",
        ) == {
            "num_added": 0,
            "num_deleted": 0,
            "num_skipped": 2,
            "num_updated": 0,
        }
def test_index_simple_delete_full(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """With cleanup="full", mutated docs replace stale ones; unchanged docs skip.

    ``get_time`` is patched so record timestamps are deterministic across runs.
    """
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.",
            ),
            Document(
                page_content="This is another document.",
            ),
        ]
    )

    with patch.object(
        record_manager,
        "get_time",
        return_value=datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp(),
    ):
        assert index(
            loader,
            record_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        ) == {
            "num_added": 2,
            "num_deleted": 0,
            "num_skipped": 0,
            "num_updated": 0,
        }

    with patch.object(
        record_manager,
        "get_time",
        return_value=datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp(),
    ):
        assert index(
            loader,
            record_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        ) == {
            "num_added": 0,
            "num_deleted": 0,
            "num_skipped": 2,
            "num_updated": 0,
        }

    loader = ToyLoader(
        documents=[
            Document(
                page_content="mutated document 1",
            ),
            Document(
                page_content="This is another document.",  # <-- Same as original
            ),
        ]
    )

    with patch.object(
        record_manager,
        "get_time",
        return_value=datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp(),
    ):
        indexing_result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )

        doc_texts = {
            # Ignoring type since doc should be in the store and not a None
            vector_store.get_by_ids([uid])[0].page_content
            for uid in vector_store.store
        }
        assert doc_texts == {"mutated document 1", "This is another document."}

        assert indexing_result == {
            "num_added": 1,
            "num_deleted": 1,
            "num_skipped": 1,
            "num_updated": 0,
        }

    # Attempt to index again verify that nothing changes
    with patch.object(
        record_manager,
        "get_time",
        return_value=datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp(),
    ):
        assert index(
            loader,
            record_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        ) == {
            "num_added": 0,
            "num_deleted": 0,
            "num_skipped": 2,
            "num_updated": 0,
        }
async def test_aindex_simple_delete_full(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async full cleanup: added once, skipped on re-index, mutations replaced."""
    loader = ToyLoader(
        documents=[
            Document(page_content="This is a test document."),
            Document(page_content="This is another document."),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_one):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    # Re-running with identical content is a no-op: everything is skipped.
    with patch.object(arecord_manager, "get_time", return_value=day_one):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
    # Mutate the first document; the second is identical to the original.
    loader = ToyLoader(
        documents=[
            Document(page_content="mutated document 1"),
            Document(page_content="This is another document."),
        ]
    )
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_two):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 1,
        "num_deleted": 1,
        "num_skipped": 1,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"mutated document 1", "This is another document."}
    # Index once more and verify nothing changes.
    with patch.object(arecord_manager, "get_time", return_value=day_two):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
def test_index_delete_full_recovery_after_deletion_failure(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Full cleanup recovers after a vector-store deletion failure.

    When ``vector_store.delete`` reports failure, ``index`` raises
    ``IndexingException`` mid-run, leaving a stale document behind.
    The next successful run must detect and remove the stale record.
    """
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.",
            ),
            Document(
                page_content="This is another document.",
            ),
        ]
    )
    with patch.object(
        record_manager,
        "get_time",
        return_value=datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp(),
    ):
        assert index(
            loader,
            record_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        ) == {
            "num_added": 2,
            "num_deleted": 0,
            "num_skipped": 0,
            "num_updated": 0,
        }
    loader = ToyLoader(
        documents=[
            Document(
                page_content="mutated document 1",
            ),
            Document(
                page_content="This is another document.",  # <-- Same as original
            ),
        ]
    )
    with (
        patch.object(
            record_manager,
            "get_time",
            return_value=datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp(),
        ),
        patch.object(vector_store, "delete", return_value=False),
        pytest.raises(IndexingException),
    ):
        # The call raises, so binding its (never-produced) return value
        # would be dead code -- invoke it for the side effects only.
        index(
            loader,
            record_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    # At this point, there should be 3 records in both the record manager
    # and the vector store: the failed deletion left the stale original.
    doc_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([uid])[0].page_content
        for uid in vector_store.store
    }
    assert doc_texts == {
        "This is a test document.",
        "mutated document 1",
        "This is another document.",
    }
    # A later run with a working delete cleans up the stale document.
    with patch.object(
        record_manager,
        "get_time",
        return_value=datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp(),
    ):
        indexing_result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    doc_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([uid])[0].page_content
        for uid in vector_store.store
    }
    assert doc_texts == {"mutated document 1", "This is another document."}
    assert indexing_result == {
        "num_added": 0,
        "num_deleted": 1,
        "num_skipped": 2,
        "num_updated": 0,
    }
async def test_aindex_delete_full_recovery_after_deletion_failure(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async full cleanup recovers after a vector-store deletion failure.

    When ``vector_store.adelete`` reports failure, ``aindex`` raises
    ``IndexingException`` mid-run, leaving a stale document behind.
    The next successful run must detect and remove the stale record.
    """
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.",
            ),
            Document(
                page_content="This is another document.",
            ),
        ]
    )
    with patch.object(
        arecord_manager,
        "get_time",
        return_value=datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp(),
    ):
        assert await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        ) == {
            "num_added": 2,
            "num_deleted": 0,
            "num_skipped": 0,
            "num_updated": 0,
        }
    loader = ToyLoader(
        documents=[
            Document(
                page_content="mutated document 1",
            ),
            Document(
                page_content="This is another document.",  # <-- Same as original
            ),
        ]
    )
    with (
        patch.object(
            arecord_manager,
            "get_time",
            return_value=datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp(),
        ),
        patch.object(vector_store, "adelete", return_value=False),
        pytest.raises(IndexingException),
    ):
        # The call raises, so binding its (never-produced) return value
        # would be dead code -- invoke it for the side effects only.
        await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    # At this point, there should be 3 records in both the record manager
    # and the vector store: the failed deletion left the stale original.
    doc_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([uid])[0].page_content
        for uid in vector_store.store
    }
    assert doc_texts == {
        "This is a test document.",
        "mutated document 1",
        "This is another document.",
    }
    # A later run with a working delete cleans up the stale document.
    with patch.object(
        arecord_manager,
        "get_time",
        return_value=datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp(),
    ):
        indexing_result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="full",
            key_encoder="sha256",
        )
    doc_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([uid])[0].page_content
        for uid in vector_store.store
    }
    assert doc_texts == {"mutated document 1", "This is another document."}
    assert indexing_result == {
        "num_added": 0,
        "num_deleted": 1,
        "num_skipped": 2,
        "num_updated": 0,
    }
def test_incremental_fails_with_bad_source_ids(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Incremental cleanup requires a source id key and non-None source ids."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": None},
            ),
        ]
    )
    # Omitting source_id_key entirely is rejected up front.
    with pytest.raises(
        ValueError,
        match="Source id key is required when cleanup mode is "
        "incremental or scoped_full",
    ):
        index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            key_encoder="sha256",
        )
    # A document whose source id resolves to None is rejected during indexing.
    with pytest.raises(
        ValueError,
        match="Source IDs are required when cleanup mode is incremental or scoped_full",
    ):
        index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
async def test_aincremental_fails_with_bad_source_ids(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async incremental cleanup requires a source id key and non-None ids."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": None},
            ),
        ]
    )
    # Omitting source_id_key entirely is rejected up front.
    with pytest.raises(
        ValueError,
        match="Source id key is required when cleanup mode "
        "is incremental or scoped_full",
    ):
        await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="incremental",
            key_encoder="sha256",
        )
    # A document whose source id resolves to None is rejected during indexing.
    with pytest.raises(
        ValueError,
        match="Source IDs are required when cleanup mode is incremental or scoped_full",
    ):
        await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
def test_index_simple_delete_scoped_full(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Scoped-full cleanup only deletes stale docs from re-indexed sources."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": "1"},
            ),
            Document(
                page_content="This is a test document from another source.",
                metadata={"source": "2"},
            ),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_one):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 4,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    # Identical content on a second run: all four are skipped.
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_two):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 4,
        "num_updated": 0,
    }
    # Source "1" now yields only two docs (one mutated); source "2" is absent
    # from this run, so its document must survive the cleanup.
    loader = ToyLoader(
        documents=[
            Document(page_content="mutated document 1", metadata={"source": "1"}),
            Document(
                page_content="This is another document.",  # <-- Same as original
                metadata={"source": "1"},
            ),
        ]
    )
    day_three = datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_three):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 1,
        "num_deleted": 2,
        "num_skipped": 1,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {
        "mutated document 1",
        "This is another document.",
        "This is a test document from another source.",
    }
    # Index once more and verify nothing changes.
    day_four = datetime(2021, 1, 4, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_four):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
async def test_aindex_simple_delete_scoped_full(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async scoped-full cleanup only deletes stale docs of re-indexed sources."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": "1"},
            ),
            Document(
                page_content="This is a test document from another source.",
                metadata={"source": "2"},
            ),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_one):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 4,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    # Identical content on a second run: all four are skipped.
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_two):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 4,
        "num_updated": 0,
    }
    # Source "1" now yields only two docs (one mutated); source "2" is absent
    # from this run, so its document must survive the cleanup.
    loader = ToyLoader(
        documents=[
            Document(page_content="mutated document 1", metadata={"source": "1"}),
            Document(
                page_content="This is another document.",  # <-- Same as original
                metadata={"source": "1"},
            ),
        ]
    )
    day_three = datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_three):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 1,
        "num_deleted": 2,
        "num_skipped": 1,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {
        "mutated document 1",
        "This is another document.",
        "This is a test document from another source.",
    }
    # Index once more and verify nothing changes.
    day_four = datetime(2021, 1, 4, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_four):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
def test_scoped_full_fails_with_bad_source_ids(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Scoped-full cleanup requires a source id key and non-None source ids."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": None},
            ),
        ]
    )
    # Omitting source_id_key entirely is rejected up front.
    with pytest.raises(
        ValueError,
        match="Source id key is required when cleanup mode "
        "is incremental or scoped_full",
    ):
        index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            key_encoder="sha256",
        )
    # A document whose source id resolves to None is rejected during indexing.
    with pytest.raises(
        ValueError,
        match="Source IDs are required when cleanup mode is incremental or scoped_full",
    ):
        index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
async def test_ascoped_full_fails_with_bad_source_ids(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async scoped-full cleanup requires a source id key and non-None ids."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": None},
            ),
        ]
    )
    # Omitting source_id_key entirely is rejected up front.
    with pytest.raises(
        ValueError,
        match="Source id key is required when cleanup mode "
        "is incremental or scoped_full",
    ):
        await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            key_encoder="sha256",
        )
    # A document whose source id resolves to None is rejected during indexing.
    with pytest.raises(
        ValueError,
        match="Source IDs are required when cleanup mode is incremental or scoped_full",
    ):
        await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
def test_index_empty_doc_scoped_full(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Scoped-full cleanup with an empty loader deletes nothing."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": "1"},
            ),
            Document(
                page_content="This is a test document from another source.",
                metadata={"source": "2"},
            ),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_one):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 4,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    # Identical content on a second run: all four are skipped.
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_two):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 4,
        "num_updated": 0,
    }
    # An empty batch touches no sources, so scoped cleanup deletes nothing.
    loader = ToyLoader(documents=[])
    day_three = datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_three):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
async def test_aindex_empty_doc_scoped_full(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async scoped-full cleanup with an empty loader deletes nothing."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is yet another document.",
                metadata={"source": "1"},
            ),
            Document(
                page_content="This is a test document from another source.",
                metadata={"source": "2"},
            ),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_one):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 4,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    # Identical content on a second run: all four are skipped.
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_two):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 4,
        "num_updated": 0,
    }
    # An empty batch touches no sources, so scoped cleanup deletes nothing.
    loader = ToyLoader(documents=[])
    day_three = datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=day_three):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup="scoped_full",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
def test_no_delete(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """With cleanup=None, mutated docs are added but stale ones never removed."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
        ]
    )
    frozen_time = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=frozen_time):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup=None,
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    # Adding the same content twice: everything is skipped.
    with patch.object(record_manager, "get_time", return_value=frozen_time):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup=None,
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
    loader = ToyLoader(
        documents=[
            Document(page_content="mutated content", metadata={"source": "1"}),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
        ]
    )
    # The mutated doc is added; without a cleanup mode nothing is ever
    # updated or deleted.
    with patch.object(record_manager, "get_time", return_value=frozen_time):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup=None,
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 1,
        "num_deleted": 0,
        "num_skipped": 1,
        "num_updated": 0,
    }
async def test_ano_delete(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async cleanup=None: mutated docs are added, stale ones never removed."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
        ]
    )
    frozen_time = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=frozen_time):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup=None,
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    # Adding the same content twice: everything is skipped.
    with patch.object(arecord_manager, "get_time", return_value=frozen_time):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup=None,
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
    loader = ToyLoader(
        documents=[
            Document(page_content="mutated content", metadata={"source": "1"}),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
        ]
    )
    # The mutated doc is added; without a cleanup mode nothing is ever
    # updated or deleted.
    with patch.object(arecord_manager, "get_time", return_value=frozen_time):
        result = await aindex(
            loader,
            arecord_manager,
            vector_store,
            cleanup=None,
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 1,
        "num_deleted": 0,
        "num_skipped": 1,
        "num_updated": 0,
    }
def test_incremental_delete(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Incremental cleanup deletes stale docs only for sources seen in the run."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_one):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"This is another document.", "This is a test document."}
    # Index once more and verify nothing changes.
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_two):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
    # Source "1" now produces two fully mutated docs; the source "2" doc is
    # unchanged and must be skipped rather than deleted.
    loader = ToyLoader(
        documents=[
            Document(page_content="mutated document 1", metadata={"source": "1"}),
            Document(page_content="mutated document 2", metadata={"source": "1"}),
            Document(
                page_content="This is another document.",  # <-- Same as original
                metadata={"source": "2"},
            ),
        ]
    )
    day_three = datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_three):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 1,
        "num_skipped": 1,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {
        "mutated document 1",
        "mutated document 2",
        "This is another document.",
    }
def test_incremental_delete_with_same_source(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Incremental cleanup drops a doc removed from a still-present source."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "1"}
            ),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_one):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"This is another document.", "This is a test document."}
    # Source "1" now yields only one of its two original docs: the missing
    # one must be deleted and the surviving one skipped.
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is another document.",  # <-- Same as original
                metadata={"source": "1"},
            ),
        ]
    )
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_two):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 1,
        "num_skipped": 1,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {
        "This is another document.",
    }
def test_incremental_indexing_with_batch_size(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Batched incremental indexing: same-source docs split across batches."""
    loader = ToyLoader(
        documents=[
            Document(page_content="1", metadata={"source": "1"}),
            Document(page_content="2", metadata={"source": "1"}),
            Document(page_content="3", metadata={"source": "1"}),
            Document(page_content="4", metadata={"source": "1"}),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_one):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            batch_size=2,
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 4,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"1", "2", "3", "4"}
    # Second run with batch_size=2: per-batch incremental cleanup re-adds
    # and re-deletes across batch boundaries, but the store ends unchanged.
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_two):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            batch_size=2,
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 2,
        "num_skipped": 2,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"1", "2", "3", "4"}
def test_incremental_delete_with_batch_size(
    record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Incremental cleanup with batching: distinct sources are never clobbered."""
    loader = ToyLoader(
        documents=[
            Document(page_content="1", metadata={"source": "1"}),
            Document(page_content="2", metadata={"source": "2"}),
            Document(page_content="3", metadata={"source": "3"}),
            Document(page_content="4", metadata={"source": "4"}),
        ]
    )
    day_one = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_one):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            batch_size=3,
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 4,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"1", "2", "3", "4"}
    # Re-index identical content: all four skipped, store unchanged.
    day_two = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=day_two):
        result = index(
            loader,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            batch_size=3,
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 4,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"1", "2", "3", "4"}
    # A smaller, unchanged subset with batch_size=1: still only skips.
    later = datetime(2022, 1, 3, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=later):
        docs = [
            Document(page_content="1", metadata={"source": "1"}),
            Document(page_content="2", metadata={"source": "2"}),
        ]
        result = index(
            docs,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            batch_size=1,
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"1", "2", "3", "4"}
    # Same subset again a year later: still a pure skip.
    even_later = datetime(2023, 1, 4, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=even_later):
        docs = [
            Document(page_content="1", metadata={"source": "1"}),
            Document(page_content="2", metadata={"source": "2"}),
        ]
        result = index(
            docs,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            batch_size=1,
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"1", "2", "3", "4"}
    # Finally mutate sources "1" and "2": their old docs are replaced while
    # the untouched sources "3" and "4" survive.
    final_time = datetime(2024, 1, 5, tzinfo=timezone.utc).timestamp()
    with patch.object(record_manager, "get_time", return_value=final_time):
        docs = [
            Document(page_content="changed 1", metadata={"source": "1"}),
            Document(page_content="changed 2", metadata={"source": "2"}),
        ]
        result = index(
            docs,
            record_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 2,
        "num_skipped": 0,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"changed 1", "changed 2", "3", "4"}
async def test_aincremental_delete(
    arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
    """Async incremental cleanup over lazy iterators of documents."""
    loader = ToyLoader(
        documents=[
            Document(
                page_content="This is a test document.", metadata={"source": "1"}
            ),
            Document(
                page_content="This is another document.", metadata={"source": "2"}
            ),
        ]
    )
    first_time = datetime(2021, 1, 2, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=first_time):
        result = await aindex(
            loader.lazy_load(),
            arecord_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 0,
        "num_skipped": 0,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {"This is another document.", "This is a test document."}
    # Index once more and verify nothing changes.
    with patch.object(arecord_manager, "get_time", return_value=first_time):
        result = await aindex(
            loader.lazy_load(),
            arecord_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 0,
        "num_deleted": 0,
        "num_skipped": 2,
        "num_updated": 0,
    }
    # Source "1" now produces two fully mutated docs; the source "2" doc is
    # unchanged and must be skipped rather than deleted.
    loader = ToyLoader(
        documents=[
            Document(page_content="mutated document 1", metadata={"source": "1"}),
            Document(page_content="mutated document 2", metadata={"source": "1"}),
            Document(
                page_content="This is another document.",  # <-- Same as original
                metadata={"source": "2"},
            ),
        ]
    )
    second_time = datetime(2021, 1, 3, tzinfo=timezone.utc).timestamp()
    with patch.object(arecord_manager, "get_time", return_value=second_time):
        result = await aindex(
            loader.lazy_load(),
            arecord_manager,
            vector_store,
            cleanup="incremental",
            source_id_key="source",
            key_encoder="sha256",
        )
    assert result == {
        "num_added": 2,
        "num_deleted": 1,
        "num_skipped": 1,
        "num_updated": 0,
    }
    stored_texts = {
        # Ignoring type since doc should be in the store and not a None
        vector_store.get_by_ids([doc_id])[0].page_content
        for doc_id in vector_store.store
    }
    assert stored_texts == {
        "mutated document 1",
        "mutated document 2",
        "This is another document.",
    }
def test_indexing_with_no_docs(
record_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
loader = ToyLoader(documents=[])
assert index(
loader,
record_manager,
vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
async def test_aindexing_with_no_docs(
arecord_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
loader = ToyLoader(documents=[])
assert await aindex(
loader,
arecord_manager,
vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
def test_deduplication(
record_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
# Should result in only a single document being added
assert index(
docs,
record_manager,
vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
async def test_adeduplication(
arecord_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
# Should result in only a single document being added
assert await aindex(
docs,
arecord_manager,
vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
def test_within_batch_deduplication_counting(
record_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Test that within-batch deduplicated documents are counted in num_skipped."""
# Create documents with within-batch duplicates
docs = [
Document(
page_content="Document A",
metadata={"source": "1"},
),
Document(
page_content="Document A", # Duplicate in same batch
metadata={"source": "1"},
),
Document(
page_content="Document B",
metadata={"source": "2"},
),
Document(
page_content="Document B", # Duplicate in same batch
metadata={"source": "2"},
),
Document(
page_content="Document C",
metadata={"source": "3"},
),
]
# Index with large batch size to ensure all docs are in one batch
result = index(
docs,
record_manager,
vector_store,
batch_size=10, # All docs in one batch
cleanup="full",
key_encoder="sha256",
)
# Should have 3 unique documents added
assert result["num_added"] == 3
# Should have 2 documents skipped due to within-batch deduplication
assert result["num_skipped"] == 2
# Total should match input
assert result["num_added"] + result["num_skipped"] == len(docs)
assert result["num_deleted"] == 0
assert result["num_updated"] == 0
# Verify the content
assert isinstance(vector_store, InMemoryVectorStore)
ids = list(vector_store.store.keys())
contents = sorted(
[document.page_content for document in vector_store.get_by_ids(ids)]
)
assert contents == ["Document A", "Document B", "Document C"]
async def test_awithin_batch_deduplication_counting(
arecord_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Test that within-batch deduplicated documents are counted in num_skipped."""
# Create documents with within-batch duplicates
docs = [
Document(
page_content="Document A",
metadata={"source": "1"},
),
Document(
page_content="Document A", # Duplicate in same batch
metadata={"source": "1"},
),
Document(
page_content="Document B",
metadata={"source": "2"},
),
Document(
page_content="Document B", # Duplicate in same batch
metadata={"source": "2"},
),
Document(
page_content="Document C",
metadata={"source": "3"},
),
]
# Index with large batch size to ensure all docs are in one batch
result = await aindex(
docs,
arecord_manager,
vector_store,
batch_size=10, # All docs in one batch
cleanup="full",
key_encoder="sha256",
)
# Should have 3 unique documents added
assert result["num_added"] == 3
# Should have 2 documents skipped due to within-batch deduplication
assert result["num_skipped"] == 2
# Total should match input
assert result["num_added"] + result["num_skipped"] == len(docs)
assert result["num_deleted"] == 0
assert result["num_updated"] == 0
# Verify the content
assert isinstance(vector_store, InMemoryVectorStore)
ids = list(vector_store.store.keys())
contents = sorted(
[document.page_content for document in vector_store.get_by_ids(ids)]
)
assert contents == ["Document A", "Document B", "Document C"]
def test_full_cleanup_with_different_batchsize(
record_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert index(
docs,
record_manager,
vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert index(
docs,
record_manager,
vector_store,
cleanup="full",
cleanup_batch_size=17,
key_encoder="sha256",
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
def test_incremental_cleanup_with_different_batchsize(
record_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert index(
docs,
record_manager,
vector_store,
source_id_key="source",
cleanup="incremental",
key_encoder="sha256",
) == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert index(
docs,
record_manager,
vector_store,
source_id_key="source",
cleanup="incremental",
cleanup_batch_size=17,
key_encoder="sha256",
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
async def test_afull_cleanup_with_different_batchsize(
arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert await aindex(
docs,
arecord_manager,
vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert await aindex(
docs,
arecord_manager,
vector_store,
cleanup="full",
cleanup_batch_size=17,
key_encoder="sha256",
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
async def test_aincremental_cleanup_with_different_batchsize(
arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert await aindex(
docs,
arecord_manager,
vector_store,
source_id_key="source",
cleanup="incremental",
key_encoder="sha256",
) == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert await aindex(
docs,
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
cleanup_batch_size=17,
key_encoder="sha256",
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
def test_deduplication_v2(
record_manager: InMemoryRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="2",
metadata={"source": "2"},
),
Document(
page_content="3",
metadata={"source": "3"},
),
]
assert index(
docs,
record_manager,
vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 3,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
# using in memory implementation here
assert isinstance(vector_store, InMemoryVectorStore)
ids = list(vector_store.store.keys())
contents = sorted(
[document.page_content for document in vector_store.get_by_ids(ids)]
)
assert contents == ["1", "2", "3"]
async def _to_async_iter(it: Iterable[Any]) -> AsyncIterator[Any]:
"""Convert an iterable to an async iterator."""
for i in it:
yield i
async def test_abatch() -> None:
"""Test the abatch function."""
batches = _abatch(5, _to_async_iter(range(12)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11],
]
batches = _abatch(1, _to_async_iter(range(3)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [[0], [1], [2]]
batches = _abatch(2, _to_async_iter(range(5)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [[0, 1], [2, 3], [4]]
def test_indexing_force_update(
record_manager: InMemoryRecordManager, upserting_vector_store: VectorStore
) -> None:
"""Test indexing with force update."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
assert index(
docs,
record_manager,
upserting_vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
assert index(
docs,
record_manager,
upserting_vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 3,
"num_updated": 0,
}
assert index(
docs,
record_manager,
upserting_vector_store,
cleanup="full",
force_update=True,
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 2,
}
async def test_aindexing_force_update(
arecord_manager: InMemoryRecordManager, upserting_vector_store: VectorStore
) -> None:
"""Test indexing with force update."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
assert await aindex(
docs,
arecord_manager,
upserting_vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
assert await aindex(
docs,
arecord_manager,
upserting_vector_store,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 3,
"num_updated": 0,
}
assert await aindex(
docs,
arecord_manager,
upserting_vector_store,
cleanup="full",
force_update=True,
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 2,
}
def test_indexing_custom_batch_size(
record_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with a custom batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
ids = [_get_document_with_hash(doc, key_encoder="sha256").id for doc in docs]
batch_size = 1
original = vector_store.add_documents
try:
mock_add_documents = MagicMock()
vector_store.add_documents = mock_add_documents # type: ignore[method-assign]
index(
docs,
record_manager,
vector_store,
batch_size=batch_size,
key_encoder="sha256",
)
args, kwargs = mock_add_documents.call_args
doc_with_id = Document(
id=ids[0], page_content="This is a test document.", metadata={"source": "1"}
)
assert args == ([doc_with_id],)
assert kwargs == {"ids": ids, "batch_size": batch_size}
finally:
vector_store.add_documents = original # type: ignore[method-assign]
async def test_aindexing_custom_batch_size(
arecord_manager: InMemoryRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with a custom batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
ids = [_get_document_with_hash(doc, key_encoder="sha256").id for doc in docs]
batch_size = 1
mock_add_documents = AsyncMock()
doc_with_id = Document(
id=ids[0], page_content="This is a test document.", metadata={"source": "1"}
)
vector_store.aadd_documents = mock_add_documents # type: ignore[method-assign]
await aindex(
docs,
arecord_manager,
vector_store,
batch_size=batch_size,
key_encoder="sha256",
)
args, kwargs = mock_add_documents.call_args
assert args == ([doc_with_id],)
assert kwargs == {"ids": ids, "batch_size": batch_size}
def test_index_into_document_index(record_manager: InMemoryRecordManager) -> None:
"""Get an in memory index."""
document_index = InMemoryDocumentIndex()
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
assert index(
docs,
record_manager,
document_index,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert index(
docs,
record_manager,
document_index,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
assert index(
docs,
record_manager,
document_index,
cleanup="full",
force_update=True,
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 2,
}
assert index(
[],
record_manager,
document_index,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 2,
"num_skipped": 0,
"num_updated": 0,
}
async def test_aindex_into_document_index(
arecord_manager: InMemoryRecordManager,
) -> None:
"""Get an in memory index."""
document_index = InMemoryDocumentIndex()
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
assert await aindex(
docs,
arecord_manager,
document_index,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert await aindex(
docs,
arecord_manager,
document_index,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
assert await aindex(
docs,
arecord_manager,
document_index,
cleanup="full",
force_update=True,
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 2,
}
assert await aindex(
[],
arecord_manager,
document_index,
cleanup="full",
key_encoder="sha256",
) == {
"num_added": 0,
"num_deleted": 2,
"num_skipped": 0,
"num_updated": 0,
}
def test_index_with_upsert_kwargs(
record_manager: InMemoryRecordManager, upserting_vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with upsert_kwargs parameter."""
mock_add_documents = MagicMock()
with patch.object(upserting_vector_store, "add_documents", mock_add_documents):
docs = [
Document(
page_content="Test document 1",
metadata={"source": "1"},
),
Document(
page_content="Test document 2",
metadata={"source": "2"},
),
]
upsert_kwargs = {"vector_field": "embedding"}
index(
docs,
record_manager,
upserting_vector_store,
upsert_kwargs=upsert_kwargs,
key_encoder="sha256",
)
# Assert that add_documents was called with the correct arguments
mock_add_documents.assert_called_once()
call_args = mock_add_documents.call_args
assert call_args is not None
args, kwargs = call_args
# Check that the documents are correct (ignoring ids)
assert len(args[0]) == 2
assert all(isinstance(doc, Document) for doc in args[0])
assert [doc.page_content for doc in args[0]] == [
"Test document 1",
"Test document 2",
]
assert [doc.metadata for doc in args[0]] == [{"source": "1"}, {"source": "2"}]
# Check that IDs are present
assert "ids" in kwargs
assert isinstance(kwargs["ids"], list)
assert len(kwargs["ids"]) == 2
# Check other arguments
assert kwargs["batch_size"] == 100
assert kwargs["vector_field"] == "embedding"
def test_index_with_upsert_kwargs_for_document_indexer(
record_manager: InMemoryRecordManager,
mocker: MockerFixture,
) -> None:
"""Test that kwargs are passed to the upsert method of the document indexer."""
document_index = InMemoryDocumentIndex()
upsert_spy = mocker.spy(document_index.__class__, "upsert")
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
upsert_kwargs = {"vector_field": "embedding"}
assert index(
docs,
record_manager,
document_index,
cleanup="full",
upsert_kwargs=upsert_kwargs,
key_encoder="sha256",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert upsert_spy.call_count == 1
# assert call kwargs were passed as kwargs
assert upsert_spy.call_args.kwargs == upsert_kwargs
async def test_aindex_with_upsert_kwargs_for_document_indexer(
arecord_manager: InMemoryRecordManager,
mocker: MockerFixture,
) -> None:
"""Test that kwargs are passed to the upsert method of the document indexer."""
document_index = InMemoryDocumentIndex()
upsert_spy = mocker.spy(document_index.__class__, "aupsert")
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
upsert_kwargs = {"vector_field": "embedding"}
assert await aindex(
docs,
arecord_manager,
document_index,
cleanup="full",
upsert_kwargs=upsert_kwargs,
key_encoder="sha256",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert upsert_spy.call_count == 1
# assert call kwargs were passed as kwargs
assert upsert_spy.call_args.kwargs == upsert_kwargs
async def test_aindex_with_upsert_kwargs(
arecord_manager: InMemoryRecordManager, upserting_vector_store: InMemoryVectorStore
) -> None:
"""Test async indexing with upsert_kwargs parameter."""
mock_aadd_documents = AsyncMock()
with patch.object(upserting_vector_store, "aadd_documents", mock_aadd_documents):
docs = [
Document(
page_content="Async test document 1",
metadata={"source": "1"},
),
Document(
page_content="Async test document 2",
metadata={"source": "2"},
),
]
upsert_kwargs = {"vector_field": "embedding"}
await aindex(
docs,
arecord_manager,
upserting_vector_store,
upsert_kwargs=upsert_kwargs,
key_encoder="sha256",
)
# Assert that aadd_documents was called with the correct arguments
mock_aadd_documents.assert_called_once()
call_args = mock_aadd_documents.call_args
assert call_args is not None
args, kwargs = call_args
# Check that the documents are correct (ignoring ids)
assert len(args[0]) == 2
assert all(isinstance(doc, Document) for doc in args[0])
assert [doc.page_content for doc in args[0]] == [
"Async test document 1",
"Async test document 2",
]
assert [doc.metadata for doc in args[0]] == [{"source": "1"}, {"source": "2"}]
# Check that IDs are present
assert "ids" in kwargs
assert isinstance(kwargs["ids"], list)
assert len(kwargs["ids"]) == 2
# Check other arguments
assert kwargs["batch_size"] == 100
assert kwargs["vector_field"] == "embedding"
| ToyLoader |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/sparse/conjugate_gradient_test.py | {
"start": 1075,
"end": 4106
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
itertools.product([np.float32, np.float64], [1, 4, 10], [True, False])
)
def test_conjugate_gradient(self, dtype, size, use_static_shape):
shape = [size, size]
np.random.seed(1)
a_np = (
np.random.uniform(low=-1.0, high=1.0, size=np.prod(shape))
.reshape(shape)
.astype(dtype)
)
# Make a self-adjoint, positive definite.
a_np = np.dot(a_np.T, a_np)
# jacobi preconditioner
jacobi_np = np.zeros_like(a_np)
jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = (
1.0 / a_np.diagonal()
)
rhs_np = np.random.uniform(low=-1.0, high=1.0, size=shape[0]).astype(dtype)
x_np = np.zeros_like(rhs_np)
tol = 1e-6 if dtype == np.float64 else 1e-3
max_iter = 20
if use_static_shape:
a = constant_op.constant(a_np)
rhs = constant_op.constant(rhs_np)
x = constant_op.constant(x_np)
jacobi = constant_op.constant(jacobi_np)
else:
a = array_ops.placeholder_with_default(a_np, shape=None)
rhs = array_ops.placeholder_with_default(rhs_np, shape=None)
x = array_ops.placeholder_with_default(x_np, shape=None)
jacobi = array_ops.placeholder_with_default(jacobi_np, shape=None)
operator = linalg.LinearOperatorFullMatrix(
a, is_positive_definite=True, is_self_adjoint=True
)
preconditioners = [
None,
# Preconditioner that does nothing beyond change shape.
linalg.LinearOperatorIdentity(
a_np.shape[-1],
dtype=a_np.dtype,
is_positive_definite=True,
is_self_adjoint=True,
),
# Jacobi preconditioner.
linalg.LinearOperatorFullMatrix(
jacobi, is_positive_definite=True, is_self_adjoint=True
),
]
cg_results = []
for preconditioner in preconditioners:
cg_graph = conjugate_gradient.conjugate_gradient(
operator,
rhs,
preconditioner=preconditioner,
x=x,
tol=tol,
max_iter=max_iter,
)
cg_val = self.evaluate(cg_graph)
norm_r0 = np.linalg.norm(rhs_np)
norm_r = np.linalg.norm(cg_val.r)
self.assertLessEqual(norm_r, tol * norm_r0)
# Validate that we get an equally small residual norm with numpy
# using the computed solution.
r_np = rhs_np - np.dot(a_np, cg_val.x)
norm_r_np = np.linalg.norm(r_np)
self.assertLessEqual(norm_r_np, tol * norm_r0)
cg_results.append(cg_val)
# Validate that we get same results using identity_preconditioner
# and None
self.assertEqual(cg_results[0].i, cg_results[1].i)
self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma)
self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol)
self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol)
self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol)
if __name__ == "__main__":
test.main()
| ConjugateGradientTest |
python | dagster-io__dagster | .buildkite/buildkite-shared/buildkite_shared/step_builders/command_step_builder.py | {
"start": 550,
"end": 928
} | class ____:
def __init__(self, cpu, memory, docker_cpu: str = "500m"):
self._cpu = cpu
self._memory = memory
self._docker_cpu = docker_cpu
@property
def cpu(self):
return self._cpu
@property
def memory(self):
return self._memory
@property
def docker_cpu(self):
return self._docker_cpu
| ResourceRequests |
python | ansible__ansible | test/units/module_utils/facts/test_collector.py | {
"start": 14132,
"end": 14789
} | class ____(unittest.TestCase):
def test(self):
names = ['network', 'virtual', 'env']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
res = collector.build_dep_data(names, all_fact_subsets)
self.assertIsInstance(res, defaultdict)
self.assertEqual(dict(res),
{'network': set(['platform', 'distribution']),
'virtual': set(),
'env': set()})
| TestBuildDepData |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/executors/aws_lambda/utils.py | {
"start": 1596,
"end": 1789
} | class ____(BaseConfigKeys):
"""Config keys loaded which are valid lambda invoke args."""
FUNCTION_NAME = "function_name"
QUALIFIER = "function_qualifier"
| InvokeLambdaKwargsConfigKeys |
python | ray-project__ray | rllib/connectors/agent/synced_filter.py | {
"start": 198,
"end": 1938
} | class ____(AgentConnector):
"""An agent connector that filters with synchronized parameters."""
def __init__(self, ctx: ConnectorContext, *args, **kwargs):
super().__init__(ctx)
if args or kwargs:
raise ValueError(
"SyncedFilterAgentConnector does not take any additional arguments, "
"but got args=`{}` and kwargs={}.".format(args, kwargs)
)
def apply_changes(self, other: "Filter", *args, **kwargs) -> None:
"""Updates self with state from other filter."""
# TODO: (artur) inline this as soon as we deprecate ordinary filter with
# non-connecto env_runner
return self.filter.apply_changes(other, *args, **kwargs)
def copy(self) -> "Filter":
"""Creates a new object with same state as self.
This is a legacy Filter method that we need to keep around for now
Returns:
A copy of self.
"""
# inline this as soon as we deprecate ordinary filter with non-connector
# env_runner
return self.filter.copy()
def sync(self, other: "AgentConnector") -> None:
"""Copies all state from other filter to self."""
# TODO: (artur) inline this as soon as we deprecate ordinary filter with
# non-connector env_runner
return self.filter.sync(other.filter)
def reset_state(self) -> None:
"""Creates copy of current state and resets accumulated state"""
raise NotImplementedError
def as_serializable(self) -> "Filter":
# TODO: (artur) inline this as soon as we deprecate ordinary filter with
# non-connector env_runner
return self.filter.as_serializable()
| SyncedFilterAgentConnector |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/update/tutorial002.py | {
"start": 519,
"end": 2863
} | class ____(SQLModel):
name: Optional[str] = None
secret_name: Optional[str] = None
age: Optional[int] = None
password: Optional[str] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def hash_password(password: str) -> str:
# Use something like passlib here
return f"not really hashed {password} hehehe"
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
hashed_password = hash_password(hero.password)
with Session(engine) as session:
extra_data = {"hashed_password": hashed_password}
db_hero = Hero.model_validate(hero, update=extra_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroPublic])
def read_heroes(offset: int = 0, limit: int = Query(default=100, le=100)):
with Session(engine) as session:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(hero_id: int, hero: HeroUpdate):
with Session(engine) as session:
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
extra_data = {}
if "password" in hero_data:
password = hero_data["password"]
hashed_password = hash_password(password)
extra_data["hashed_password"] = hashed_password
db_hero.sqlmodel_update(hero_data, update=extra_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
| HeroUpdate |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/grant_types/hybrid.py | {
"start": 419,
"end": 2714
} | class ____(GrantTypeBase):
def __init__(self, request_validator=None, **kwargs):
self.request_validator = request_validator or RequestValidator()
self.proxy_target = OAuth2AuthorizationCodeGrant(
request_validator=request_validator, **kwargs)
# All hybrid response types should be fragment-encoded.
self.proxy_target.default_response_mode = "fragment"
self.register_response_type('code id_token')
self.register_response_type('code token')
self.register_response_type('code id_token token')
self.custom_validators.post_auth.append(
self.openid_authorization_validator)
# Hybrid flows can return the id_token from the authorization
# endpoint as part of the 'code' response
self.register_code_modifier(self.add_token)
self.register_code_modifier(self.add_id_token)
self.register_token_modifier(self.add_id_token)
def add_id_token(self, token, token_handler, request):
return super().add_id_token(token, token_handler, request, nonce=request.nonce)
def openid_authorization_validator(self, request):
"""Additional validation when following the Authorization Code flow.
"""
request_info = super().openid_authorization_validator(request)
if not request_info: # returns immediately if OAuth2.0
return request_info
# REQUIRED if the Response Type of the request is `code
# id_token` or `code id_token token` and OPTIONAL when the
# Response Type of the request is `code token`. It is a string
# value used to associate a Client session with an ID Token,
# and to mitigate replay attacks. The value is passed through
# unmodified from the Authentication Request to the ID
# Token. Sufficient entropy MUST be present in the `nonce`
# values used to prevent attackers from guessing values. For
# implementation notes, see Section 15.5.2.
if request.response_type in ["code id_token", "code id_token token"] and not request.nonce:
raise InvalidRequestError(
request=request,
description='Request is missing mandatory nonce parameter.'
)
return request_info
| HybridGrant |
python | huggingface__transformers | src/transformers/models/omdet_turbo/configuration_omdet_turbo.py | {
"start": 903,
"end": 14568
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`OmDetTurboForObjectDetection`].
It is used to instantiate a OmDet-Turbo model according to the specified arguments, defining the model architecture
Instantiating a configuration with the defaults will yield a similar configuration to that of the OmDet-Turbo
[omlab/omdet-turbo-swin-tiny-hf](https://huggingface.co/omlab/omdet-turbo-swin-tiny-hf) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`PreTrainedConfig`, *optional*):
The configuration of the text backbone.
backbone_config (`PreTrainedConfig`, *optional*):
The configuration of the vision backbone.
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether to use the timm for the vision backbone.
backbone (`str`, *optional*, defaults to `"swin_tiny_patch4_window7_224"`):
The name of the pretrained vision backbone to use. If `use_pretrained_backbone=False` a randomly initialized
backbone with the same architecture `backbone` is used.
backbone_kwargs (`dict`, *optional*):
Additional kwargs for the vision backbone.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use a pretrained vision backbone.
apply_layernorm_after_vision_backbone (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization on the feature maps of the vision backbone output.
image_size (`int`, *optional*, defaults to 640):
The size (resolution) of each image.
disable_custom_kernels (`bool`, *optional*, defaults to `False`):
Whether to disable custom kernels.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for layer normalization.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for batch normalization.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
text_projection_in_dim (`int`, *optional*, defaults to 512):
The input dimension for the text projection.
text_projection_out_dim (`int`, *optional*, defaults to 512):
The output dimension for the text projection.
task_encoder_hidden_dim (`int`, *optional*, defaults to 1024):
The feedforward dimension for the task encoder.
class_embed_dim (`int`, *optional*, defaults to 512):
The dimension of the classes embeddings.
class_distance_type (`str`, *optional*, defaults to `"cosine"`):
The type of of distance to compare predicted classes to projected classes embeddings.
Can be `"cosine"` or `"dot"`.
num_queries (`int`, *optional*, defaults to 900):
The number of queries.
csp_activation (`str`, *optional*, defaults to `"silu"`):
The activation function of the Cross Stage Partial (CSP) networks of the encoder.
conv_norm_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function of the ConvNormLayer layers of the encoder.
encoder_feedforward_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the feedforward network of the encoder.
encoder_feedforward_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate following the activation of the encoder feedforward network.
encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate of the encoder multi-head attention module.
hidden_expansion (`int`, *optional*, defaults to 1):
The hidden expansion of the CSP networks in the encoder.
vision_features_channels (`tuple(int)`, *optional*, defaults to `[256, 256, 256]`):
The projected vision features channels used as inputs for the decoder.
encoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the encoder.
encoder_in_channels (`List(int)`, *optional*, defaults to `[192, 384, 768]`):
The input channels for the encoder.
encoder_projection_indices (`List(int)`, *optional*, defaults to `[2]`):
The indices of the input features projected by each layers.
encoder_attention_heads (`int`, *optional*, defaults to 8):
The number of attention heads for the encoder.
encoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the encoder.
encoder_layers (`int`, *optional*, defaults to 1):
The number of layers in the encoder.
positional_encoding_temperature (`int`, *optional*, defaults to 10000):
The positional encoding temperature in the encoder.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of feature levels for the multi-scale deformable attention module of the decoder.
decoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the decoder.
decoder_num_heads (`int`, *optional*, defaults to 8):
The number of heads for the decoder.
decoder_num_layers (`int`, *optional*, defaults to 6):
The number of layers for the decoder.
decoder_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the decoder.
decoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the decoder.
decoder_num_points (`int`, *optional*, defaults to 4):
The number of points sampled in the decoder multi-scale deformable attention module.
decoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate for the decoder.
eval_size (`tuple[int, int]`, *optional*):
Height and width used to computes the effective height and width of the position embeddings after taking
into account the stride (see RTDetr).
learn_initial_query (`bool`, *optional*, defaults to `False`):
Whether to learn the initial query.
cache_size (`int`, *optional*, defaults to 100):
The cache size for the classes and prompts caches.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder-decoder model or not.
kwargs (`dict[str, Any]`, *optional*):
Additional parameters from the architecture. The values in kwargs will be saved as part of the configuration
and can be used to control the model outputs.
Examples:
```python
>>> from transformers import OmDetTurboConfig, OmDetTurboForObjectDetection
>>> # Initializing a OmDet-Turbo omlab/omdet-turbo-swin-tiny-hf style configuration
>>> configuration = OmDetTurboConfig()
>>> # Initializing a model (with random weights) from the omlab/omdet-turbo-swin-tiny-hf style configuration
>>> model = OmDetTurboForObjectDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "omdet-turbo"
sub_configs = {"backbone_config": AutoConfig, "text_config": AutoConfig}
attribute_map = {
"encoder_hidden_dim": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
text_config=None,
backbone_config=None,
use_timm_backbone=True,
backbone="swin_tiny_patch4_window7_224",
backbone_kwargs=None,
use_pretrained_backbone=False,
apply_layernorm_after_vision_backbone=True,
image_size=640,
disable_custom_kernels=False,
layer_norm_eps=1e-5,
batch_norm_eps=1e-5,
init_std=0.02,
text_projection_in_dim=512,
text_projection_out_dim=512,
task_encoder_hidden_dim=1024,
class_embed_dim=512,
class_distance_type="cosine",
num_queries=900,
csp_activation="silu",
conv_norm_activation="gelu",
encoder_feedforward_activation="relu",
encoder_feedforward_dropout=0.0,
encoder_dropout=0.0,
hidden_expansion=1,
vision_features_channels=[256, 256, 256],
encoder_hidden_dim=256,
encoder_in_channels=[192, 384, 768],
encoder_projection_indices=[2],
encoder_attention_heads=8,
encoder_dim_feedforward=2048,
encoder_layers=1,
positional_encoding_temperature=10000,
num_feature_levels=3,
decoder_hidden_dim=256,
decoder_num_heads=8,
decoder_num_layers=6,
decoder_activation="relu",
decoder_dim_feedforward=2048,
decoder_num_points=4,
decoder_dropout=0.0,
eval_size=None,
learn_initial_query=False,
cache_size=100,
is_encoder_decoder=True,
**kwargs,
):
if use_timm_backbone:
if backbone_config is None:
backbone_kwargs = {
"out_indices": [1, 2, 3],
"img_size": image_size,
"always_partition": True,
}
elif backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `swin` vision config.")
backbone_config = CONFIG_MAPPING["swin"](
window_size=7,
image_size=image_size,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
out_indices=[2, 3, 4],
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
if text_config is None:
logger.info(
"`text_config` is `None`. Initializing the config with the default `clip_text_model` text config."
)
text_config = CONFIG_MAPPING["clip_text_model"]()
elif isinstance(text_config, dict):
text_model_type = text_config.get("model_type")
text_config = CONFIG_MAPPING[text_model_type](**text_config)
if class_distance_type not in ["cosine", "dot"]:
raise ValueError(
f"Invalid `class_distance_type`. It should be either `cosine` or `dot`, but got {class_distance_type}."
)
self.text_config = text_config
self.backbone_config = backbone_config
self.use_timm_backbone = use_timm_backbone
self.backbone = backbone
self.backbone_kwargs = backbone_kwargs
self.use_pretrained_backbone = use_pretrained_backbone
self.apply_layernorm_after_vision_backbone = apply_layernorm_after_vision_backbone
self.image_size = image_size
self.disable_custom_kernels = disable_custom_kernels
self.layer_norm_eps = layer_norm_eps
self.batch_norm_eps = batch_norm_eps
self.init_std = init_std
self.text_projection_in_dim = text_projection_in_dim
self.text_projection_out_dim = text_projection_out_dim
self.task_encoder_hidden_dim = task_encoder_hidden_dim
self.class_embed_dim = class_embed_dim
self.class_distance_type = class_distance_type
self.num_queries = num_queries
self.csp_activation = csp_activation
self.conv_norm_activation = conv_norm_activation
self.encoder_feedforward_activation = encoder_feedforward_activation
self.encoder_feedforward_dropout = encoder_feedforward_dropout
self.encoder_dropout = encoder_dropout
self.hidden_expansion = hidden_expansion
self.vision_features_channels = vision_features_channels
self.encoder_hidden_dim = encoder_hidden_dim
self.encoder_in_channels = encoder_in_channels
self.encoder_projection_indices = encoder_projection_indices
self.encoder_attention_heads = encoder_attention_heads
self.encoder_dim_feedforward = encoder_dim_feedforward
self.encoder_layers = encoder_layers
self.positional_encoding_temperature = positional_encoding_temperature
self.num_feature_levels = num_feature_levels
self.decoder_hidden_dim = decoder_hidden_dim
self.decoder_num_heads = decoder_num_heads
self.decoder_num_layers = decoder_num_layers
self.decoder_activation = decoder_activation
self.decoder_dim_feedforward = decoder_dim_feedforward
self.decoder_num_points = decoder_num_points
self.decoder_dropout = decoder_dropout
self.eval_size = eval_size
self.learn_initial_query = learn_initial_query
self.cache_size = cache_size
self.is_encoder_decoder = is_encoder_decoder
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
__all__ = ["OmDetTurboConfig"]
| OmDetTurboConfig |
python | getsentry__sentry-python | sentry_sdk/scope.py | {
"start": 3304,
"end": 5076
} | class ____:
def __init__(self, hub=None):
# type: (Optional[Any]) -> None
self._old_scopes = [] # type: List[Scope]
def __enter__(self):
# type: () -> Scope
isolation_scope = Scope.get_isolation_scope()
self._old_scopes.append(isolation_scope)
forked_scope = isolation_scope.fork()
_isolation_scope.set(forked_scope)
return forked_scope
def __exit__(self, exc_type, exc_value, tb):
# type: (Any, Any, Any) -> None
old_scope = self._old_scopes.pop()
_isolation_scope.set(old_scope)
def add_global_event_processor(processor):
# type: (EventProcessor) -> None
global_event_processors.append(processor)
def register_external_propagation_context(fn):
# type: (Callable[[], Optional[Tuple[str, str]]]) -> None
global _external_propagation_context_fn
_external_propagation_context_fn = fn
def remove_external_propagation_context():
# type: () -> None
global _external_propagation_context_fn
_external_propagation_context_fn = None
def get_external_propagation_context():
# type: () -> Optional[Tuple[str, str]]
return (
_external_propagation_context_fn() if _external_propagation_context_fn else None
)
def _attr_setter(fn):
# type: (Any) -> Any
return property(fset=fn, doc=fn.__doc__)
def _disable_capture(fn):
# type: (F) -> F
@wraps(fn)
def wrapper(self, *args, **kwargs):
# type: (Any, *Dict[str, Any], **Any) -> Any
if not self._should_capture:
return
try:
self._should_capture = False
return fn(self, *args, **kwargs)
finally:
self._should_capture = True
return wrapper # type: ignore
| _ScopeManager |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/random_gamma_test.py | {
"start": 1385,
"end": 9052
} | class ____(test.TestCase):
"""This is a medium test due to the moments computation taking some time."""
def setUp(self):
np.random.seed(137)
random_seed.set_random_seed(137)
def _Sampler(self, num, alpha, beta, dtype, use_gpu=True, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_gamma(
[num], alpha, beta=beta, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in range(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
def testNpDtypes(self):
self.evaluate(random_ops.random_gamma(
[5], alpha=np.ones([2, 1, 3]), beta=np.ones([3]), dtype=np.float32))
def testEmptySamplingNoError(self):
self.evaluate(random_ops.random_gamma(
[5], alpha=np.ones([2, 0, 3]), beta=np.ones([3]), dtype=dtypes.float32))
@test_util.run_deprecated_v1
def testMomentsFloat32(self):
self._testMoments(dtypes.float32)
@test_util.run_deprecated_v1
def testMomentsFloat64(self):
self._testMoments(dtypes.float64)
def _testMoments(self, dt):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test moments: %s" % e)
return
# The moments test is a z-value test. This is the largest z-value
# we want to tolerate. Since the z-test approximates a unit normal
# distribution, it should almost definitely never exceed 6.
z_limit = 6.0
for stride in 0, 1, 4, 17:
alphas = [0.2, 1.0, 3.0]
if dt == dtypes.float64:
alphas = [0.01] + alphas
for alpha in alphas:
for scale in 9, 17:
# Gamma moments only defined for values less than the scale param.
max_moment = min(6, scale // 2)
sampler = self._Sampler(20000, alpha, 1 / scale, dt, seed=12345)
z_scores = util.test_moment_matching(
sampler(),
max_moment,
stats.gamma(alpha, scale=scale),
stride=stride,
)
self.assertAllLess(z_scores, z_limit)
def _testZeroDensity(self, alpha):
"""Zero isn't in the support of the gamma distribution.
But quantized floating point math has its limits.
TODO(bjp): Implement log-gamma sampler for small-shape distributions.
Args:
alpha: float shape value to test
"""
try:
from scipy import stats # pylint: disable=g-import-not-at-top
except ImportError as e:
tf_logging.warn("Cannot test zero density proportions: %s" % e)
return
allowable_zeros = {
dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
}
failures = []
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(10000, alpha, 1.0, dt, seed=12345)
x = sampler()
allowable = allowable_zeros[dt] * x.size
allowable = allowable * 2 if allowable < 10 else allowable * 1.05
if np.sum(x <= 0) > allowable:
failures += [dt]
self.assertEqual([], failures)
def testNonZeroSmallShape(self):
self._testZeroDensity(0.01)
def testNonZeroSmallishShape(self):
self._testZeroDensity(0.35)
# Asserts that different trials (1000 samples per trial) is unlikely
# to see the same sequence of values. Will catch buggy
# implementations which uses the same random number seed.
def testDistinct(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sampler = self._Sampler(1000, 2.0, 1.0, dt)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
count_limit = 20 if dt == dtypes.float16 else 10
self.assertLess(count, count_limit)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == dtypes.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for dt in dtypes.float16, dtypes.float32, dtypes.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, seed=345)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
"""CSE = constant subexpression eliminator.
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
for dtype in dtypes.float16, dtypes.float32, dtypes.float64:
with self.cached_session():
rnd1 = random_ops.random_gamma([24], 2.0, dtype=dtype)
rnd2 = random_ops.random_gamma([24], 2.0, dtype=dtype)
diff = rnd2 - rnd1
self.assertGreater(np.linalg.norm(diff.eval()), 0.1)
@test_util.run_deprecated_v1
def testShape(self):
# Fully known shape.
rnd = random_ops.random_gamma([150], 2.0)
self.assertEqual([150], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], 2.0, beta=[3.0, 4.0])
self.assertEqual([150, 2], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([150], array_ops.ones([1, 2, 3]))
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma([20, 30], array_ops.ones([1, 2, 3]))
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
[123], array_ops.placeholder(
dtypes.float32, shape=(2,)))
self.assertEqual([123, 2], rnd.get_shape().as_list())
# Partially known shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(1,)), array_ops.ones([7, 3]))
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
rnd = random_ops.random_gamma(
array_ops.placeholder(
dtypes.int32, shape=(3,)), array_ops.ones([9, 6]))
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
rnd = random_ops.random_gamma(
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
rnd = random_ops.random_gamma([50], array_ops.placeholder(dtypes.float32))
self.assertIs(None, rnd.get_shape().ndims)
@test_util.run_deprecated_v1
def testPositive(self):
n = int(10e3)
for dt in [dtypes.float16, dtypes.float32, dtypes.float64]:
with self.cached_session():
x = random_ops.random_gamma(shape=[n], alpha=0.001, dtype=dt, seed=0)
self.assertEqual(0, math_ops.reduce_sum(math_ops.cast(
math_ops.less_equal(x, 0.), dtype=dtypes.int64)).eval())
def testSizeTooLarge(self):
# Grappler asserts on size overflow, so this error is only caught when
# running eagerly.
if context.executing_eagerly():
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"overflow"):
rate = constant_op.constant(1.0, shape=(4, 4, 4, 4, 4))
self.evaluate(
random_ops.random_gamma(
shape=[46902, 51188, 34063, 59195], alpha=rate))
if __name__ == "__main__":
test.main()
| RandomGammaTest |
python | bokeh__bokeh | tests/unit/bokeh/test_client_server.py | {
"start": 2326,
"end": 2456
} | class ____(Model):
distance = DistanceSpec(42)
angle = AngleSpec(0)
logging.basicConfig(level=logging.DEBUG)
| UnitsSpecModel |
python | getsentry__sentry | tests/sentry/api/serializers/test_commit_filechange.py | {
"start": 411,
"end": 2968
} | class ____(TestCase):
def test_simple(self) -> None:
user = self.create_user()
project = self.create_project()
release = Release.objects.create(
organization_id=project.organization_id, version=uuid4().hex
)
release.add_project(project)
repository = Repository.objects.create(
organization_id=project.organization_id, name="test/test"
)
commit_author = CommitAuthor.objects.create(
name="stebe", email="stebe@sentry.io", organization_id=project.organization_id
)
commit = Commit.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="abc",
author=commit_author,
message="waddap",
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit,
order=1,
)
cfc = CommitFileChange.objects.create(
organization_id=project.organization_id,
commit_id=commit.id,
filename=".gitignore",
type="M",
)
result = serialize(cfc, user)
assert result["filename"] == ".gitignore"
assert result["commitMessage"] == "waddap"
assert result["author"] == {"name": "stebe", "email": "stebe@sentry.io"}
def test_no_author(self) -> None:
user = self.create_user()
project = self.create_project()
release = Release.objects.create(
organization_id=project.organization_id, version=uuid4().hex
)
release.add_project(project)
repository = Repository.objects.create(
organization_id=project.organization_id, name="test/test"
)
commit = Commit.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="abc",
message="waddap",
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit,
order=1,
)
cfc = CommitFileChange.objects.create(
organization_id=project.organization_id,
commit_id=commit.id,
filename=".gitignore",
type="M",
)
result = serialize(cfc, user)
assert result["author"] == {}
| CommitFileChangeSerializerTest |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 63528,
"end": 65820
} | class ____(AffineBase):
"""
The base class of all 2D affine transformations.
2D affine transformations are performed using a 3x3 numpy array::
a c e
b d f
0 0 1
This class provides the read-only interface. For a mutable 2D
affine transformation, use `Affine2D`.
Subclasses of this class will generally only need to override a
constructor and `~.Transform.get_matrix` that generates a custom 3x3 matrix.
"""
input_dims = 2
output_dims = 2
def frozen(self):
# docstring inherited
return Affine2D(self.get_matrix().copy())
@property
def is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == mtx[1, 0] == 0.0
def to_values(self):
"""
Return the values of the matrix as an ``(a, b, c, d, e, f)`` tuple.
"""
mtx = self.get_matrix()
return tuple(mtx[:2].swapaxes(0, 1).flat)
def transform_affine(self, values):
mtx = self.get_matrix()
if isinstance(values, np.ma.MaskedArray):
tpoints = affine_transform(values.data, mtx)
return np.ma.MaskedArray(tpoints, mask=np.ma.getmask(values))
return affine_transform(values, mtx)
if DEBUG:
_transform_affine = transform_affine
def transform_affine(self, values):
# docstring inherited
# The major speed trap here is just converting to the
# points to an array in the first place. If we can use
# more arrays upstream, that should help here.
if not isinstance(values, np.ndarray):
_api.warn_external(
f'A non-numpy array of type {type(values)} was passed in '
f'for transformation, which results in poor performance.')
return self._transform_affine(values)
def inverted(self):
# docstring inherited
if self._inverted is None or self._invalid:
mtx = self.get_matrix()
shorthand_name = None
if self._shorthand_name:
shorthand_name = '(%s)-1' % self._shorthand_name
self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
self._invalid = 0
return self._inverted
| Affine2DBase |
python | sympy__sympy | sympy/combinatorics/fp_groups.py | {
"start": 1706,
"end": 18578
} | class ____(DefaultPrinting):
"""
The FpGroup would take a FreeGroup and a list/tuple of relators, the
relators would be specified in such a way that each of them be equal to the
identity of the provided free group.
"""
is_group = True
is_FpGroup = True
is_PermutationGroup = False
def __init__(self, fr_grp, relators):
relators = _parse_relators(relators)
self.free_group = fr_grp
self.relators = relators
self.generators = self._generators()
self.dtype = type("FpGroupElement", (FpGroupElement,), {"group": self})
# CosetTable instance on identity subgroup
self._coset_table = None
# returns whether coset table on identity subgroup
# has been standardized
self._is_standardized = False
self._order = None
self._center = None
self._rewriting_system = RewritingSystem(self)
self._perm_isomorphism = None
return
def _generators(self):
return self.free_group.generators
def make_confluent(self):
'''
Try to make the group's rewriting system confluent
'''
self._rewriting_system.make_confluent()
return
def reduce(self, word):
'''
Return the reduced form of `word` in `self` according to the group's
rewriting system. If it's confluent, the reduced form is the unique normal
form of the word in the group.
'''
return self._rewriting_system.reduce(word)
def equals(self, word1, word2):
'''
Compare `word1` and `word2` for equality in the group
using the group's rewriting system. If the system is
confluent, the returned answer is necessarily correct.
(If it is not, `False` could be returned in some cases
where in fact `word1 == word2`)
'''
if self.reduce(word1*word2**-1) == self.identity:
return True
elif self._rewriting_system.is_confluent:
return False
return None
@property
def identity(self):
return self.free_group.identity
def __contains__(self, g):
return g in self.free_group
def subgroup(self, gens, C=None, homomorphism=False):
'''
Return the subgroup generated by `gens` using the
Reidemeister-Schreier algorithm
homomorphism -- When set to True, return a dictionary containing the images
of the presentation generators in the original group.
Examples
========
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> from sympy.combinatorics import free_group
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**5, (x*y)**2])
>>> H = [x*y, x**-1*y**-1*x*y*x]
>>> K, T = f.subgroup(H, homomorphism=True)
>>> T(K.generators)
[x*y, x**-1*y**2*x**-1]
'''
if not all(isinstance(g, FreeGroupElement) for g in gens):
raise ValueError("Generators must be `FreeGroupElement`s")
if not all(g.group == self.free_group for g in gens):
raise ValueError("Given generators are not members of the group")
if homomorphism:
g, rels, _gens = reidemeister_presentation(self, gens, C=C, homomorphism=True)
else:
g, rels = reidemeister_presentation(self, gens, C=C)
if g:
g = FpGroup(g[0].group, rels)
else:
g = FpGroup(free_group('')[0], [])
if homomorphism:
from sympy.combinatorics.homomorphisms import homomorphism
return g, homomorphism(g, self, g.generators, _gens, check=False)
return g
def coset_enumeration(self, H, strategy="relator_based", max_cosets=None,
draft=None, incomplete=False):
"""
Return an instance of ``coset table``, when Todd-Coxeter algorithm is
run over the ``self`` with ``H`` as subgroup, using ``strategy``
argument as strategy. The returned coset table is compressed but not
standardized.
An instance of `CosetTable` for `fp_grp` can be passed as the keyword
argument `draft` in which case the coset enumeration will start with
that instance and attempt to complete it.
When `incomplete` is `True` and the function is unable to complete for
some reason, the partially complete table will be returned.
"""
if not max_cosets:
max_cosets = CosetTable.coset_table_max_limit
if strategy == 'relator_based':
C = coset_enumeration_r(self, H, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
else:
C = coset_enumeration_c(self, H, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
if C.is_complete():
C.compress()
return C
def standardize_coset_table(self):
"""
Standardized the coset table ``self`` and makes the internal variable
``_is_standardized`` equal to ``True``.
"""
self._coset_table.standardize()
self._is_standardized = True
def coset_table(self, H, strategy="relator_based", max_cosets=None,
draft=None, incomplete=False):
"""
Return the mathematical coset table of ``self`` in ``H``.
"""
if not H:
if self._coset_table is not None:
if not self._is_standardized:
self.standardize_coset_table()
else:
C = self.coset_enumeration([], strategy, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
self._coset_table = C
self.standardize_coset_table()
return self._coset_table.table
else:
C = self.coset_enumeration(H, strategy, max_cosets=max_cosets,
draft=draft, incomplete=incomplete)
C.standardize()
return C.table
def order(self, strategy="relator_based"):
"""
Returns the order of the finitely presented group ``self``. It uses
the coset enumeration with identity group as subgroup, i.e ``H=[]``.
Examples
========
>>> from sympy.combinatorics import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x, y**2])
>>> f.order(strategy="coset_table_based")
2
"""
if self._order is not None:
return self._order
if self._coset_table is not None:
self._order = len(self._coset_table.table)
elif len(self.relators) == 0:
self._order = self.free_group.order()
elif len(self.generators) == 1:
self._order = abs(gcd([r.array_form[0][1] for r in self.relators]))
elif self._is_infinite():
self._order = S.Infinity
else:
gens, C = self._finite_index_subgroup()
if C:
ind = len(C.table)
self._order = ind*self.subgroup(gens, C=C).order()
else:
self._order = self.index([])
return self._order
def _is_infinite(self):
'''
Test if the group is infinite. Return `True` if the test succeeds
and `None` otherwise
'''
used_gens = set()
for r in self.relators:
used_gens.update(r.contains_generators())
if not set(self.generators) <= used_gens:
return True
# Abelianisation test: check is the abelianisation is infinite
abelian_rels = []
for rel in self.relators:
abelian_rels.append([rel.exponent_sum(g) for g in self.generators])
m = Matrix(Matrix(abelian_rels))
if 0 in invariant_factors(m):
return True
else:
return None
def _finite_index_subgroup(self, s=None):
'''
Find the elements of `self` that generate a finite index subgroup
and, if found, return the list of elements and the coset table of `self` by
the subgroup, otherwise return `(None, None)`
'''
gen = self.most_frequent_generator()
rels = list(self.generators)
rels.extend(self.relators)
if not s:
if len(self.generators) == 2:
s = [gen] + [g for g in self.generators if g != gen]
else:
rand = self.free_group.identity
i = 0
while ((rand in rels or rand**-1 in rels or rand.is_identity)
and i<10):
rand = self.random()
i += 1
s = [gen, rand] + [g for g in self.generators if g != gen]
mid = (len(s)+1)//2
half1 = s[:mid]
half2 = s[mid:]
draft1 = None
draft2 = None
m = 200
C = None
while not C and (m/2 < CosetTable.coset_table_max_limit):
m = min(m, CosetTable.coset_table_max_limit)
draft1 = self.coset_enumeration(half1, max_cosets=m,
draft=draft1, incomplete=True)
if draft1.is_complete():
C = draft1
half = half1
else:
draft2 = self.coset_enumeration(half2, max_cosets=m,
draft=draft2, incomplete=True)
if draft2.is_complete():
C = draft2
half = half2
if not C:
m *= 2
if not C:
return None, None
C.compress()
return half, C
def most_frequent_generator(self):
gens = self.generators
rels = self.relators
freqs = [sum(r.generator_count(g) for r in rels) for g in gens]
return gens[freqs.index(max(freqs))]
def random(self):
import random
r = self.free_group.identity
for i in range(random.randint(2,3)):
r = r*random.choice(self.generators)**random.choice([1,-1])
return r
def index(self, H, strategy="relator_based"):
"""
Return the index of subgroup ``H`` in group ``self``.
Examples
========
>>> from sympy.combinatorics import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**5, y**4, y*x*y**3*x**3])
>>> f.index([x])
4
"""
# TODO: use |G:H| = |G|/|H| (currently H can't be made into a group)
# when we know |G| and |H|
if H == []:
return self.order()
else:
C = self.coset_enumeration(H, strategy)
return len(C.table)
def __str__(self):
if self.free_group.rank > 30:
str_form = "<fp group with %s generators>" % self.free_group.rank
else:
str_form = "<fp group on the generators %s>" % str(self.generators)
return str_form
__repr__ = __str__
#==============================================================================
# PERMUTATION GROUP METHODS
#==============================================================================
def _to_perm_group(self):
'''
Return an isomorphic permutation group and the isomorphism.
The implementation is dependent on coset enumeration so
will only terminate for finite groups.
'''
from sympy.combinatorics import Permutation
from sympy.combinatorics.homomorphisms import homomorphism
if self.order() is S.Infinity:
raise NotImplementedError("Permutation presentation of infinite "
"groups is not implemented")
if self._perm_isomorphism:
T = self._perm_isomorphism
P = T.image()
else:
C = self.coset_table([])
gens = self.generators
images = [[C[i][2*gens.index(g)] for i in range(len(C))] for g in gens]
images = [Permutation(i) for i in images]
P = PermutationGroup(images)
T = homomorphism(self, P, gens, images, check=False)
self._perm_isomorphism = T
return P, T
def _perm_group_list(self, method_name, *args):
'''
Given the name of a `PermutationGroup` method (returning a subgroup
or a list of subgroups) and (optionally) additional arguments it takes,
return a list or a list of lists containing the generators of this (or
these) subgroups in terms of the generators of `self`.
'''
P, T = self._to_perm_group()
perm_result = getattr(P, method_name)(*args)
single = False
if isinstance(perm_result, PermutationGroup):
perm_result, single = [perm_result], True
result = []
for group in perm_result:
gens = group.generators
result.append(T.invert(gens))
return result[0] if single else result
def derived_series(self):
'''
Return the list of lists containing the generators
of the subgroups in the derived series of `self`.
'''
return self._perm_group_list('derived_series')
def lower_central_series(self):
'''
Return the list of lists containing the generators
of the subgroups in the lower central series of `self`.
'''
return self._perm_group_list('lower_central_series')
def center(self):
'''
Return the list of generators of the center of `self`.
'''
return self._perm_group_list('center')
def derived_subgroup(self):
'''
Return the list of generators of the derived subgroup of `self`.
'''
return self._perm_group_list('derived_subgroup')
def centralizer(self, other):
'''
Return the list of generators of the centralizer of `other`
(a list of elements of `self`) in `self`.
'''
T = self._to_perm_group()[1]
other = T(other)
return self._perm_group_list('centralizer', other)
def normal_closure(self, other):
'''
Return the list of generators of the normal closure of `other`
(a list of elements of `self`) in `self`.
'''
T = self._to_perm_group()[1]
other = T(other)
return self._perm_group_list('normal_closure', other)
def _perm_property(self, attr):
'''
Given an attribute of a `PermutationGroup`, return
its value for a permutation group isomorphic to `self`.
'''
P = self._to_perm_group()[0]
return getattr(P, attr)
@property
def is_abelian(self):
'''
Check if `self` is abelian.
'''
return self._perm_property("is_abelian")
@property
def is_nilpotent(self):
'''
Check if `self` is nilpotent.
'''
return self._perm_property("is_nilpotent")
@property
def is_solvable(self):
'''
Check if `self` is solvable.
'''
return self._perm_property("is_solvable")
@property
def elements(self):
'''
List the elements of `self`.
'''
P, T = self._to_perm_group()
return T.invert(P.elements)
@property
def is_cyclic(self):
"""
Return ``True`` if group is Cyclic.
"""
if len(self.generators) <= 1:
return True
try:
P, T = self._to_perm_group()
except NotImplementedError:
raise NotImplementedError("Check for infinite Cyclic group "
"is not implemented")
return P.is_cyclic
def abelian_invariants(self):
"""
Return Abelian Invariants of a group.
"""
try:
P, T = self._to_perm_group()
except NotImplementedError:
raise NotImplementedError("abelian invariants is not implemented"
"for infinite group")
return P.abelian_invariants()
def composition_series(self):
"""
Return subnormal series of maximum length for a group.
"""
try:
P, T = self._to_perm_group()
except NotImplementedError:
raise NotImplementedError("composition series is not implemented"
"for infinite group")
return P.composition_series()
| FpGroup |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 49867,
"end": 53946
} | class ____(multi_rv_frozen):
"""
Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is `None` the `~np.random.RandomState` singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used, seeded
with seed.
If `seed` is already a ``RandomState`` or ``Generator`` instance,
then that object is used.
Default is `None`.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
__class_getitem__ = None
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
def entropy(self):
return self._dist._entropy(self.dims, self.rowpsd.log_pdet,
self.colpsd.log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'entropy']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_matt_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
row_spread : array_like, optional
Row-wise 2nd order raw central moment matrix (default: ``1``)
col_spread : array_like, optional
Column-wise 2nd order raw central moment matrix (default: ``1``)
df : scalar, optional
Degrees of freedom (default: ``1``)
"""
_matt_doc_callparams_note = """\
If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `row_spread` and
`col_spread`, if these are provided, or set to ``1`` if ambiguous.
`row_spread` and `col_spread` can be two-dimensional array_likes specifying the
spread matrices directly. Alternatively, a one-dimensional array will
be be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matt_doc_frozen_callparams = ""
_matt_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
matrix_t_docdict_params = {
"_matt_doc_default_callparams": _matt_doc_default_callparams,
"_matt_doc_callparams_note": _matt_doc_callparams_note,
"_doc_random_state": _doc_random_state,
}
matrix_t_docdict_noparams = {
"_matt_doc_default_callparams": _matt_doc_frozen_callparams,
"_matt_doc_callparams_note": _matt_doc_frozen_callparams_note,
"_doc_random_state": _doc_random_state,
}
| matrix_normal_frozen |
python | streamlit__streamlit | lib/streamlit/web/server/routes.py | {
"start": 1872,
"end": 4182
} | class ____(tornado.web.StaticFileHandler):
def initialize(
self,
path: str,
default_filename: str | None = None,
reserved_paths: Sequence[str] = (),
) -> None:
self._reserved_paths = reserved_paths
super().initialize(path, default_filename)
def set_extra_headers(self, path: str) -> None:
"""Disable cache for HTML files and manifest.json.
Other assets like JS and CSS are suffixed with their hash, so they can
be cached indefinitely.
"""
is_index_url = len(path) == 0
if is_index_url or NO_CACHE_PATTERN.search(path):
self.set_header("Cache-Control", "no-cache")
else:
# For all other static files suffixed with their hash, we set a long cache time.
self.set_header(
"Cache-Control",
f"public, immutable, max-age={STATIC_ASSET_CACHE_MAX_AGE_SECONDS}",
)
def validate_absolute_path(self, root: str, absolute_path: str) -> str | None:
try:
return super().validate_absolute_path(root, absolute_path)
except tornado.web.HTTPError as e:
# If the file is not found, and there are no reserved paths,
# we try to serve the default file and allow the frontend to handle the issue.
if e.status_code == 404:
url_path = self.path
# self.path is OS specific file path, we convert it to a URL path
# for checking it against reserved paths.
if os.path.sep != "/":
url_path = url_path.replace(os.path.sep, "/")
if any(url_path.endswith(x) for x in self._reserved_paths):
raise
self.path = self.parse_url_path(self.default_filename or "index.html")
absolute_path = self.get_absolute_path(self.root, self.path)
return super().validate_absolute_path(root, absolute_path)
raise
def write_error(self, status_code: int, **kwargs: Any) -> None:
if status_code == 404:
index_file = os.path.join(file_util.get_static_dir(), "index.html")
self.render(index_file)
else:
super().write_error(status_code, **kwargs)
| StaticFileHandler |
python | huggingface__transformers | src/transformers/models/bert_japanese/tokenization_bert_japanese.py | {
"start": 20675,
"end": 22920
} | class ____:
"""Runs basic tokenization with jumanpp morphological parser."""
def __init__(
self,
do_lower_case=False,
never_split=None,
normalize_text=True,
trim_whitespace=False,
):
"""
Constructs a JumanppTokenizer.
Args:
**do_lower_case**: (*optional*) boolean (default True)
Whether to lowercase the input.
**never_split**: (*optional*) list of str
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
[`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
**normalize_text**: (*optional*) boolean (default True)
Whether to apply unicode normalization to text before tokenization.
**trim_whitespace**: (*optional*) boolean (default False)
Whether to trim all whitespace, tab, newline from tokens.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split if never_split is not None else []
self.normalize_text = normalize_text
self.trim_whitespace = trim_whitespace
try:
import rhoknp
except ImportError:
raise ImportError(
"You need to install rhoknp to use JumanppTokenizer. "
"See https://github.com/ku-nlp/rhoknp for installation."
)
self.juman = rhoknp.Jumanpp()
def tokenize(self, text, never_split=None, **kwargs):
"""Tokenizes a piece of text."""
if self.normalize_text:
text = unicodedata.normalize("NFKC", text)
text = text.strip()
never_split = self.never_split + (never_split if never_split is not None else [])
tokens = []
for mrph in self.juman.apply_to_sentence(text).morphemes:
token = mrph.text
if self.do_lower_case and token not in never_split:
token = token.lower()
if self.trim_whitespace:
if token.strip() == "":
continue
else:
token = token.strip()
tokens.append(token)
return tokens
| JumanppTokenizer |
python | kubernetes-client__python | kubernetes/client/models/v1_non_resource_attributes.py | {
"start": 383,
"end": 4199
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'path': 'str',
'verb': 'str'
}
attribute_map = {
'path': 'path',
'verb': 'verb'
}
def __init__(self, path=None, verb=None, local_vars_configuration=None): # noqa: E501
"""V1NonResourceAttributes - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._path = None
self._verb = None
self.discriminator = None
if path is not None:
self.path = path
if verb is not None:
self.verb = verb
@property
def path(self):
"""Gets the path of this V1NonResourceAttributes. # noqa: E501
Path is the URL path of the request # noqa: E501
:return: The path of this V1NonResourceAttributes. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1NonResourceAttributes.
Path is the URL path of the request # noqa: E501
:param path: The path of this V1NonResourceAttributes. # noqa: E501
:type: str
"""
self._path = path
@property
def verb(self):
"""Gets the verb of this V1NonResourceAttributes. # noqa: E501
Verb is the standard HTTP verb # noqa: E501
:return: The verb of this V1NonResourceAttributes. # noqa: E501
:rtype: str
"""
return self._verb
@verb.setter
def verb(self, verb):
"""Sets the verb of this V1NonResourceAttributes.
Verb is the standard HTTP verb # noqa: E501
:param verb: The verb of this V1NonResourceAttributes. # noqa: E501
:type: str
"""
self._verb = verb
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NonResourceAttributes):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NonResourceAttributes):
return True
return self.to_dict() != other.to_dict()
| V1NonResourceAttributes |
python | google__jax | tests/pallas/tpu_sparsecore_pallas_debug_check_test.py | {
"start": 1419,
"end": 5140
} | class ____(jtu.JaxTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
total_shards = int(os.environ.get("TEST_TOTAL_SHARDS", -1))
if total_shards == -1:
raise unittest.SkipTest("Tests can only be run with Bazel.")
loader = unittest.TestLoader()
test_cases = loader.loadTestsFromModule(
sys.modules['__main__']
).countTestCases()
if test_cases > total_shards:
raise RuntimeError(
"Each test with a failing assertion should be in a separate test"
" shard because they put the hardware in a halt state, causing"
" subsequent tests to fail. Make sure sharding is enabled and the"
f" shard count is at least {test_cases}."
)
def setUp(self):
if not jtu.is_device_tpu(5, "p") and not jtu.is_device_tpu_at_least(6):
self.skipTest("SparseCore only supported on TPU v5p+")
super().setUp()
def test_scalar_debug_check(self):
if not jtu.is_device_tpu_at_least(6):
# TODO: b/436509694 - Figure out why the test gets stuck on v5p.
self.skipTest("")
x = jnp.arange(8)
@pl.kernel(
out_shape=x,
mesh=plsc.ScalarSubcoreMesh(axis_name="core", num_cores=1),
)
def kernel(o_hbm_ref):
@functools.partial(
pl.run_scoped,
sem=pltpu.SemaphoreType.DMA,
)
def _(sem):
pltpu.async_copy(o_hbm_ref, o_hbm_ref, sem).wait()
pl.debug_check(True, "Check success!")
pl.debug_check(False, "Check failure!")
with pl.enable_debug_checks(), self.assertRaises(
jax.errors.JaxRuntimeError
) as error:
jax.block_until_ready(kernel())
self.assertNotIn("Check success!", str(error.exception))
self.assertIn("Check failure!", str(error.exception))
self.assertIn(
"check at DebugCheckTest.test_scalar_debug_check", str(error.exception)
)
def test_vector_debug_check(self):
x = jnp.arange(8)
@functools.partial(
pl.pallas_call,
out_shape=x,
compiler_params=pltpu.CompilerParams(
kernel_type=pltpu.KernelType.SC_VECTOR_SUBCORE
),
)
def kernel(_):
pl.debug_check(True, "Check success!")
pl.debug_check(False, "Check failure!")
with pl.enable_debug_checks(), self.assertRaises(
jax.errors.JaxRuntimeError
) as error:
jax.block_until_ready(kernel())
self.assertNotIn("Check success!", str(error.exception))
self.assertIn("Check failure!", str(error.exception))
self.assertIn(
"check at DebugCheckTest.test_vector_debug_check", str(error.exception)
)
def test_trigger_bounds_checker(self):
if "xla_sc_assert_level" in flags.FLAGS:
# The test crashes the process anyway, so no need to be clean.
flags.FLAGS.xla_sc_assert_level = "all-loads-stores"
else:
self.skipTest("TODO: Find another way to enable bounds checking.")
x = jnp.arange(8, dtype=jnp.int32)
# Index 8 is out-of-bounds.
indices = jnp.array([0, 1, 2, 3, 4, 5, 6, 8], dtype=jnp.int32)
@functools.partial(
pl.pallas_call,
out_shape=x,
compiler_params=pltpu.CompilerParams(
kernel_type=pltpu.KernelType.SC_VECTOR_SUBCORE
),
)
def kernel(x_ref, indices_ref, o_ref):
o_ref[...] = plsc.load_gather(x_ref, [indices_ref[...]])
# We expect this to fail with a runtime error from the bounds checker.
with self.assertRaisesRegex(
jax.errors.JaxRuntimeError,
"Trying to perform an indexed vector load from out of bounds address.",
):
jax.block_until_ready(kernel(x, indices))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| DebugCheckTest |
python | huggingface__transformers | src/transformers/models/owlv2/modeling_owlv2.py | {
"start": 16683,
"end": 17982
} | class ____(nn.Module):
def __init__(self, config: Owlv2TextConfig):
super().__init__()
self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = inputs_embeds + position_embeddings
return embeddings
# Copied from transformers.models.owlvit.modeling_owlvit.OwlViTAttention with OwlViT->Owlv2
| Owlv2TextEmbeddings |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 3353,
"end": 3415
} | class ____(DynamicApiError):
""" 410: StatusGone """
| GoneError |
python | skorch-dev__skorch | skorch/tests/callbacks/test_training.py | {
"start": 463,
"end": 18033
} | class ____:
@pytest.fixture
def checkpoint_cls(self):
from skorch.callbacks import Checkpoint
return Checkpoint
@pytest.fixture
def save_params_mock(self):
with patch('skorch.NeuralNet.save_params') as mock:
yield mock
@pytest.fixture(params=['torch', 'safetensors'])
def use_safetensors(self, request):
return request.param == 'safetensors'
@pytest.fixture
def pickle_dump_mock(self):
with patch('pickle.dump') as mock:
yield mock
@pytest.fixture
def net_cls(self):
"""very simple network that trains for 10 epochs"""
from skorch import NeuralNetRegressor
from skorch.toy import make_regressor
module_cls = make_regressor(
input_units=1,
num_hidden=0,
output_units=1,
)
return partial(
NeuralNetRegressor,
module=module_cls,
max_epochs=10,
batch_size=10)
@pytest.fixture(scope='module')
def data(self):
# have 10 examples so we can do a nice CV split
X = np.zeros((10, 1), dtype='float32')
y = np.zeros((10, 1), dtype='float32')
return X, y
def test_init_with_wrong_kwarg_name_raises(self, checkpoint_cls):
checkpoint_cls(f_foobar='foobar.pt').initialize() # works
msg = "Checkpoint got an unexpected argument 'foobar', did you mean 'f_foobar'?"
with pytest.raises(TypeError, match=msg):
checkpoint_cls(foobar='foobar.pt').initialize()
def test_init_with_f_params_and_f_module_raises(self, checkpoint_cls):
msg = "Checkpoint called with both f_params and f_module, please choose one"
with pytest.raises(TypeError, match=msg):
checkpoint_cls(f_module='weights.pt', f_params='params.pt').initialize()
def test_init_with_f_optimizer_and_safetensors_raises(self, checkpoint_cls):
msg = (
"Cannot save optimizer state when using safetensors, "
"please set f_optimizer=None or don't use safetensors."
)
with pytest.raises(ValueError, match=msg):
checkpoint_cls(f_optimizer='optimizer.safetensors', use_safetensors=True)
def test_none_monitor_saves_always(
self, save_params_mock, net_cls, checkpoint_cls, data):
sink = Mock()
net = net_cls(callbacks=[
checkpoint_cls(monitor=None, sink=sink,
event_name='event_another'),
])
net.fit(*data)
assert save_params_mock.call_count == 4 * len(net.history)
assert sink.call_count == len(net.history)
assert all((x is True) for x in net.history[:, 'event_another'])
@pytest.mark.parametrize('message,files', [
('Unable to save module state to params.pt, '
'Exception: encoding error',
{
'f_params': 'params.pt',
'f_optimizer': None,
'f_criterion': None,
'f_history': None,
}),
('Unable to save optimizer state to optimizer.pt, '
'Exception: encoding error',
{
'f_params': None,
'f_optimizer': 'optimizer.pt',
'f_criterion': None,
'f_history': None,
}),
('Unable to save criterion state to criterion.pt, '
'Exception: encoding error',
{
'f_params': None,
'f_optimizer': None,
'f_criterion': 'criterion.pt',
'f_history': None,
}),
('Unable to save history to history.json, '
'Exception: encoding error',
{
'f_params': None,
'f_optimizer': None,
'f_criterion': None,
'f_history': 'history.json',
}),
])
def test_outputs_to_sink_when_save_params_errors(
self, save_params_mock, net_cls, checkpoint_cls, data,
message, files):
sink = Mock()
save_params_mock.side_effect = Exception('encoding error')
net = net_cls(callbacks=[
checkpoint_cls(monitor=None, sink=sink, **files)
])
net.fit(*data)
assert save_params_mock.call_count == len(net.history)
assert sink.call_count == 2*len(net.history)
save_error_messages = [call(message)] * len(net.history)
sink.assert_has_calls(save_error_messages, any_order=True)
@pytest.mark.parametrize('f_name, mode', [
('f_params', 'w'),
('f_optimizer', 'w'),
('f_criterion', 'w'),
('f_history', 'w'),
('f_pickle', 'wb')
])
def test_init_with_dirname_and_file_like_object_error(
self, checkpoint_cls, tmpdir, f_name, mode):
from skorch.exceptions import SkorchException
skorch_dir = tmpdir.mkdir("skorch")
exp_dir = skorch_dir.join("exp1")
f = skorch_dir.join(f_name + ".pt")
with f.open(mode) as fp:
with pytest.raises(SkorchException) as e:
checkpoint_cls(**{f_name: fp}, dirname=str(exp_dir))
expected = "dirname can only be used when f_* are strings"
assert str(e.value) == expected
@pytest.mark.parametrize('f_name, mode', [
('f_params', 'w'),
('f_optimizer', 'w'),
('f_criterion', 'w'),
('f_history', 'w'),
('f_pickle', 'wb')
])
def test_initialize_with_dirname_and_file_like_object_error(
self, checkpoint_cls, tmpdir, f_name, mode):
from skorch.exceptions import SkorchException
skorch_dir = tmpdir.mkdir("skorch")
exp_dir = skorch_dir.join("exp1")
f = skorch_dir.join(f_name + ".pt")
with f.open(mode) as fp:
with pytest.raises(SkorchException) as e:
cp = checkpoint_cls(dirname=str(exp_dir))
setattr(cp, f_name, fp)
cp.initialize()
expected = "dirname can only be used when f_* are strings"
assert str(e.value) == expected
def test_default_without_validation_raises_meaningful_error(
self, net_cls, checkpoint_cls, data):
net = net_cls(
callbacks=[
checkpoint_cls(),
],
train_split=None
)
from skorch.exceptions import SkorchException
msg_expected = (
r"Key 'valid_loss_best' was not found in history. "
r"Make sure you have validation data if you use "
r"validation scores for checkpointing."
)
with pytest.raises(SkorchException, match=msg_expected):
net.fit(*data)
def test_string_monitor_and_formatting(
self, save_params_mock, net_cls, checkpoint_cls, data):
def epoch_3_scorer(net, *_):
return 1 if net.history[-1, 'epoch'] == 3 else 0
from skorch.callbacks import EpochScoring
scoring = EpochScoring(
scoring=epoch_3_scorer, on_train=True, lower_is_better=False)
sink = Mock()
cb = checkpoint_cls(
monitor='epoch_3_scorer_best',
f_params='model_{last_epoch[epoch]}_{net.max_epochs}.pt',
f_optimizer='optimizer_{last_epoch[epoch]}_{net.max_epochs}.pt',
f_criterion='criterion_{last_epoch[epoch]}_{net.max_epochs}.pt',
sink=sink)
net = net_cls(callbacks=[
('my_score', scoring), cb
])
net.fit(*data)
assert save_params_mock.call_count == 8
assert cb.get_formatted_files(net) == {
'f_params': 'model_3_10.pt',
'f_optimizer': 'optimizer_3_10.pt',
'f_criterion': 'criterion_3_10.pt',
'f_history': 'history.json',
'f_pickle': None
}
save_params_mock.assert_has_calls(
[
# params is turned into module
call(f_module='model_1_10.pt', use_safetensors=False),
call(f_optimizer='optimizer_1_10.pt', use_safetensors=False),
call(f_criterion='criterion_1_10.pt', use_safetensors=False),
call(f_history='history.json', use_safetensors=False),
# params is turned into module
call(f_module='model_3_10.pt', use_safetensors=False),
call(f_optimizer='optimizer_3_10.pt', use_safetensors=False),
call(f_criterion='criterion_3_10.pt', use_safetensors=False),
call(f_history='history.json', use_safetensors=False),
],
any_order=True,
)
assert sink.call_count == 2
# The first epoch will always be saved. `epoch_3_scorer` returns 1 at
# epoch 3, which will trigger another checkpoint. For all other epochs
# `epoch_3_scorer` returns 0, which does not trigger a checkpoint.
assert [True, False, True] + [False] * 7 == net.history[:, 'event_cp']
def test_save_all_targets(
self, save_params_mock, pickle_dump_mock,
net_cls, checkpoint_cls, data, use_safetensors):
kwargs = dict(
monitor=None,
f_params='params.pt',
f_pickle='model.pkl',
f_optimizer='optimizer.pt',
f_criterion='criterion.pt',
f_history='history.json',
use_safetensors=use_safetensors,
)
if use_safetensors:
# safetensors cannot safe optimizers
kwargs['f_optimizer'] = None
net = net_cls(callbacks=[checkpoint_cls(**kwargs)])
net.fit(*data)
if use_safetensors:
# no optimizer
assert save_params_mock.call_count == 3 * len(net.history)
else:
assert save_params_mock.call_count == 4 * len(net.history)
assert pickle_dump_mock.call_count == len(net.history)
kwargs = {'use_safetensors': use_safetensors}
calls_expected = [
call(f_module='params.pt', **kwargs), # params is turned into module
call(f_criterion='criterion.pt', **kwargs),
call(f_history='history.json', **kwargs),
]
if not use_safetensors:
# safetensors cannot safe optimizers
calls_expected.append(call(f_optimizer='optimizer.pt', **kwargs))
save_params_mock.assert_has_calls(
calls_expected * len(net.history),
any_order=True,
)
def test_save_all_targets_with_prefix(
self, save_params_mock, pickle_dump_mock,
net_cls, checkpoint_cls, data, use_safetensors):
kwargs = dict(
monitor=None,
f_params='params.pt',
f_pickle='model.pkl',
f_optimizer='optimizer.pt',
f_criterion='criterion.pt',
f_history='history.json',
use_safetensors=use_safetensors,
fn_prefix="exp1_",
)
if use_safetensors:
# safetensors cannot safe optimizers
kwargs['f_optimizer'] = None
cp = checkpoint_cls(**kwargs)
net = net_cls(callbacks=[cp])
net.fit(*data)
assert cp.f_history_ == "exp1_history.json"
if use_safetensors:
assert save_params_mock.call_count == 3 * len(net.history)
else:
assert save_params_mock.call_count == 4 * len(net.history)
assert pickle_dump_mock.call_count == len(net.history)
kwargs = {'use_safetensors': use_safetensors}
calls_expected = [
call(f_module='exp1_params.pt', **kwargs),
call(f_criterion='exp1_criterion.pt', **kwargs),
call(f_history='exp1_history.json', **kwargs),
]
if not use_safetensors:
# safetensors cannot safe optimizers
calls_expected.append(call(f_optimizer='exp1_optimizer.pt', **kwargs))
save_params_mock.assert_has_calls(
calls_expected * len(net.history),
any_order=True,
)
def test_save_all_targets_with_prefix_and_dirname(
self, save_params_mock, pickle_dump_mock,
net_cls, checkpoint_cls, data, tmpdir, use_safetensors):
skorch_dir = tmpdir.mkdir('skorch').join('exp1')
kwargs = dict(
monitor=None,
f_params='params.pt',
f_history='history.json',
f_pickle='model.pkl',
f_optimizer='optimizer.pt',
f_criterion='criterion.pt',
fn_prefix="unet_",
dirname=str(skorch_dir),
use_safetensors=use_safetensors,
)
if use_safetensors:
# safetensors cannot safe optimizers
kwargs['f_optimizer'] = None
cp = checkpoint_cls(**kwargs)
net = net_cls(callbacks=[cp])
net.fit(*data)
f_params = skorch_dir.join('unet_params.pt')
f_optimizer = skorch_dir.join('unet_optimizer.pt')
f_criterion = skorch_dir.join('unet_criterion.pt')
f_history = skorch_dir.join('unet_history.json')
assert cp.f_history_ == str(f_history)
if use_safetensors:
assert save_params_mock.call_count == 3 * len(net.history)
else:
assert save_params_mock.call_count == 4 * len(net.history)
assert pickle_dump_mock.call_count == len(net.history)
kwargs = {'use_safetensors': use_safetensors}
calls_expected = [
call(f_module=str(f_params), **kwargs), # params is turned into module
call(f_criterion=str(f_criterion), **kwargs),
call(f_history=str(f_history), **kwargs),
]
if not use_safetensors:
calls_expected.append(call(f_optimizer=str(f_optimizer), **kwargs))
save_params_mock.assert_has_calls(
calls_expected * len(net.history),
any_order=True,
)
assert skorch_dir.exists()
def test_save_no_targets(
self, save_params_mock, pickle_dump_mock,
net_cls, checkpoint_cls, data):
net = net_cls(callbacks=[
checkpoint_cls(
monitor=None,
f_params=None,
f_optimizer=None,
f_criterion=None,
f_history=None,
f_pickle=None,
),
])
net.fit(*data)
assert save_params_mock.call_count == 0
assert pickle_dump_mock.call_count == 0
def test_warnings_when_monitor_appears_in_history(
self, net_cls, checkpoint_cls, save_params_mock, data):
net = net_cls(
callbacks=[checkpoint_cls(monitor="valid_loss")],
max_epochs=1)
exp_warn = (
"Checkpoint monitor parameter is set to 'valid_loss' and the "
"history contains 'valid_loss_best'. Perhaps you meant to set the "
"parameter to 'valid_loss_best'")
with pytest.warns(UserWarning, match=exp_warn):
net.fit(*data)
assert save_params_mock.call_count == 4
def test_save_custom_module(
self, save_params_mock, module_cls, checkpoint_cls, data, use_safetensors
):
# checkpointing custom modules works
from skorch import NeuralNetRegressor
class MyNet(NeuralNetRegressor):
"""Net with custom module"""
def __init__(self, *args, mymodule=module_cls, **kwargs):
self.mymodule = mymodule
super().__init__(*args, **kwargs)
def initialize_module(self, *args, **kwargs):
super().initialize_module(*args, **kwargs)
params = self.get_params_for('mymodule')
self.mymodule_ = self.mymodule(**params)
return self
cp = checkpoint_cls(
monitor=None,
f_params=None,
f_optimizer=None,
f_criterion=None,
f_history=None,
f_mymodule='mymodule.pt',
use_safetensors=use_safetensors,
)
net = MyNet(module_cls, callbacks=[cp])
net.fit(*data)
assert save_params_mock.call_count == 1 * len(net.history)
kwargs = {'use_safetensors': use_safetensors}
save_params_mock.assert_has_calls(
[call(f_mymodule='mymodule.pt', **kwargs)] * len(net.history)
)
@pytest.fixture
def load_params(self):
import torch
return torch.load
@pytest.mark.parametrize('load_best_flag', [False, True])
def test_automatically_load_checkpoint(
self, net_cls, checkpoint_cls, data, tmp_path,
load_params, load_best_flag,
):
# checkpoint once at the beginning of training.
# when restoring at the end of training, the parameters
# of the net should not differ. If we do not restore
# then the parameters must differ.
path_cb = tmp_path / 'params_cb.pt'
path_net = tmp_path / 'params_net.pt'
def save_once_monitor(net):
return len(net.history) == 1
net = net_cls(
max_epochs=3,
callbacks=[
checkpoint_cls(
monitor=save_once_monitor,
f_params=path_cb,
load_best=load_best_flag,
),
],
)
net.fit(*data)
net.save_params(path_net)
params_cb = load_params(path_cb)
params_net = load_params(path_net)
if load_best_flag:
assert params_cb == params_net
else:
assert params_cb != params_net
| TestCheckpoint |
python | getsentry__sentry | src/sentry/integrations/slack/webhooks/command.py | {
"start": 2662,
"end": 6986
} | class ____(SlackDMEndpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = ()
permission_classes = ()
slack_request_class = SlackCommandRequest
def reply(self, slack_request: SlackDMRequest, message: str) -> Response:
return self.respond(
{
"response_type": "ephemeral",
"replace_original": False,
"text": message,
}
)
def link_team(self, slack_request: SlackDMRequest) -> Response:
if slack_request.channel_name == DIRECT_MESSAGE_CHANNEL_NAME:
return self.reply(slack_request, LINK_FROM_CHANNEL_MESSAGE)
logger_params: dict[str, int] = {}
identity_user = slack_request.get_identity_user()
if not identity_user:
_logger.info("no-identity-user", extra=logger_params)
return self.reply(slack_request, LINK_USER_FIRST_MESSAGE)
integration = slack_request.integration
logger_params["integration_id"] = integration.id
organization_memberships = OrganizationMember.objects.get_for_integration(
integration, identity_user
)
has_valid_role = False
for organization_membership in organization_memberships:
if is_valid_role(organization_membership) or is_team_admin(organization_membership):
has_valid_role = True
if not has_valid_role:
return self.reply(slack_request, INSUFFICIENT_ROLE_MESSAGE)
if not slack_request.user_id:
return self.reply(slack_request, NO_USER_ID_MESSAGE)
if not slack_request.channel_id:
return self.reply(slack_request, NO_CHANNEL_ID_MESSAGE)
associate_url = build_team_linking_url(
integration=integration,
slack_id=slack_request.user_id,
channel_id=slack_request.channel_id,
channel_name=slack_request.channel_name,
response_url=slack_request.response_url,
)
return self.reply(slack_request, LINK_TEAM_MESSAGE.format(associate_url=associate_url))
def unlink_team(self, slack_request: SlackDMRequest) -> Response:
if slack_request.channel_name == DIRECT_MESSAGE_CHANNEL_NAME:
return self.reply(slack_request, LINK_FROM_CHANNEL_MESSAGE)
identity_user = slack_request.get_identity_user()
if not identity_user:
return self.reply(slack_request, LINK_USER_FIRST_MESSAGE)
integration = slack_request.integration
organization_memberships = OrganizationMember.objects.get_for_integration(
integration, identity_user
)
found: OrganizationMember | None = None
for organization_membership in organization_memberships:
if is_team_linked_to_channel(organization_membership.organization, slack_request):
found = organization_membership
if not found:
return self.reply(slack_request, TEAM_NOT_LINKED_MESSAGE)
if not is_valid_role(found) and not is_team_admin(found):
return self.reply(slack_request, INSUFFICIENT_ROLE_MESSAGE)
if not slack_request.user_id:
return self.reply(slack_request, NO_USER_ID_MESSAGE)
if not slack_request.channel_id:
return self.reply(slack_request, NO_CHANNEL_ID_MESSAGE)
associate_url = build_team_unlinking_url(
integration=integration,
organization_id=found.organization.id,
slack_id=slack_request.user_id,
channel_id=slack_request.channel_id,
channel_name=slack_request.channel_name,
response_url=slack_request.response_url,
)
return self.reply(slack_request, UNLINK_TEAM_MESSAGE.format(associate_url=associate_url))
def post(self, request: Request) -> Response:
try:
slack_request = self.slack_request_class(request)
slack_request.validate()
except SlackRequestError as e:
if e.status == status.HTTP_403_FORBIDDEN:
return self.respond(SlackDisconnectedMessageBuilder().build())
return self.respond(status=e.status)
return super().post_dispatcher(slack_request)
| SlackCommandsEndpoint |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_forms.py | {
"start": 479,
"end": 1126
} | class ____(TestCase):
def setUp(self):
self.owner = fixture.get(User)
self.user = fixture.get(User)
self.project = fixture.get(Project)
self.organization = fixture.get(
Organization,
name="Mozilla",
slug="mozilla",
owners=[self.owner],
projects=[self.project],
stripe_id="1234",
)
self.team = fixture.get(
Team,
name="foobar",
slug="foobar",
access="admin",
organization=self.organization,
)
self.client.force_login(self.owner)
| OrganizationTestCase |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group.py | {
"start": 455,
"end": 3015
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.permission = GroupAiPermission()
self.project = self.create_project()
self.group = self.create_group(project=self.project)
self.demo_user = self.create_user()
def _demo_mode_enabled(self) -> ContextManager[None]:
return override_options({"demo-mode.enabled": True, "demo-mode.users": [self.demo_user.id]})
def has_object_perm(
self,
method: str,
obj: Group,
auth: ApiToken | None = None,
user: User | None = None,
is_superuser: bool | None = None,
) -> bool:
request = self.make_request(user=user, auth=auth, method=method, is_superuser=is_superuser)
drf_request = drf_request_from_request(request)
return self.permission.has_permission(
drf_request, APIView()
) and self.permission.has_object_permission(drf_request, APIView(), obj)
def test_demo_user_safe_methods(self) -> None:
with self._demo_mode_enabled():
for method in ("GET", "HEAD", "OPTIONS"):
assert self.has_object_perm(method, self.group, user=self.demo_user)
def test_demo_user_post_allowed(self) -> None:
with self._demo_mode_enabled():
assert self.has_object_perm("POST", self.group, user=self.demo_user)
def test_demo_user_unsafe_methods_blocked(self) -> None:
with self._demo_mode_enabled():
for method in ("PUT", "DELETE", "PATCH"):
assert not self.has_object_perm(method, self.group, user=self.demo_user)
def test_demo_user_demo_mode_disabled(self) -> None:
for method in ("GET", "POST", "PUT", "DELETE"):
assert not self.has_object_perm(method, self.group, user=self.demo_user)
def test_regular_user_with_access(self) -> None:
user = self.create_user()
self.create_member(
user=user,
organization=self.project.organization,
role="member",
teams=[self.project.teams.first()],
)
assert self.has_object_perm("GET", self.group, user=user)
assert self.has_object_perm("POST", self.group, user=user)
assert self.has_object_perm("DELETE", self.group, user=user)
def test_superuser_access(self) -> None:
superuser = self.create_user(is_superuser=True)
for method in ("GET", "POST", "PUT", "DELETE"):
assert self.has_object_perm(method, self.group, user=superuser, is_superuser=True)
| GroupAiPermissionTest |
python | dateutil__dateutil | src/dateutil/parser/_parser.py | {
"start": 58000,
"end": 58616
} | class ____(ValueError):
"""Exception subclass used for any failure to parse a datetime string.
This is a subclass of :py:exc:`ValueError`, and should be raised any time
earlier versions of ``dateutil`` would have raised ``ValueError``.
.. versionadded:: 2.8.1
"""
def __str__(self):
try:
return self.args[0] % self.args[1:]
except (TypeError, IndexError):
return super(ParserError, self).__str__()
def __repr__(self):
args = ", ".join("'%s'" % arg for arg in self.args)
return "%s(%s)" % (self.__class__.__name__, args)
| ParserError |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 135496,
"end": 136086
} | class ____(BaseAlertRuleTriggerActionTest):
@cached_property
def action(self):
return create_alert_rule_trigger_action(
self.trigger,
AlertRuleTriggerAction.Type.EMAIL,
AlertRuleTriggerAction.TargetType.USER,
target_identifier=str(self.user.id),
)
def test(self) -> None:
action_id = self.action.id
delete_alert_rule_trigger_action(self.action)
with pytest.raises(AlertRuleTriggerAction.DoesNotExist):
AlertRuleTriggerAction.objects.get(id=action_id)
| DeleteAlertRuleTriggerAction |
python | rq__rq | tests/test_registry.py | {
"start": 810,
"end": 9772
} | class ____(RQTestCase):
"""Test all the BaseRegistry functionality"""
def setUp(self):
super().setUp()
self.registry = BaseRegistry(connection=self.connection)
def test_init(self):
"""Registry can be instantiated with queue or name/Redis connection"""
queue = Queue('foo', connection=self.connection)
registry = BaseRegistry(queue=queue)
self.assertEqual(registry.name, queue.name)
self.assertEqual(registry.connection, queue.connection)
self.assertEqual(registry.serializer, queue.serializer)
registry = BaseRegistry('bar', self.connection, serializer=JSONSerializer)
self.assertEqual(registry.name, 'bar')
self.assertEqual(registry.connection, self.connection)
self.assertEqual(registry.serializer, JSONSerializer)
def test_key(self):
self.assertEqual(self.registry.key, 'rq:registry:default')
def test_custom_job_class(self):
registry = BaseRegistry(job_class=CustomJob)
self.assertIsNot(registry.job_class, self.registry.job_class)
def test_contains(self):
queue = Queue(connection=self.connection)
job = queue.enqueue(say_hello)
self.assertNotIn(job, self.registry)
self.assertNotIn(job.id, self.registry)
self.registry.add(job, 5)
self.assertIn(job, self.registry)
self.assertIn(job.id, self.registry)
def test_get_expiration_time(self):
"""registry.get_expiration_time() returns correct datetime objects"""
queue = Queue(connection=self.connection)
job = queue.enqueue(say_hello)
self.registry.add(job, 5)
time = self.registry.get_expiration_time(job)
expected_time = (now() + timedelta(seconds=5)).replace(microsecond=0)
self.assertGreaterEqual(time, expected_time - timedelta(seconds=2))
self.assertLessEqual(time, expected_time + timedelta(seconds=2))
def test_add_and_remove(self):
"""Adding and removing job from BaseRegistry."""
timestamp = current_timestamp()
queue = Queue(connection=self.connection)
job = queue.enqueue(say_hello)
# Test that job is added with the right score
self.registry.add(job, 1000)
self.assertLess(self.connection.zscore(self.registry.key, job.id), timestamp + 1002)
# Ensure that a timeout of -1 results in a score of inf
self.registry.add(job, -1)
self.assertEqual(self.connection.zscore(self.registry.key, job.id), float('inf'))
# Ensure that job is removed from sorted set, but job key is not deleted
self.registry.remove(job)
self.assertIsNone(self.connection.zscore(self.registry.key, job.id))
self.assertTrue(self.connection.exists(job.key))
self.registry.add(job, -1)
# registry.remove() also accepts job.id
self.registry.remove(job.id)
self.assertIsNone(self.connection.zscore(self.registry.key, job.id))
self.registry.add(job, -1)
# delete_job = True deletes job key
self.registry.remove(job, delete_job=True)
self.assertIsNone(self.connection.zscore(self.registry.key, job.id))
self.assertFalse(self.connection.exists(job.key))
job = queue.enqueue(say_hello)
self.registry.add(job, -1)
# delete_job = True also works with job.id
self.registry.remove(job.id, delete_job=True)
self.assertIsNone(self.connection.zscore(self.registry.key, job.id))
self.assertFalse(self.connection.exists(job.key))
def test_add_and_remove_with_serializer(self):
"""Adding and removing job from BaseRegistry (with serializer)."""
# delete_job = True also works with job.id and custom serializer
queue = Queue(connection=self.connection, serializer=JSONSerializer)
registry = BaseRegistry(connection=self.connection, serializer=JSONSerializer)
job = queue.enqueue(say_hello)
registry.add(job, -1)
registry.remove(job.id, delete_job=True)
self.assertIsNone(self.connection.zscore(registry.key, job.id))
self.assertFalse(self.connection.exists(job.key))
def test_get_job_ids(self):
"""Getting job ids from BaseRegistry."""
timestamp = current_timestamp()
self.connection.zadd(self.registry.key, {'will-be-cleaned-up': 1})
self.connection.zadd(self.registry.key, {'foo': timestamp + 10})
self.connection.zadd(self.registry.key, {'bar': timestamp + 20})
self.assertEqual(self.registry.get_job_ids(), ['will-be-cleaned-up', 'foo', 'bar'])
def test_get_expired_job_ids(self):
"""Getting expired job ids form BaseRegistry."""
timestamp = current_timestamp()
self.connection.zadd(self.registry.key, {'foo': 1})
self.connection.zadd(self.registry.key, {'bar': timestamp + 10})
self.connection.zadd(self.registry.key, {'baz': timestamp + 30})
self.assertEqual(self.registry.get_expired_job_ids(), ['foo'])
self.assertEqual(self.registry.get_expired_job_ids(timestamp + 20), ['foo', 'bar'])
# CanceledJobRegistry does not implement get_expired_job_ids()
registry = CanceledJobRegistry(connection=self.connection)
self.assertRaises(NotImplementedError, registry.get_expired_job_ids)
def test_count(self):
"""BaseRegistry returns the right number of job count."""
timestamp = current_timestamp() + 10
self.connection.zadd(self.registry.key, {'will-be-cleaned-up': 1})
self.connection.zadd(self.registry.key, {'foo': timestamp})
self.connection.zadd(self.registry.key, {'bar': timestamp})
self.assertEqual(self.registry.count, 3)
self.assertEqual(len(self.registry), 3)
def test_get_job_count(self):
"""Ensure cleanup is not called and does not affect the reported number of jobs.
Note, the original motivation to stop calling cleanup was to make the count operation O(1) to allow usage of
monitoring tools and avoid side effects of failure callbacks that cleanup triggers.
"""
timestamp = current_timestamp() + 10
self.connection.zadd(self.registry.key, {'will-be-counted-despite-outdated': 1})
self.connection.zadd(self.registry.key, {'foo': timestamp})
self.connection.zadd(self.registry.key, {'bar': timestamp})
with mock.patch.object(self.registry, 'cleanup') as mock_cleanup:
self.assertEqual(self.registry.get_job_count(cleanup=False), 3)
mock_cleanup.assert_not_called()
def test_clean_registries(self):
"""clean_registries() cleans Started and Finished job registries."""
queue = Queue(connection=self.connection)
finished_job_registry = FinishedJobRegistry(connection=self.connection)
self.connection.zadd(finished_job_registry.key, {'foo': 1})
started_job_registry = StartedJobRegistry(connection=self.connection)
self.connection.zadd(started_job_registry.key, {'foo:execution_id': 1})
failed_job_registry = FailedJobRegistry(connection=self.connection)
self.connection.zadd(failed_job_registry.key, {'foo': 1})
clean_registries(queue)
self.assertEqual(self.connection.zcard(finished_job_registry.key), 0)
self.assertEqual(self.connection.zcard(started_job_registry.key), 0)
self.assertEqual(self.connection.zcard(failed_job_registry.key), 0)
def test_clean_registries_with_serializer(self):
"""clean_registries() cleans Started and Finished job registries (with serializer)."""
queue = Queue(connection=self.connection, serializer=JSONSerializer)
finished_job_registry = FinishedJobRegistry(connection=self.connection, serializer=JSONSerializer)
self.connection.zadd(finished_job_registry.key, {'foo': 1})
started_job_registry = StartedJobRegistry(connection=self.connection, serializer=JSONSerializer)
self.connection.zadd(started_job_registry.key, {'foo:execution_id': 1})
failed_job_registry = FailedJobRegistry(connection=self.connection, serializer=JSONSerializer)
self.connection.zadd(failed_job_registry.key, {'foo': 1})
clean_registries(queue)
self.assertEqual(self.connection.zcard(finished_job_registry.key), 0)
self.assertEqual(self.connection.zcard(started_job_registry.key), 0)
self.assertEqual(self.connection.zcard(failed_job_registry.key), 0)
def test_get_queue(self):
"""registry.get_queue() returns the right Queue object."""
registry = BaseRegistry(connection=self.connection)
self.assertEqual(registry.get_queue(), Queue(connection=self.connection))
registry = BaseRegistry('foo', connection=self.connection, serializer=JSONSerializer)
self.assertEqual(registry.get_queue(), Queue('foo', connection=self.connection, serializer=JSONSerializer))
| TestRegistry |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 231984,
"end": 232579
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of(CommitEdge), graphql_name="edges")
nodes = sgqlc.types.Field(sgqlc.types.list_of("Commit"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| CommitHistoryConnection |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 68769,
"end": 69063
} | class ____(_PrintableStructure):
_fields_ = [
('year', c_uint32),
('month', c_uint16),
('day', c_uint16),
('hour', c_uint16),
('min', c_uint16),
('sec', c_uint16),
('status', c_uint8),
]
| c_nvmlGridLicenseExpiry_t |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1536328,
"end": 1537139
} | class ____(sgqlc.types.Type, Node):
"""Represents an 'unlabeled' event on a given issue or pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "label", "labelable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
label = sgqlc.types.Field(sgqlc.types.non_null(Label), graphql_name="label")
"""Identifies the label associated with the 'unlabeled' event."""
labelable = sgqlc.types.Field(sgqlc.types.non_null(Labelable), graphql_name="labelable")
"""Identifies the `Labelable` associated with the event."""
| UnlabeledEvent |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 1380,
"end": 1486
} | class ____(metaclass=Meta):
pass
FromMakeClass = attr.make_class("FromMakeClass", ["x"])
| WithMetaSlots |
python | openai__openai-python | src/openai/types/responses/input_item_list_params.py | {
"start": 286,
"end": 964
} | class ____(TypedDict, total=False):
after: str
"""An item ID to list items after, used in pagination."""
include: List[ResponseIncludable]
"""Additional fields to include in the response.
See the `include` parameter for Response creation above for more information.
"""
limit: int
"""A limit on the number of objects to be returned.
Limit can range between 1 and 100, and the default is 20.
"""
order: Literal["asc", "desc"]
"""The order to return the input items in. Default is `desc`.
- `asc`: Return the input items in ascending order.
- `desc`: Return the input items in descending order.
"""
| InputItemListParams |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 7446,
"end": 7568
} | class ____(AirflowException):
"""Raise when multiple values are found for the same connection ID."""
| ConnectionNotUnique |
python | streamlit__streamlit | lib/tests/streamlit/elements/vega_charts_test.py | {
"start": 104266,
"end": 117808
} | class ____(unittest.TestCase):
"""Test vega chart utility methods."""
@parameterized.expand(
[
(
"param_",
'{"config": {"settings": ["param_1", "param_2"], "ignore": ["param_3"]}}',
'{"config": {"settings": ["param_1", "param_2"], "ignore": ["param_3"]}}',
), # Deep structure, but "ignore" should not be reset
(
"param_",
'{"data": {"options": ["param_20"], "params": ["param_20", "param_5"]}}',
'{"data": {"options": ["param_1"], "params": ["param_1", "param_2"]}}',
), # Nested with duplicates across sub-structures
(
"view_",
'{"views": {"list": ["view_10", "view_2"], "additional": "view_1"}}',
'{"views": {"list": ["view_1", "view_2"], "additional": "view_3"}}',
), # Deep structure, with single key being the same as others
(
"view_",
'{"layers": [{"id": "view_5"}, {"id": "view_5"}, {"id": "view_7"}]}',
'{"layers": [{"id": "view_1"}, {"id": "view_1"}, {"id": "view_2"}]}',
), # Objects in an array with duplicate IDs
(
"plot_",
'{"data": {"items": ["plot_3"], "descriptions": ["This plot_4 shows..."]}}',
'{"data": {"items": ["plot_1"], "descriptions": ["This plot_4 shows..."]}}',
), # Only replace actual IDs, not text content
(
"param_",
'{"config": {"settings": ["param_e4f9", "param_a1b2c3"]}}',
'{"config": {"settings": ["param_1", "param_2"]}}',
), # Hash-based suffixes should be replaced as well
(
"view_",
'{"views": {"list": ["view_d1f2", "view_d1f2", "view_0abc"]}}',
'{"views": {"list": ["view_1", "view_1", "view_2"]}}',
), # Hash-based suffixes with duplicates
]
)
def test_reset_counter_pattern(self, prefix: str, vega_spec: str, expected: str):
"""Test that _reset_counter_pattern correctly replaces IDs."""
result = _reset_counter_pattern(prefix, vega_spec)
assert result == expected
@parameterized.expand(
[
(
'{"data": {"name": "e49f4eae50f240b9cf1895776f847b5d"}, "mark": {"type": "point"}, "encoding": {"color": {"condition": {"param": "param_1", "field": "Origin", "type": "nominal"}, "value": "lightgray"}, "tooltip": {"value": null}, "x": {"field": "Horsepower", "type": "quantitative"}, "y": {"field": "Miles_per_Gallon", "type": "quantitative"}}, "params": [{"name": "param_1", "select": {"type": "point"}}]}',
{"param_1"},
),
(
'{"data": {"name": "438d17320890cc476723f9301ba57f91"}, "mark": {"type": "bar"}, "encoding": {"fillOpacity": {"condition": {"param": "my_param", "value": 1}, "value": 0.3}, "tooltip": {"value": null}, "x": {"field": "a", "type": "nominal"}, "y": {"field": "b", "type": "quantitative"}}, "params": [{"name": "my_param", "select": {"type": "point"}}, {"name": "not_valid_param"}]}',
{"my_param"}, # Extracts only one since the other is not a valid param
),
(
'{"data": {"name": "438d17320890cc476723f9301ba57f91"}, "mark": {"type": "bar"}, "encoding": {"fillOpacity": {"condition": {"param": "my_param", "value": 1}, "value": 0.3}, "tooltip": {"value": null}, "x": {"field": "a", "type": "nominal"}, "y": {"field": "b", "type": "quantitative"}}, "params": [{"name": "my_param_1", "select": {"type": "point"}}, {"name": "my_param_2", "select": {"type": "interval"}}]}',
{"my_param_1", "my_param_2"},
),
]
)
def test_extract_selection_parameters(
self, vega_spec: str, expected_params: set[str]
):
"""Test that _extract_selection_parameters correctly extracts parameters."""
result = _extract_selection_parameters(json.loads(vega_spec))
assert result == expected_params
@parameterized.expand(
[
(
'{"params": [{"name": "my_param_1", "select": {"type": "point"}}, {"name": "my_param_2", "select": {"type": "interval"}}]}',
None,
["my_param_1", "my_param_2"],
),
(
'{"params": [{"name": "my_param_1", "select": {"type": "point"}}, {"name": "my_param_2", "select": {"type": "interval"}}]}',
"my_param_1",
["my_param_1"],
),
(
'{"params": [{"name": "my_param_1", "select": {"type": "point"}}, {"name": "my_param_2", "select": {"type": "interval"}}]}',
("my_param_1", "my_param_2"),
["my_param_1", "my_param_2"],
),
]
)
def test_parse_selection_mode(
self,
vega_spec: str,
input_selection_modes: Any,
expected_selection_modes: set[str] | Exception,
):
"""Test that _parse_selection_mode correctly extracts parameters."""
result = _parse_selection_mode(json.loads(vega_spec), input_selection_modes)
assert result == expected_selection_modes
def test_parse_selection_mode_raises_exception(self):
"""Test that _parse_selection_mode correctly extracts parameters."""
vega_spec = json.loads(
'{"params": [{"name": "my_param_1", "select": {"type": "point"}}, {"name": "my_param_2", "select": {"type": "interval"}}]}'
)
with pytest.raises(StreamlitAPIException):
# The provided parameter is not defined in spec:
_parse_selection_mode(vega_spec, "not_exiting_param")
with pytest.raises(StreamlitAPIException):
# One of the parameters is not defined in spec:
_parse_selection_mode(vega_spec, ("my_param_1", "not_exiting_param"))
with pytest.raises(StreamlitAPIException):
# No parameters defined in spec
_parse_selection_mode({}, ())
@parameterized.expand(
[
(
'{"vconcat": [{"hconcat": [{"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalLength", "type": "quantitative"}, "y": {"field": "petalLength", "type": "quantitative"}}, "height": 200, "name": "view_33", "width": 200}, {"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalWidth", "type": "quantitative"}, "y": {"field": "petalLength", "type": "quantitative"}}, "height": 200, "name": "view_34", "width": 200}]}, {"hconcat": [{"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalLength", "type": "quantitative"}, "y": {"field": "petalWidth", "type": "quantitative"}}, "height": 200, "name": "view_35", "width": 200}, {"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalWidth", "type": "quantitative"}, "y": {"field": "petalWidth", "type": "quantitative"}}, "height": 200, "name": "view_36", "width": 200}]}], "data": {"url": "https://cdn.jsdelivr.net/npm/vega-datasets@v1.29.0/data/iris.json"}, "params": [{"name": "param_17", "select": {"type": "point"}, "views": ["view_33", "view_34", "view_35", "view_36"]}, {"name": "param_18", "select": {"type": "interval"}, "views": ["view_33", "view_34", "view_35", "view_36"]}], "$schema": "https://vega.github.io/schema/vega-lite/v5.17.0.json", "autosize": {"type": "fit", "contains": "padding"}}',
'{"vconcat": [{"hconcat": [{"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalLength", "type": "quantitative"}, "y": {"field": "petalLength", "type": "quantitative"}}, "height": 200, "name": "view_1", "width": 200}, {"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalWidth", "type": "quantitative"}, "y": {"field": "petalLength", "type": "quantitative"}}, "height": 200, "name": "view_2", "width": 200}]}, {"hconcat": [{"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalLength", "type": "quantitative"}, "y": {"field": "petalWidth", "type": "quantitative"}}, "height": 200, "name": "view_3", "width": 200}, {"mark": {"type": "point"}, "encoding": {"color": {"field": "species", "type": "nominal"}, "tooltip": {"value": null}, "x": {"field": "sepalWidth", "type": "quantitative"}, "y": {"field": "petalWidth", "type": "quantitative"}}, "height": 200, "name": "view_4", "width": 200}]}], "data": {"url": "https://cdn.jsdelivr.net/npm/vega-datasets@v1.29.0/data/iris.json"}, "params": [{"name": "param_1", "select": {"type": "point"}, "views": ["view_1", "view_2", "view_3", "view_4"]}, {"name": "param_2", "select": {"type": "interval"}, "views": ["view_1", "view_2", "view_3", "view_4"]}], "$schema": "https://vega.github.io/schema/vega-lite/v5.17.0.json", "autosize": {"type": "fit", "contains": "padding"}}',
), # Advanced concatenated Vega-Lite spec with parameters
# Simpler cases:
(
"{ 'mark': 'point', 'encoding': { 'x': { 'field': 'a', 'type': 'quantitative' }, 'y': { 'field': 'b', 'type': 'quantitative' } } }",
"{ 'mark': 'point', 'encoding': { 'x': { 'field': 'a', 'type': 'quantitative' }, 'y': { 'field': 'b', 'type': 'quantitative' } } }",
), # Simple with nothing replaced
(
'{"mark": "bar", "encoding": {"x": {"field": "data", "type": "ordinal"}, "y": {"field": "value", "type": "quantitative"}, "color": {"field": "category", "type": "nominal"}}, "name": "view_112"}',
'{"mark": "bar", "encoding": {"x": {"field": "data", "type": "ordinal"}, "y": {"field": "value", "type": "quantitative"}, "color": {"field": "category", "type": "nominal"}}, "name": "view_112"}',
), # A simple bar chart will not have `view_` replaced, only composite charts
(
'{"description": "This is a view_123 visualization of param_45 data points.", "mark": "point"}',
'{"description": "This is a view_123 visualization of param_45 data points.", "mark": "point"}',
), # Ensure text containing prefix within descriptions or other properties is not changed
(
'{"elements": [{"type": "parameter", "name": "param_5"}]}',
'{"elements": [{"type": "parameter", "name": "param_5"}]}',
), # Do not replace params when there's no "params" key but similar naming exists
(
'{"layer": [{"mark": "line", "encoding": {"x": {"field": "year", "type": "temporal"}, "y": {"field": "growth", "type": "quantitative"}}, "name": "view_203"}]}',
'{"layer": [{"mark": "line", "encoding": {"x": {"field": "year", "type": "temporal"}, "y": {"field": "growth", "type": "quantitative"}}, "name": "view_1"}]}',
), # A layer spec with a single view needing reset
(
'{"repeat": {"layer": ["year_1", "year_2"]}, "spec": {"mark": "area", "encoding": {"y": {"field": {"repeat": "layer"}, "type": "quantitative"}}, "name": "view_15"}}',
'{"repeat": {"layer": ["year_1", "year_2"]}, "spec": {"mark": "area", "encoding": {"y": {"field": {"repeat": "layer"}, "type": "quantitative"}}, "name": "view_1"}}',
), # Nested structure using repeat and requiring name reset
(
'{"concat": [{"view": {"mark": "point", "name": "view_250"}}, {"view": {"mark": "point", "name": "view_251"}}]}',
'{"concat": [{"view": {"mark": "point", "name": "view_1"}}, {"view": {"mark": "point", "name": "view_2"}}]}',
), # Concatenated chart requiring name reset
(
'{"hconcat": [{"view": {"mark": "point", "name": "view_250"}}, {"view": {"mark": "point", "name": "view_251"}}]}',
'{"hconcat": [{"view": {"mark": "point", "name": "view_1"}}, {"view": {"mark": "point", "name": "view_2"}}]}',
), # hconcat chart requiring name reset
(
'{"vconcat": [{"view": {"mark": "point", "name": "view_250"}}, {"view": {"mark": "point", "name": "view_251"}}]}',
'{"vconcat": [{"view": {"mark": "point", "name": "view_1"}}, {"view": {"mark": "point", "name": "view_2"}}]}',
), # vconcat chart requiring name reset
(
'{"facet": {"field": "category", "type": "ordinal"}, "spec": {"mark": "tick", "encoding": {"x": {"field": "value", "type": "quantitative"}}, "name": "view_54"}}',
'{"facet": {"field": "category", "type": "ordinal"}, "spec": {"mark": "tick", "encoding": {"x": {"field": "value", "type": "quantitative"}}, "name": "view_1"}}',
), # Faceted chart requiring name reset
]
)
def test_stabilize_vega_json_spec(self, input_spec: str, expected: str):
"""Test that _stabilize_vega_json_spec correctly fixes the auto-generated names."""
result = _stabilize_vega_json_spec(input_spec)
assert result == expected
| VegaUtilitiesTest |
python | jackfrued__Python-100-Days | Day31-35/code/test_example02.py | {
"start": 74,
"end": 652
} | class ____(TestCase):
"""测试排序函数的测试用例"""
def setUp(self):
self.data1 = [35, 97, 12, 68, 55, 73, 81, 40]
self.items1 = [12, 35, 68, 97]
self.items2 = [40, 55, 73, 81]
def test_merge(self):
items = merge(self.items1, self.items2)
for i in range(len(items) - 1):
self.assertLessEqual(items[i], items[i + 1])
def test_select_sort(self):
"""测试顺序查找"""
items = select_sort(self.data1)
for i in range(len(items) - 1):
self.assertLessEqual(items[i], items[i + 1])
| TestExample02 |
python | sympy__sympy | sympy/physics/mechanics/tests/test_wrapping_geometry.py | {
"start": 15151,
"end": 19285
} | class ____:
"""
A test class to verify the Lagrangian mechanics model of a particle
tethered by an elastic cable that is constrained to move on the surface
of a fixed cone.
The physical system consists of a particle of mass `m` which can slide
freely on a cone with a half-angle `alpha`. This particle is attached to a
fixed anchor point `A` (also on the cone) by a massless, elastic cable
with a spring constant `k` and a natural rest length `L0`.
A key feature of this model is that the cable always follows the shortest
path, the geodesic, between the anchor point and the particle. The system has
two degrees of freedom, described by the generalized coordinates `r(t)`
(the slant distance from the cone's apex) and `phi(t)` (the azimuthal angle).
"""
@classmethod
def setup_class(cls):
# Setup basic symbols
cls.t = symbols('t')
cls.m, cls.g, cls.alpha, cls.k, cls.L0 = symbols('m g alpha k L0', positive=True)
cls.rA, cls.phiA = symbols('rA phiA', real=True)
cls.r, cls.phi = dynamicsymbols('r phi')
cls.dr, cls.dphi = dynamicsymbols('r phi', 1)
cls.N = ReferenceFrame('N')
cls.O = Point('O')
cls.O.set_vel(cls.N, 0)
cls.cone = WrappingCone(cls.alpha, cls.O, cls.N.z)
# Generalized point on the cone
P = Point('P')
x = cls.r * sin(cls.alpha) * cos(cls.phi)
y = cls.r * sin(cls.alpha) * sin(cls.phi)
z = cls.r * cos(cls.alpha)
P.set_pos(cls.O, x * cls.N.x + y * cls.N.y + z * cls.N.z)
P.set_vel(cls.N, P.pos_from(cls.O).diff(cls.t, cls.N))
P_part = Particle('P_part', P, cls.m)
A = Point('A')
xA = cls.rA * sin(cls.alpha) * cos(cls.phiA)
yA = cls.rA * sin(cls.alpha) * sin(cls.phiA)
zA = cls.rA * cos(cls.alpha)
A.set_pos(cls.O, xA * cls.N.x + yA * cls.N.y + zA * cls.N.z)
cls.T = P_part.kinetic_energy(cls.N)
cls.L_geo = cls.cone.geodesic_length(A, P)
# Gravitational potential energy
h = P.pos_from(cls.O).dot(cls.N.z)
Vg = cls.m * cls.g * h
# Elastic potential energy
Ve = Rational(1, 2) * cls.k * (cls.L_geo - cls.L0)**2
# Total potential energy
cls.V = Vg + Ve
P_part.potential_energy = cls.V
# Get EOMs
cls.L = Lagrangian(cls.N, P_part)
LM = LagrangesMethod(cls.L, [cls.r, cls.phi])
cls.eom = LM.form_lagranges_equations()
for i in range(len(cls.eom)):
cls.eom[i] = simplify(cls.eom[i])
def test_kinetic_energy(self):
expected_T = Rational(1, 2) * self.m * (self.dr**2 + self.r**2 * sin(self.alpha)**2 * self.dphi**2)
assert simplify(self.T - expected_T) == 0
def test_geodesic_length(self):
expected_L_geo = sqrt(self.rA**2 - 2*self.rA*self.r*cos(Piecewise((-self.phiA + self.phi + 2*pi, -self.phiA + self.phi + 2*pi <= pi), (self.phiA - self.phi, True))*sin(self.alpha)) + self.r**2)
assert simplify(self.L_geo - expected_L_geo) == 0
def test_potential_energy(self):
expected_V = (self.m * self.g * self.r * cos(self.alpha) + Rational(1, 2) * self.k * (self.L_geo - self.L0)**2)
assert simplify(self.V - expected_V) == 0
def test_lagrangian(self):
assert simplify(self.L - (self.T - self.V)) == 0
def test_equation_of_motion_r(self):
# Derived by hand: EOM for r: m*r_ddot - m*r*sin(alpha)**2*phi_dot**2 + dV/dr = 0
dV_dr = diff(self.V, self.r)
expected_eom_r = (self.m * diff(self.r, self.t, 2) -
self.m * self.r * sin(self.alpha)**2 * self.dphi**2 +
dV_dr)
assert simplify(self.eom[0] - expected_eom_r) == 0
def test_equation_of_motion_phi(self):
# Derived by hand: EOM for phi: d/dt(m*r**2*sin(alpha)**2*phi_dot) + dV/dphi = 0
dV_dphi = diff(self.V, self.phi)
expected_eom_phi = (diff(self.m * self.r**2 * sin(self.alpha)**2 * self.dphi, self.t) + dV_dphi)
assert simplify(self.eom[1] - expected_eom_phi) == 0
| TestElasticConeModel |
python | google__pytype | pytype/tests/test_test_code.py | {
"start": 79,
"end": 3349
} | class ____(test_base.BaseTest):
"""Tests for test assertions."""
def test_assert_not_none(self):
self.Check("""
import unittest
from typing import Optional
def foo():
return '10' if __random__ else None
class FooTest(unittest.TestCase):
def test_foo(self):
x = foo()
assert_type(x, Optional[str])
self.assertIsNotNone(x)
assert_type(x, str)
""")
def test_assert_not_none_with_message(self):
self.Check("""
import unittest
from typing import Optional
def foo():
return '10' if __random__ else None
class FooTest(unittest.TestCase):
def test_foo(self):
x = foo()
assert_type(x, Optional[str])
self.assertIsNotNone(x, "assertion message")
assert_type(x, str)
""")
def test_assert_isinstance(self):
self.Check("""
import unittest
from typing import Union
def foo():
return '10' if __random__ else 10
class FooTest(unittest.TestCase):
def test_foo(self):
x = foo()
assert_type(x, Union[int, str])
self.assertIsInstance(x, str)
assert_type(x, str)
""")
def test_assert_isinstance_with_message(self):
self.Check("""
import unittest
from typing import Union
def foo():
return '10' if __random__ else 10
class FooTest(unittest.TestCase):
def test_foo(self):
x = foo()
assert_type(x, Union[int, str])
self.assertIsInstance(x, str, "assertion message")
assert_type(x, str)
""")
def test_narrowed_type_from_assert_isinstance(self):
# assertIsInstance should narrow the original var's bindings if possible.
self.Check("""
import unittest
from typing import Union
class A:
pass
class B(A):
pass
class FooTest(unittest.TestCase):
def test_foo(self, x: Union[A, B, int]):
self.assertIsInstance(x, A)
assert_type(x, Union[A, B])
""")
def test_new_type_from_assert_isinstance(self):
# assertIsInstance should create a var with a new type if it is not in
# the original var's bindings.
self.Check("""
import unittest
class A:
pass
class B(A):
pass
def foo() -> A:
return B()
class FooTest(unittest.TestCase):
def test_foo(self):
x = foo()
assert_type(x, A)
self.assertIsInstance(x, B)
assert_type(x, B)
""")
def test_assert_isinstance_tuple(self):
self.Check("""
import unittest
from typing import Union
class FooTest(unittest.TestCase):
def test_foo(self):
x = None
self.assertIsInstance(x, (int, str))
assert_type(x, Union[int, str])
self.assertIsInstance(x, (int,))
assert_type(x, int)
""")
def test_instance_attribute(self):
self.Check("""
import unittest
class Foo:
def __init__(self, x):
self.x = x
class FooTest(unittest.TestCase):
def test_foo(self):
foo = __any_object__
self.assertIsInstance(foo, Foo)
print(foo.x)
""")
| AssertionTest |
python | cython__cython | docs/examples/userguide/early_binding_for_speed/rectangle_cdef.py | {
"start": 15,
"end": 622
} | class ____:
x0: cython.int
y0: cython.int
x1: cython.int
y1: cython.int
def __init__(self, x0: cython.int, y0: cython.int, x1: cython.int, y1: cython.int):
self.x0 = x0
self.y0 = y0
self.x1 = x1
self.y1 = y1
@cython.cfunc
def _area(self) -> cython.int:
area: cython.int = (self.x1 - self.x0) * (self.y1 - self.y0)
if area < 0:
area = -area
return area
def area(self):
return self._area()
def rectArea(x0, y0, x1, y1):
rect: Rectangle = Rectangle(x0, y0, x1, y1)
return rect._area()
| Rectangle |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 78749,
"end": 94042
} | class ____(DashboardComponent):
"""
Task Group Graph
Creates a graph layout for TaskGroups on the scheduler. It assigns
(x, y) locations to all the TaskGroups and lays them out by according
to their dependencies. The layout gets updated every time that new
TaskGroups are added.
Each task group node incodes information about task progress, memory,
and output type into glyphs, as well as a hover tooltip with more detailed
information on name, computation time, memory, and tasks status.
"""
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.nodes_layout = {}
self.arrows_layout = {}
self.old_counter = -1
self.nodes_source = ColumnDataSource(
{
"x": [],
"y": [],
"w_box": [],
"h_box": [],
"name": [],
"tot_tasks": [],
"color": [],
"x_start": [],
"x_end": [],
"y_start": [],
"y_end": [],
"x_end_progress": [],
"mem_alpha": [],
"node_line_width": [],
"comp_tasks": [],
"url_logo": [],
"x_logo": [],
"y_logo": [],
"w_logo": [],
"h_logo": [],
"in_processing": [],
"in_memory": [],
"in_released": [],
"in_erred": [],
"compute_time": [],
"memory": [],
}
)
self.arrows_source = ColumnDataSource({"xs": [], "ys": [], "xe": [], "ye": []})
self.root = figure(title="Task Groups Graph", match_aspect=True, **kwargs)
self.root.axis.visible = False
self.subtitle = Title(text=" ", text_font_style="italic")
self.root.add_layout(self.subtitle, "above")
rect = self.root.rect(
x="x",
y="y",
width="w_box",
height="h_box",
color="color",
fill_alpha="mem_alpha",
line_color="black",
line_width="node_line_width",
source=self.nodes_source,
)
# plot tg log
self.root.image_url(
url="url_logo",
x="x_logo",
y="y_logo",
w="w_logo",
h="h_logo",
anchor="center",
source=self.nodes_source,
)
# progress bar plain box
self.root.quad(
left="x_start",
right="x_end",
bottom="y_start",
top="y_end",
color=None,
line_color="black",
source=self.nodes_source,
)
# progress bar
self.root.quad(
left="x_start",
right="x_end_progress",
bottom="y_start",
top="y_end",
color="color",
line_color=None,
fill_alpha=0.6,
source=self.nodes_source,
)
self.arrows = Arrow(
end=VeeHead(size=8),
line_color="black",
line_alpha=0.5,
line_width=1,
x_start="xs",
y_start="ys",
x_end="xe",
y_end="ye",
source=self.arrows_source,
)
self.root.add_layout(self.arrows)
self.root.xgrid.grid_line_color = None
self.root.ygrid.grid_line_color = None
self.root.x_range.range_padding = 0.5
self.root.y_range.range_padding = 0.5
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 12px; font-weight: bold;">Name:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@name</span>
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Compute time:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@compute_time</span>
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Memory:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@memory</span>
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Tasks:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@tot_tasks</span>
</div>
<div style="margin-left: 2em;">
<span style="font-size: 12px; font-weight: bold;">Completed:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@comp_tasks</span>
</div>
<div style="margin-left: 2em;">
<span style="font-size: 12px; font-weight: bold;">Processing:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@in_processing</span>
</div>
<div style="margin-left: 2em;">
<span style="font-size: 12px; font-weight: bold;">In memory:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@in_memory</span>
</div>
<div style="margin-left: 2em;">
<span style="font-size: 12px; font-weight: bold;">Erred:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@in_erred</span>
</div>
<div style="margin-left: 2em;">
<span style="font-size: 12px; font-weight: bold;">Released:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@in_released</span>
</div>
""",
renderers=[rect],
)
self.root.add_tools(hover)
@without_property_validation
@log_errors
def update_layout(self):
# Get dependencies per task group.
# In some cases there are tg that have themselves as dependencies - we remove those.
dependencies = {
k: {ds.name for ds in ts.dependencies if ds.name != k}
for k, ts in self.scheduler.task_groups.items()
}
import dask
order = dask.order.order(
dsk={group.name: 1 for k, group in self.scheduler.task_groups.items()},
dependencies=dependencies,
)
ordered = sorted(self.scheduler.task_groups, key=order.get)
xs = {}
ys = {}
locations = set()
nodes_layout = {}
arrows_layout = {}
for tg in ordered:
if dependencies[tg]:
x = max(xs[dep] for dep in dependencies[tg]) + 1
y = max(ys[dep] for dep in dependencies[tg])
if (
len(dependencies[tg]) > 1
and len({ys[dep] for dep in dependencies[tg]}) == 1
):
y += 1
else:
x = 0
y = max(ys.values()) + 1 if ys else 0
while (x, y) in locations: # avoid collisions by moving up
y += 1
locations.add((x, y))
xs[tg], ys[tg] = x, y
# info needed for node layout to column data source
nodes_layout[tg] = {"x": xs[tg], "y": ys[tg]}
# info needed for arrow layout
arrows_layout[tg] = {
"nstart": dependencies[tg],
"nend": [tg] * len(dependencies[tg]),
}
return nodes_layout, arrows_layout
def compute_size(self, x, min_box, max_box):
start = 0.4
end = 0.8
y = (end - start) / (max_box - min_box) * (x - min_box) + start
return y
@without_property_validation
def update(self):
if self.scheduler.transition_counter == self.old_counter:
return
self.old_counter = self.scheduler.transition_counter
if not self.scheduler.task_groups:
self.subtitle.text = "Scheduler is empty."
else:
self.subtitle.text = " "
if self.nodes_layout.keys() != self.scheduler.task_groups.keys():
self.nodes_layout, self.arrows_layout = self.update_layout()
nodes_data = {
"x": [],
"y": [],
"w_box": [],
"h_box": [],
"name": [],
"color": [],
"tot_tasks": [],
"x_start": [],
"x_end": [],
"y_start": [],
"y_end": [],
"x_end_progress": [],
"mem_alpha": [],
"node_line_width": [],
"comp_tasks": [],
"url_logo": [],
"x_logo": [],
"y_logo": [],
"w_logo": [],
"h_logo": [],
"in_processing": [],
"in_memory": [],
"in_released": [],
"in_erred": [],
"compute_time": [],
"memory": [],
}
arrows_data = {
"xs": [],
"ys": [],
"xe": [],
"ye": [],
}
durations = set()
nbytes = set()
for tg in self.scheduler.task_groups.values():
if tg.duration and tg.nbytes_total:
durations.add(tg.duration)
nbytes.add(tg.nbytes_total)
durations_min = min(durations, default=0)
durations_max = max(durations, default=0)
nbytes_min = min(nbytes, default=0)
nbytes_max = max(nbytes, default=0)
box_dim = {}
for key, tg in self.scheduler.task_groups.items():
comp_tasks = (
tg.states["released"] + tg.states["memory"] + tg.states["erred"]
)
tot_tasks = sum(tg.states.values())
# compute width and height of boxes
if (
tg.duration
and tg.nbytes_total
and comp_tasks
and len(durations) > 1
and len(nbytes) > 1
):
# scale duration (width)
width_box = self.compute_size(
tg.duration / comp_tasks * tot_tasks,
min_box=durations_min / comp_tasks * tot_tasks,
max_box=durations_max / comp_tasks * tot_tasks,
)
# need to scale memory (height)
height_box = self.compute_size(
tg.nbytes_total / comp_tasks * tot_tasks,
min_box=nbytes_min / comp_tasks * tot_tasks,
max_box=nbytes_max / comp_tasks * tot_tasks,
)
else:
width_box = 0.6
height_box = width_box / 2
box_dim[key] = {"width": width_box, "height": height_box}
for key, tg in self.scheduler.task_groups.items():
x = self.nodes_layout[key]["x"]
y = self.nodes_layout[key]["y"]
width = box_dim[key]["width"]
height = box_dim[key]["height"]
# main boxes layout
nodes_data["x"].append(x)
nodes_data["y"].append(y)
nodes_data["w_box"].append(width)
nodes_data["h_box"].append(height)
comp_tasks = (
tg.states["released"] + tg.states["memory"] + tg.states["erred"]
)
tot_tasks = sum(tg.states.values())
nodes_data["name"].append(tg.prefix.name)
nodes_data["color"].append(color_of(tg.prefix.name))
nodes_data["tot_tasks"].append(tot_tasks)
# memory alpha factor by 0.4 if not gets too dark
nodes_data["mem_alpha"].append(
(tg.states["memory"] / sum(tg.states.values())) * 0.4
)
# main box line width
if tg.states["processing"]:
nodes_data["node_line_width"].append(5)
else:
nodes_data["node_line_width"].append(1)
# progress bar data update
nodes_data["x_start"].append(x - width / 2)
nodes_data["x_end"].append(x + width / 2)
nodes_data["y_start"].append(y - height / 2)
nodes_data["y_end"].append(y - height / 2 + height * 0.4)
nodes_data["x_end_progress"].append(
x - width / 2 + width * comp_tasks / tot_tasks
)
# arrows
arrows_data["xs"] += [
self.nodes_layout[k]["x"] + box_dim[k]["width"] / 2
for k in self.arrows_layout[key]["nstart"]
]
arrows_data["ys"] += [
self.nodes_layout[k]["y"] for k in self.arrows_layout[key]["nstart"]
]
arrows_data["xe"] += [
self.nodes_layout[k]["x"] - box_dim[k]["width"] / 2
for k in self.arrows_layout[key]["nend"]
]
arrows_data["ye"] += [
self.nodes_layout[k]["y"] for k in self.arrows_layout[key]["nend"]
]
# LOGOS
if len(tg.types) == 1:
logo_type = next(iter(tg.types)).split(".")[0]
try:
url_logo = logos_dict[logo_type]
except KeyError:
url_logo = ""
else:
url_logo = ""
nodes_data["url_logo"].append(url_logo)
nodes_data["x_logo"].append(x + width / 3)
nodes_data["y_logo"].append(y + height / 3)
ratio = width / height
if ratio > 1:
nodes_data["h_logo"].append(height * 0.3)
nodes_data["w_logo"].append(width * 0.3 / ratio)
else:
nodes_data["h_logo"].append(height * 0.3 * ratio)
nodes_data["w_logo"].append(width * 0.3)
# compute_time and memory
nodes_data["compute_time"].append(format_time(tg.duration))
nodes_data["memory"].append(format_bytes(tg.nbytes_total))
# Add some status to hover
tasks_processing = tg.states["processing"]
tasks_memory = tg.states["memory"]
tasks_relased = tg.states["released"]
tasks_erred = tg.states["erred"]
nodes_data["comp_tasks"].append(
f"{comp_tasks} ({comp_tasks / tot_tasks * 100:.0f} %)"
)
nodes_data["in_processing"].append(
f"{tasks_processing} ({tasks_processing / tot_tasks * 100:.0f} %)"
)
nodes_data["in_memory"].append(
f"{tasks_memory} ({tasks_memory / tot_tasks * 100:.0f} %)"
)
nodes_data["in_released"].append(
f"{tasks_relased} ({tasks_relased / tot_tasks * 100:.0f} %)"
)
nodes_data["in_erred"].append(
f"{tasks_erred} ({tasks_erred / tot_tasks * 100:.0f} %)"
)
self.nodes_source.data.update(nodes_data)
self.arrows_source.data.update(arrows_data)
| TaskGroupGraph |
python | django-crispy-forms__django-crispy-forms | crispy_forms/exceptions.py | {
"start": 288,
"end": 330
} | class ____(CrispyError):
pass
| DynamicError |
python | jackfrued__Python-100-Days | Day31-35/code/example14.py | {
"start": 124,
"end": 275
} | class ____(Enum):
"""花色(枚举)"""
SPADE, HEART, CLUB, DIAMOND = range(4)
def __lt__(self, other):
return self.value < other.value
| Suite |
python | google__jax | tests/hijax_test.py | {
"start": 4951,
"end": 5397
} | class ____(HiPrimitive):
def abstract_eval(_, hi_aval):
return ShapedArray(hi_aval.shape, jnp.dtype('float32')), set()
def to_lojax(_, hi_val):
return hi_val.arr.astype('float32') * hi_val.scale[:, None]
def jvp(_, primals, tangents):
(x,), (xdot,) = primals, tangents
return from_qarray(x), from_qarray(xdot)
def transpose(_, out_bar, __):
return [to_qarray(out_bar)]
from_qarray_p = FromQ('from_q')
@dataclass
| FromQ |
python | pytorch__pytorch | torch/export/unflatten.py | {
"start": 1236,
"end": 1383
} | class ____(Enum):
PARAMETER = "parameter"
BUFFER = "buffer"
CONSTANT = "constant"
MODULE = "module"
@dataclass(frozen=True)
| _AttrKind |
python | aimacode__aima-python | deep_learning4e.py | {
"start": 403,
"end": 703
} | class ____:
"""
A single unit of a layer in a neural network
:param weights: weights between parent nodes and current node
:param value: value of current node
"""
def __init__(self, weights=None, value=None):
self.value = value
self.weights = weights or []
| Node |
python | walkccc__LeetCode | solutions/11. Container With Most Water/11.py | {
"start": 0,
"end": 303
} | class ____:
def maxArea(self, height: list[int]) -> int:
ans = 0
l = 0
r = len(height) - 1
while l < r:
minHeight = min(height[l], height[r])
ans = max(ans, minHeight * (r - l))
if height[l] < height[r]:
l += 1
else:
r -= 1
return ans
| Solution |
python | jina-ai__jina | jina/excepts.py | {
"start": 2004,
"end": 2130
} | class ____(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
| NotSupportedError |
python | huggingface__transformers | src/transformers/models/mobilebert/modeling_mobilebert.py | {
"start": 38079,
"end": 41495
} | class ____(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.mobilebert = MobileBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
# Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing
| MobileBertForSequenceClassification |
python | ray-project__ray | ci/ray_ci/test_linux_tester_container.py | {
"start": 442,
"end": 10187
} | class ____:
"""
Mock subprocess.Popen. This process returns 1 if test targets is empty or contains
bad_test; otherwise return 0.
"""
def __init__(self, test_targets: List[str]):
self.test_targets = test_targets
def wait(self) -> int:
return 1 if "bad_test" in self.test_targets or not self.test_targets else 0
@mock.patch("ci.ray_ci.tester_container.TesterContainer._upload_build_info")
@mock.patch("ci.ray_ci.tester_container.TesterContainer.upload_test_results")
@mock.patch("ci.ray_ci.tester_container.TesterContainer.move_test_state")
def test_persist_test_results(
mock_upload_build_info, mock_upload_test_result, mock_move_test_state
) -> None:
container = LinuxTesterContainer("team", skip_ray_installation=True)
with mock.patch.dict(
os.environ,
{
"BUILDKITE_BRANCH": "master",
"BUILDKITE_PIPELINE_ID": "w00t",
},
):
container._persist_test_results("team", "log_dir")
assert not mock_upload_build_info.called
assert not mock_move_test_state.called
with mock.patch.dict(
os.environ,
{
"BUILDKITE_BRANCH": "non-master",
"BUILDKITE_PIPELINE_ID": get_global_config()["ci_pipeline_postmerge"][0],
},
):
container._persist_test_results("team", "log_dir")
assert not mock_upload_build_info.called
assert not mock_move_test_state.called
with mock.patch.dict(
os.environ,
{
"BUILDKITE_BRANCH": "non-master",
"BUILDKITE_PIPELINE_ID": get_global_config()["ci_pipeline_premerge"][0],
},
):
container._persist_test_results("team", "log_dir")
assert mock_upload_build_info.called
assert mock_move_test_state.called
with mock.patch.dict(
os.environ,
{
"BUILDKITE_BRANCH": "master",
"BUILDKITE_PIPELINE_ID": get_global_config()["ci_pipeline_postmerge"][0],
},
):
container._persist_test_results("team", "log_dir")
assert mock_upload_build_info.called
assert mock_move_test_state.called
def test_run_tests_in_docker() -> None:
inputs = []
def _mock_popen(input: List[str]) -> None:
inputs.append(" ".join(input))
with mock.patch("subprocess.Popen", side_effect=_mock_popen), mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer.install_ray",
return_value=None,
):
LinuxTesterContainer(
"team",
network="host",
build_type="debug",
test_envs=["ENV_01", "ENV_02"],
)._run_tests_in_docker(["t1", "t2"], [0, 1], "/tmp", ["v=k"], "flag")
input_str = inputs[-1]
assert "--env ENV_01 --env ENV_02 --env BUILDKITE" in input_str
assert "--network host" in input_str
assert '--gpus "device=0,1"' in input_str
assert "--volume /tmp:/tmp/bazel_event_logs" in input_str
assert (
"bazel test --jobs=1 --config=ci $(./ci/run/bazel_export_options) "
"--config=ci-debug --test_env v=k --test_arg flag t1 t2" in input_str
)
if RUN_PER_FLAKY_TEST > 1:
assert f"--runs_per_test {RUN_PER_FLAKY_TEST} " not in input_str
LinuxTesterContainer("team")._run_tests_in_docker(
["t1", "t2"], [], "/tmp", ["v=k"], run_flaky_tests=True
)
input_str = inputs[-1]
assert "--env BUILDKITE_BUILD_URL" in input_str
assert "--gpus" not in input_str
if RUN_PER_FLAKY_TEST > 1:
assert f"--runs_per_test {RUN_PER_FLAKY_TEST} " in input_str
LinuxTesterContainer("team")._run_tests_in_docker(
["t1", "t2"], [], "/tmp", ["v=k"], cache_test_results=True
)
input_str = inputs[-1]
assert "--cache_test_results=auto" in input_str.split()
def test_run_script_in_docker() -> None:
def _mock_check_output(input: List[str]) -> bytes:
input_str = " ".join(input)
assert "/bin/bash -iecuo pipefail -- run command" in input_str
return b""
with mock.patch(
"subprocess.check_output", side_effect=_mock_check_output
), mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer.install_ray",
return_value=None,
):
container = LinuxTesterContainer("team")
container.run_script_with_output(["run command"])
def test_skip_ray_installation() -> None:
install_ray_called = []
def _mock_install_ray(build_type: Optional[str], mask: Optional[str]) -> None:
install_ray_called.append(True)
with mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer.install_ray",
side_effect=_mock_install_ray,
):
assert len(install_ray_called) == 0
LinuxTesterContainer("team", skip_ray_installation=False)
assert len(install_ray_called) == 1
LinuxTesterContainer("team", skip_ray_installation=True)
assert len(install_ray_called) == 1
def test_ray_installation() -> None:
install_ray_cmds = []
def _mock_subprocess(inputs: List[str], env, stdout, stderr) -> None:
install_ray_cmds.append(inputs)
with mock.patch("subprocess.check_call", side_effect=_mock_subprocess):
LinuxTesterContainer("team", build_type="debug")
docker_image = f"{_DOCKER_ECR_REPO}:team"
assert install_ray_cmds[-1] == [
"docker",
"build",
"--pull",
"--progress=plain",
"-t",
docker_image,
"--build-arg",
f"BASE_IMAGE={docker_image}",
"--build-arg",
"BUILD_TYPE=debug",
"--build-arg",
"BUILDKITE_CACHE_READONLY=",
"-f",
"ci/ray_ci/tests.env.Dockerfile",
"/ray",
]
def test_run_tests() -> None:
def _mock_run_tests_in_docker(
test_targets: List[str],
gpu_ids: List[int],
bazel_log_dir: str,
test_envs: List[str],
test_arg: Optional[str] = None,
run_flaky_tests: Optional[bool] = False,
cache_test_results: Optional[bool] = False,
) -> MockPopen:
return MockPopen(test_targets)
def _mock_shard_tests(tests: List[str], workers: int, worker_id: int) -> List[str]:
return chunk_into_n(tests, workers)[worker_id]
with tempfile.TemporaryDirectory() as tmpdir, mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer.get_artifact_mount",
return_value=("/tmp/artifacts", tmpdir),
), mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer._persist_test_results",
return_value=None,
), mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer._run_tests_in_docker",
side_effect=_mock_run_tests_in_docker,
), mock.patch(
"ci.ray_ci.tester_container.shard_tests", side_effect=_mock_shard_tests
), mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer.install_ray",
return_value=None,
):
container = LinuxTesterContainer("team", shard_count=2, shard_ids=[0, 1])
# test_targets are not empty
assert container.run_tests("manu", ["t1", "t2"], [])
# test_targets is empty after chunking, but not creating popen
assert container.run_tests("manu", ["t1"], [])
assert container.run_tests("manu", [], [])
# test targets contain bad_test
assert not container.run_tests("manu", ["bad_test"], [])
def test_create_bazel_log_mount() -> None:
with tempfile.TemporaryDirectory() as tmpdir, mock.patch(
"ci.ray_ci.linux_tester_container.LinuxTesterContainer.get_artifact_mount",
return_value=("/tmp/artifacts", tmpdir),
):
container = LinuxTesterContainer("team", skip_ray_installation=True)
assert container._create_bazel_log_mount("w00t") == (
"/tmp/artifacts/w00t",
os.path.join(tmpdir, "w00t"),
)
def test_get_test_results() -> None:
_BAZEL_LOGS = [
json.dumps(log)
for log in [
{
"id": {"testResult": {"label": "//ray/ci:test", "run": "1"}},
"testResult": {"status": "FAILED"},
},
{
"id": {"testResult": {"label": "//ray/ci:reef", "run": "1"}},
"testResult": {"status": "FAILED"},
},
{
"id": {"testResult": {"label": "//ray/ci:test", "run": "2"}},
"testResult": {"status": "FAILED"},
},
{
"id": {"testResult": {"label": "//ray/ci:test", "run": "1"}},
"testResult": {"status": "PASSED"},
},
]
]
with tempfile.TemporaryDirectory() as tmp:
with open(os.path.join(tmp, "bazel_log"), "w") as f:
f.write("\n".join(_BAZEL_LOGS))
results = LinuxTesterContainer.get_test_and_results("manu", tmp)
results.sort(key=lambda x: x[0].get_name())
test, result = results[0]
assert test.get_name() == f"{platform.system().lower()}://ray/ci:reef"
assert test.get_oncall() == "manu"
assert result.is_failing()
test, result = results[1]
assert test.get_name() == f"{platform.system().lower()}://ray/ci:test"
assert test.get_oncall() == "manu"
assert result.is_passing()
test, result = results[2]
assert test.get_name() == f"{platform.system().lower()}://ray/ci:test"
assert test.get_oncall() == "manu"
assert result.is_failing()
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| MockPopen |
python | crytic__slither | slither/slithir/operations/binary.py | {
"start": 538,
"end": 3141
} | class ____(Enum):
POWER = "**"
MULTIPLICATION = "*"
DIVISION = "/"
MODULO = "%"
ADDITION = "+"
SUBTRACTION = "-"
LEFT_SHIFT = "<<"
RIGHT_SHIFT = ">>"
AND = "&"
CARET = "^"
OR = "|"
LESS = "<"
GREATER = ">"
LESS_EQUAL = "<="
GREATER_EQUAL = ">="
EQUAL = "=="
NOT_EQUAL = "!="
ANDAND = "&&"
OROR = "||"
@staticmethod
def return_bool(operation_type: "BinaryType") -> bool:
return operation_type in [
BinaryType.OROR,
BinaryType.ANDAND,
BinaryType.LESS,
BinaryType.GREATER,
BinaryType.LESS_EQUAL,
BinaryType.GREATER_EQUAL,
BinaryType.EQUAL,
BinaryType.NOT_EQUAL,
]
@staticmethod
def get_type(operation_type: str) -> "BinaryType": # pylint: disable=too-many-branches
if operation_type == "**":
return BinaryType.POWER
if operation_type == "*":
return BinaryType.MULTIPLICATION
if operation_type == "/":
return BinaryType.DIVISION
if operation_type == "%":
return BinaryType.MODULO
if operation_type == "+":
return BinaryType.ADDITION
if operation_type == "-":
return BinaryType.SUBTRACTION
if operation_type == "<<":
return BinaryType.LEFT_SHIFT
if operation_type == ">>":
return BinaryType.RIGHT_SHIFT
if operation_type == "&":
return BinaryType.AND
if operation_type == "^":
return BinaryType.CARET
if operation_type == "|":
return BinaryType.OR
if operation_type == "<":
return BinaryType.LESS
if operation_type == ">":
return BinaryType.GREATER
if operation_type == "<=":
return BinaryType.LESS_EQUAL
if operation_type == ">=":
return BinaryType.GREATER_EQUAL
if operation_type == "==":
return BinaryType.EQUAL
if operation_type == "!=":
return BinaryType.NOT_EQUAL
if operation_type == "&&":
return BinaryType.ANDAND
if operation_type == "||":
return BinaryType.OROR
raise SlithIRError(f"get_type: Unknown operation type {operation_type})")
def can_be_checked_for_overflow(self) -> bool:
return self in [
BinaryType.POWER,
BinaryType.MULTIPLICATION,
BinaryType.ADDITION,
BinaryType.SUBTRACTION,
BinaryType.DIVISION,
]
| BinaryType |
python | allegroai__clearml | clearml/binding/import_bind.py | {
"start": 213,
"end": 3086
} | class ____(object):
_patched = False
_post_import_hooks = defaultdict(list)
@staticmethod
def _init_hook() -> None:
if PostImportHookPatching._patched:
return
PostImportHookPatching._patched = True
if six.PY2:
# python2.x
builtins.__org_import__ = builtins.__import__
builtins.__import__ = PostImportHookPatching.__patched_import2
else:
# python3.x
builtins.__org_import__ = builtins.__import__
builtins.__import__ = PostImportHookPatching.__patched_import3
@staticmethod
def __patched_import2(
name: str,
globals: dict = {},
locals: dict = {},
fromlist: list = [],
level: int = -1,
) -> types.ModuleType:
already_imported = name in sys.modules
mod = builtins.__org_import__(name, globals=globals, locals=locals, fromlist=fromlist, level=level)
if not already_imported and name in PostImportHookPatching._post_import_hooks:
for hook in PostImportHookPatching._post_import_hooks[name]:
hook()
return mod
@staticmethod
def __patched_import3(
name: str,
globals: dict = None,
locals: dict = None,
fromlist: tuple = (),
level: int = 0,
) -> Any:
name_parts = name.split(".")
base_name = name_parts[0]
second_name = ".".join(name_parts[:2]) if len(name_parts) > 1 else None
base_already_imported = (not base_name) or (base_name in sys.modules)
second_already_imported = (not second_name) or (second_name in sys.modules)
mod = builtins.__org_import__(name, globals=globals, locals=locals, fromlist=fromlist, level=level)
if not base_already_imported and base_name in PostImportHookPatching._post_import_hooks:
for hook in PostImportHookPatching._post_import_hooks[base_name]:
hook()
if not second_already_imported and second_name in PostImportHookPatching._post_import_hooks:
for hook in PostImportHookPatching._post_import_hooks[second_name]:
hook()
return mod
@staticmethod
def add_on_import(name: str, func: Callable) -> None:
PostImportHookPatching._init_hook()
if (
name not in PostImportHookPatching._post_import_hooks
or func not in PostImportHookPatching._post_import_hooks[name]
):
PostImportHookPatching._post_import_hooks[name].append(func)
@staticmethod
def remove_on_import(name: str, func: Callable) -> None:
if (
name in PostImportHookPatching._post_import_hooks
and func in PostImportHookPatching._post_import_hooks[name]
):
PostImportHookPatching._post_import_hooks[name].remove(func)
| PostImportHookPatching |
python | pypa__pipenv | pipenv/vendor/tomlkit/toml_char.py | {
"start": 16,
"end": 1291
} | class ____(str):
def __init__(self, c):
super().__init__()
if len(self) > 1:
raise ValueError("A TOML character must be of length 1")
BARE = string.ascii_letters + string.digits + "-_"
KV = "= \t"
NUMBER = string.digits + "+-_.e"
SPACES = " \t"
NL = "\n\r"
WS = SPACES + NL
def is_bare_key_char(self) -> bool:
"""
Whether the character is a valid bare key name or not.
"""
return self in self.BARE
def is_kv_sep(self) -> bool:
"""
Whether the character is a valid key/value separator or not.
"""
return self in self.KV
def is_int_float_char(self) -> bool:
"""
Whether the character if a valid integer or float value character or not.
"""
return self in self.NUMBER
def is_ws(self) -> bool:
"""
Whether the character is a whitespace character or not.
"""
return self in self.WS
def is_nl(self) -> bool:
"""
Whether the character is a new line character or not.
"""
return self in self.NL
def is_spaces(self) -> bool:
"""
Whether the character is a space or not
"""
return self in self.SPACES
| TOMLChar |
python | qdrant__qdrant-client | qdrant_client/embed/embedder.py | {
"start": 652,
"end": 16412
} | class ____:
def __init__(self, threads: Optional[int] = None, **kwargs: Any) -> None:
self.embedding_models: dict[str, list[ModelInstance[TextEmbedding]]] = defaultdict(list)
self.sparse_embedding_models: dict[str, list[ModelInstance[SparseTextEmbedding]]] = (
defaultdict(list)
)
self.late_interaction_embedding_models: dict[
str, list[ModelInstance[LateInteractionTextEmbedding]]
] = defaultdict(list)
self.image_embedding_models: dict[str, list[ModelInstance[ImageEmbedding]]] = defaultdict(
list
)
self.late_interaction_multimodal_embedding_models: dict[
str, list[ModelInstance[LateInteractionMultimodalEmbedding]]
] = defaultdict(list)
self._threads = threads
def get_or_init_model(
self,
model_name: str,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[Sequence["OnnxProvider"]] = None,
cuda: bool = False,
device_ids: Optional[list[int]] = None,
deprecated: bool = False,
**kwargs: Any,
) -> TextEmbedding:
if not FastEmbedMisc.is_supported_text_model(model_name):
raise ValueError(
f"Unsupported embedding model: {model_name}. Supported models: {FastEmbedMisc.list_text_models()}"
)
options = {
"cache_dir": cache_dir,
"threads": threads or self._threads,
"providers": providers,
"cuda": cuda,
"device_ids": device_ids,
**kwargs,
}
for instance in self.embedding_models[model_name]:
if (deprecated and instance.deprecated) or (
not deprecated and instance.options == options
):
return instance.model
model = TextEmbedding(model_name=model_name, **options)
model_instance: ModelInstance[TextEmbedding] = ModelInstance(
model=model, options=options, deprecated=deprecated
)
self.embedding_models[model_name].append(model_instance)
return model
def get_or_init_sparse_model(
self,
model_name: str,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[Sequence["OnnxProvider"]] = None,
cuda: bool = False,
device_ids: Optional[list[int]] = None,
deprecated: bool = False,
**kwargs: Any,
) -> SparseTextEmbedding:
if not FastEmbedMisc.is_supported_sparse_model(model_name):
raise ValueError(
f"Unsupported embedding model: {model_name}. Supported models: {FastEmbedMisc.list_sparse_models()}"
)
options = {
"cache_dir": cache_dir,
"threads": threads or self._threads,
"providers": providers,
"cuda": cuda,
"device_ids": device_ids,
**kwargs,
}
for instance in self.sparse_embedding_models[model_name]:
if (deprecated and instance.deprecated) or (
not deprecated and instance.options == options
):
return instance.model
model = SparseTextEmbedding(model_name=model_name, **options)
model_instance: ModelInstance[SparseTextEmbedding] = ModelInstance(
model=model, options=options, deprecated=deprecated
)
self.sparse_embedding_models[model_name].append(model_instance)
return model
def get_or_init_late_interaction_model(
self,
model_name: str,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[Sequence["OnnxProvider"]] = None,
cuda: bool = False,
device_ids: Optional[list[int]] = None,
**kwargs: Any,
) -> LateInteractionTextEmbedding:
if not FastEmbedMisc.is_supported_late_interaction_text_model(model_name):
raise ValueError(
f"Unsupported embedding model: {model_name}. "
f"Supported models: {FastEmbedMisc.list_late_interaction_text_models()}"
)
options = {
"cache_dir": cache_dir,
"threads": threads or self._threads,
"providers": providers,
"cuda": cuda,
"device_ids": device_ids,
**kwargs,
}
for instance in self.late_interaction_embedding_models[model_name]:
if instance.options == options:
return instance.model
model = LateInteractionTextEmbedding(model_name=model_name, **options)
model_instance: ModelInstance[LateInteractionTextEmbedding] = ModelInstance(
model=model, options=options
)
self.late_interaction_embedding_models[model_name].append(model_instance)
return model
def get_or_init_late_interaction_multimodal_model(
self,
model_name: str,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[Sequence["OnnxProvider"]] = None,
cuda: bool = False,
device_ids: Optional[list[int]] = None,
**kwargs: Any,
) -> LateInteractionMultimodalEmbedding:
if not FastEmbedMisc.is_supported_late_interaction_multimodal_model(model_name):
raise ValueError(
f"Unsupported embedding model: {model_name}. "
f"Supported models: {FastEmbedMisc.list_late_interaction_multimodal_models()}"
)
options = {
"cache_dir": cache_dir,
"threads": threads or self._threads,
"providers": providers,
"cuda": cuda,
"device_ids": device_ids,
**kwargs,
}
for instance in self.late_interaction_multimodal_embedding_models[model_name]:
if instance.options == options:
return instance.model
model = LateInteractionMultimodalEmbedding(model_name=model_name, **options)
model_instance: ModelInstance[LateInteractionMultimodalEmbedding] = ModelInstance(
model=model, options=options
)
self.late_interaction_multimodal_embedding_models[model_name].append(model_instance)
return model
def get_or_init_image_model(
self,
model_name: str,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
providers: Optional[Sequence["OnnxProvider"]] = None,
cuda: bool = False,
device_ids: Optional[list[int]] = None,
**kwargs: Any,
) -> ImageEmbedding:
if not FastEmbedMisc.is_supported_image_model(model_name):
raise ValueError(
f"Unsupported embedding model: {model_name}. Supported models: {FastEmbedMisc.list_image_models()}"
)
options = {
"cache_dir": cache_dir,
"threads": threads or self._threads,
"providers": providers,
"cuda": cuda,
"device_ids": device_ids,
**kwargs,
}
for instance in self.image_embedding_models[model_name]:
if instance.options == options:
return instance.model
model = ImageEmbedding(model_name=model_name, **options)
model_instance: ModelInstance[ImageEmbedding] = ModelInstance(model=model, options=options)
self.image_embedding_models[model_name].append(model_instance)
return model
def embed(
self,
model_name: str,
texts: Optional[list[str]] = None,
images: Optional[list[ImageInput]] = None,
options: Optional[dict[str, Any]] = None,
is_query: bool = False,
batch_size: int = 8,
) -> NumericVector:
if (texts is None) is (images is None):
raise ValueError("Either documents or images should be provided")
embeddings: NumericVector # define type for a static type checker
if texts is not None:
if FastEmbedMisc.is_supported_text_model(model_name):
embeddings = self._embed_dense_text(
texts, model_name, options, is_query, batch_size
)
elif FastEmbedMisc.is_supported_sparse_model(model_name):
embeddings = self._embed_sparse_text(
texts, model_name, options, is_query, batch_size
)
elif FastEmbedMisc.is_supported_late_interaction_text_model(model_name):
embeddings = self._embed_late_interaction_text(
texts, model_name, options, is_query, batch_size
)
elif FastEmbedMisc.is_supported_late_interaction_multimodal_model(model_name):
embeddings = self._embed_late_interaction_multimodal_text(
texts, model_name, options, batch_size
)
else:
raise ValueError(f"Unsupported embedding model: {model_name}")
else:
assert (
images is not None
) # just to satisfy mypy which can't infer it from the previous conditions
if FastEmbedMisc.is_supported_image_model(model_name):
embeddings = self._embed_dense_image(images, model_name, options, batch_size)
elif FastEmbedMisc.is_supported_late_interaction_multimodal_model(model_name):
embeddings = self._embed_late_interaction_multimodal_image(
images, model_name, options, batch_size
)
else:
raise ValueError(f"Unsupported embedding model: {model_name}")
return embeddings
def _embed_dense_text(
self,
texts: list[str],
model_name: str,
options: Optional[dict[str, Any]],
is_query: bool,
batch_size: int,
) -> list[list[float]]:
embedding_model_inst = self.get_or_init_model(model_name=model_name, **options or {})
if not is_query:
embeddings = [
embedding.tolist()
for embedding in embedding_model_inst.embed(documents=texts, batch_size=batch_size)
]
else:
embeddings = [
embedding.tolist() for embedding in embedding_model_inst.query_embed(query=texts)
]
return embeddings
def _embed_sparse_text(
self,
texts: list[str],
model_name: str,
options: Optional[dict[str, Any]],
is_query: bool,
batch_size: int,
) -> list[models.SparseVector]:
embedding_model_inst = self.get_or_init_sparse_model(
model_name=model_name, **options or {}
)
if not is_query:
embeddings = [
models.SparseVector(
indices=sparse_embedding.indices.tolist(),
values=sparse_embedding.values.tolist(),
)
for sparse_embedding in embedding_model_inst.embed(
documents=texts, batch_size=batch_size
)
]
else:
embeddings = [
models.SparseVector(
indices=sparse_embedding.indices.tolist(),
values=sparse_embedding.values.tolist(),
)
for sparse_embedding in embedding_model_inst.query_embed(query=texts)
]
return embeddings
def _embed_late_interaction_text(
self,
texts: list[str],
model_name: str,
options: Optional[dict[str, Any]],
is_query: bool,
batch_size: int,
) -> list[list[list[float]]]:
embedding_model_inst = self.get_or_init_late_interaction_model(
model_name=model_name, **options or {}
)
if not is_query:
embeddings = [
embedding.tolist()
for embedding in embedding_model_inst.embed(documents=texts, batch_size=batch_size)
]
else:
embeddings = [
embedding.tolist() for embedding in embedding_model_inst.query_embed(query=texts)
]
return embeddings
def _embed_late_interaction_multimodal_text(
self,
texts: list[str],
model_name: str,
options: Optional[dict[str, Any]],
batch_size: int,
) -> list[list[list[float]]]:
embedding_model_inst = self.get_or_init_late_interaction_multimodal_model(
model_name=model_name, **options or {}
)
return [
embedding.tolist()
for embedding in embedding_model_inst.embed_text(
documents=texts, batch_size=batch_size
)
]
def _embed_late_interaction_multimodal_image(
self,
images: list[ImageInput],
model_name: str,
options: Optional[dict[str, Any]],
batch_size: int,
) -> list[list[list[float]]]:
embedding_model_inst = self.get_or_init_late_interaction_multimodal_model(
model_name=model_name, **options or {}
)
return [
embedding.tolist()
for embedding in embedding_model_inst.embed_image(images=images, batch_size=batch_size)
]
def _embed_dense_image(
self,
images: list[ImageInput],
model_name: str,
options: Optional[dict[str, Any]],
batch_size: int,
) -> list[list[float]]:
embedding_model_inst = self.get_or_init_image_model(model_name=model_name, **options or {})
embeddings = [
embedding.tolist()
for embedding in embedding_model_inst.embed(images=images, batch_size=batch_size)
]
return embeddings
@classmethod
def is_supported_text_model(cls, model_name: str) -> bool:
"""Check if model is supported by fastembed
Args:
model_name (str): The name of the model to check.
Returns:
bool: True if the model is supported, False otherwise.
"""
return FastEmbedMisc.is_supported_text_model(model_name)
@classmethod
def is_supported_image_model(cls, model_name: str) -> bool:
"""Check if model is supported by fastembed
Args:
model_name (str): The name of the model to check.
Returns:
bool: True if the model is supported, False otherwise.
"""
return FastEmbedMisc.is_supported_image_model(model_name)
@classmethod
def is_supported_late_interaction_text_model(cls, model_name: str) -> bool:
"""Check if model is supported by fastembed
Args:
model_name (str): The name of the model to check.
Returns:
bool: True if the model is supported, False otherwise.
"""
return FastEmbedMisc.is_supported_late_interaction_text_model(model_name)
@classmethod
def is_supported_late_interaction_multimodal_model(cls, model_name: str) -> bool:
"""Check if model is supported by fastembed
Args:
model_name (str): The name of the model to check.
Returns:
bool: True if the model is supported, False otherwise.
"""
return FastEmbedMisc.is_supported_late_interaction_multimodal_model(model_name)
@classmethod
def is_supported_sparse_model(cls, model_name: str) -> bool:
"""Check if model is supported by fastembed
Args:
model_name (str): The name of the model to check.
Returns:
bool: True if the model is supported, False otherwise.
"""
return FastEmbedMisc.is_supported_sparse_model(model_name)
| Embedder |
python | pytorch__pytorch | torch/distributed/flight_recorder/components/types.py | {
"start": 3147,
"end": 3218
} | class ____(NamedTuple):
    """Records that one rank belongs to a particular process group."""
    group_id: str  # identifier of the process group (opaque string key)
    global_rank: int  # the member's global (world) rank — presumably 0-based; confirm with producers
| Membership |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 7734,
"end": 11056
} | class ____:
def it_adds_a_getter_property_for_the_choice_element(self, getter_fixture):
parent, expected_choice = getter_fixture
assert parent.choice is expected_choice
def it_adds_a_creator_method_for_the_child_element(self, new_fixture):
parent, expected_xml = new_fixture
choice = parent._new_choice()
assert choice.xml == expected_xml
def it_adds_an_insert_method_for_the_child_element(self, insert_fixture):
parent, choice, expected_xml = insert_fixture
parent._insert_choice(choice)
assert parent.xml == expected_xml
assert parent._insert_choice.__doc__.startswith("Return the passed ``<w:choice>`` ")
def it_adds_an_add_method_for_the_child_element(self, add_fixture):
parent, expected_xml = add_fixture
choice = parent._add_choice()
assert parent.xml == expected_xml
assert isinstance(choice, CT_Choice)
assert parent._add_choice.__doc__.startswith("Add a new ``<w:choice>`` child element ")
def it_adds_a_get_or_change_to_method_for_the_child_element(self, get_or_change_to_fixture):
parent, expected_xml = get_or_change_to_fixture
choice = parent.get_or_change_to_choice()
assert isinstance(choice, CT_Choice)
assert parent.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture
def add_fixture(self):
parent = self.parent_bldr().element
expected_xml = self.parent_bldr("choice").xml()
return parent, expected_xml
@pytest.fixture(
params=[
("choice2", "choice"),
(None, "choice"),
("choice", "choice"),
]
)
def get_or_change_to_fixture(self, request):
before_member_tag, after_member_tag = request.param
parent = self.parent_bldr(before_member_tag).element
expected_xml = self.parent_bldr(after_member_tag).xml()
return parent, expected_xml
@pytest.fixture(params=["choice", None])
def getter_fixture(self, request):
choice_tag = request.param
parent = self.parent_bldr(choice_tag).element
expected_choice = parent.find(qn("w:choice")) # None if not found
return parent, expected_choice
@pytest.fixture
def insert_fixture(self):
parent = (
a_parent().with_nsdecls().with_child(an_oomChild()).with_child(an_oooChild())
).element
choice = a_choice().with_nsdecls().element
expected_xml = (
a_parent()
.with_nsdecls()
.with_child(a_choice())
.with_child(an_oomChild())
.with_child(an_oooChild())
).xml()
return parent, choice, expected_xml
@pytest.fixture
def new_fixture(self):
parent = self.parent_bldr().element
expected_xml = a_choice().with_nsdecls().xml()
return parent, expected_xml
# fixture components ---------------------------------------------
def parent_bldr(self, choice_tag=None):
parent_bldr = a_parent().with_nsdecls()
if choice_tag == "choice":
parent_bldr.with_child(a_choice())
if choice_tag == "choice2":
parent_bldr.with_child(a_choice2())
return parent_bldr
| DescribeChoice |
python | huggingface__transformers | src/transformers/models/metaclip_2/configuration_metaclip_2.py | {
"start": 664,
"end": 5784
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MetaClip2TextModel`]. It is used to instantiate
a MetaClip2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MetaClip2
[facebook/metaclip-2-worldwide-huge-quickgelu](https://huggingface.co/facebook/metaclip-2-worldwide-huge-quickgelu) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the MetaClip2 text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`MetaClip2TextModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 512):
Dimensionality of text and vision projection layers.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 49406):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 49407):
End of stream token id.
Example:
```python
>>> from transformers import MetaClip2TextConfig, MetaClip2TextModel
>>> # Initializing a MetaClip2TextConfig with facebook/metaclip-2-worldwide-huge-quickgelu style configuration
>>> configuration = MetaClip2TextConfig()
>>> # Initializing a MetaClip2TextModel (with random weights) from the facebook/metaclip-2-worldwide-huge-quickgelu style configuration
>>> model = MetaClip2TextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "metaclip_2_text_model"
base_config_key = "text_config"
def __init__(
self,
vocab_size=49408,
hidden_size=512,
intermediate_size=2048,
projection_dim=512,
num_hidden_layers=12,
num_attention_heads=8,
max_position_embeddings=77,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
# This differs from `MetaClip2Tokenizer`'s default and from openai/metaclip_2
# See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
pad_token_id=1,
bos_token_id=49406,
eos_token_id=49407,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
| MetaClip2TextConfig |
python | ansible__ansible | lib/ansible/cli/__init__.py | {
"start": 4703,
"end": 27716
} | class ____(ABC):
""" code behind bin/ansible* programs """
PAGER = C.config.get_config_value('PAGER')
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
USES_CONNECTION = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
if not args:
raise ValueError('A non-empty list for args is required')
self.args = args
self.parser = None
self.callback = callback
self.show_devel_warning()
def show_devel_warning(self) -> None:
if C.DEVEL_WARNING and __version__.endswith('dev0'):
display.warning(
'You are running the development version of Ansible. You should only run Ansible from "devel" if '
'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
'changing source of code and can become unstable at any point.'
)
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
self.parse()
# Initialize plugin loader after parse, so that the init code can utilize parsed arguments
cli_collections_path = context.CLIARGS.get('collections_path') or []
if not is_sequence(cli_collections_path):
# In some contexts ``collections_path`` is singular
cli_collections_path = [cli_collections_path]
init_plugin_loader(cli_collections_path)
display.vv(to_text(opt_help.version(self.parser.prog)))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
_display._report_config_warnings(_deprecator.ANSIBLE_CORE_DEPRECATOR)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we don't
# have other secrets setup, then automatically add a password prompt as well.
# prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True, initialize_context=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
# certain vault password prompt format, so 'promp_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
auto_prompt=auto_prompt)
last_exception = found_vault_secret = None
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# a empty or invalid password from the prompt will warn and continue to the next
# without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
found_vault_secret = True
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
try:
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
except AnsibleError as exc:
display.warning('Error getting vault password file (%s): %s' % (vault_id_name, to_text(exc)))
last_exception = exc
continue
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
last_exception = exc
continue
found_vault_secret = True
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
# An invalid or missing password file will error globally
# if no valid vault secret was found.
if last_exception and not found_vault_secret:
raise last_exception
if initialize_context:
VaultSecretsContext.initialize(VaultSecretsContext(vault_secrets))
return vault_secrets
@staticmethod
def _get_secret(prompt: str) -> str:
return getpass.getpass(prompt=prompt)
@staticmethod
def ask_passwords():
""" prompt for connection and become passwords if needed """
op = context.CLIARGS
sshpass = None
becomepass = None
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
try:
become_prompt = "%s password: " % become_prompt_method
if op['ask_pass']:
sshpass = CLI._get_secret("SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
elif op['connection_password_file']:
sshpass = CLI.get_password_from_file(op['connection_password_file'])
if op['become_ask_pass']:
becomepass = CLI._get_secret(become_prompt)
if op['ask_pass'] and becomepass == '':
becomepass = sshpass
elif op['become_password_file']:
becomepass = CLI.get_password_from_file(op['become_password_file'])
except EOFError:
pass
return sshpass, becomepass
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
    """Validate parsed options for conflicting/invalid combinations.

    Returns the options unchanged; on invalid input the parser aborts
    the program via ``parser.error``.
    """
    # only forks currently has a cross-option constraint to enforce
    if fork_opts and op.forks < 1:
        self.parser.error("The number of processes (--forks) must be >= 1")
    return op
@abstractmethod
def init_parser(self, desc=None, epilog=None):
    """
    Create an options parser for most ansible scripts

    Subclasses need to implement this method. They will usually call the base class's
    init_parser to create a basic version and then add their own options on top of that.

    An implementation will look something like this::

        def init_parser(self):
            super(MyCLI, self).init_parser(desc='The purpose of the program is...')
            ansible.arguments.option_helpers.add_runas_options(self.parser)
            self.parser.add_option('--my-option', dest='my_option', action='store')
    """
    # Base implementation: build the shared parser; subclasses extend it
    # with their own options after calling super().
    self.parser = opt_help.create_base_parser(self.name, desc=desc, epilog=epilog)
@abstractmethod
def post_process_args(self, options):
    """Validate and transform the parsed command line arguments.

    Subclasses must implement this, usually calling the base class version
    first and then applying their own checks/transformations, e.g.::

        def post_process_args(self, options):
            options = super(MyCLI, self).post_process_args(options)
            if options.addition and options.subtraction:
                raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
            if isinstance(options.listofhosts, str):
                options.listofhosts = options.listofhosts.split(',')
            return options
    """
    # --tags is additive, so the real default ('all') cannot be set at
    # option-definition time; apply it here when nothing was supplied,
    # then normalize every comma separated group into a de-duplicated list.
    if hasattr(options, 'tags'):
        if not options.tags:
            options.tags = ['all']
        options.tags = list({tag.strip() for group in options.tags for tag in group.split(u',')})

    # --skip-tags gets the same comma splitting / de-duplication treatment
    if hasattr(options, 'skip_tags') and options.skip_tags:
        options.skip_tags = list({tag.strip() for group in options.skip_tags for tag in group.split(u',')})

    # strip trailing slashes from path-like args of install/download actions
    if hasattr(options, 'action') and options.action in ('install', 'download') and hasattr(options, 'args'):
        options.args = [arg.rstrip("/") for arg in options.args]

    # normalize inventory sources, except for CLIs that opt out
    if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
        if not options.inventory:
            options.inventory = C.DEFAULT_HOST_LIST
        else:
            if isinstance(options.inventory, str):
                options.inventory = [options.inventory]
            # expand to full paths, but leave comma separated host lists alone
            options.inventory = [source if ',' in source else unfrackpath(source, follow=False) for source in options.inventory]
    return options
def parse(self):
    """Parse the command line args

    This method parses the command line arguments. It uses the parser
    stored in the self.parser attribute and saves the args and options in
    context.CLIARGS.

    Subclasses need to implement two helper methods, init_parser() and post_process_args() which
    are called from this function before and after parsing the arguments.
    """
    self.init_parser()

    # optional shell tab-completion support, when argcomplete is installed
    if HAS_ARGCOMPLETE:
        argcomplete.autocomplete(self.parser)

    try:
        options = self.parser.parse_args(self.args[1:])
    except SystemExit as ex:
        # argparse exits on usage errors; print the full help and force
        # exit status 2 for any non-zero code, then let the exit propagate
        if ex.code != 0:
            self.parser.exit(status=2, message=" \n%s" % self.parser.format_help())
        raise
    options = self.post_process_args(options)
    # freeze the processed options into the global CLIARGS context
    context._init_global_context(options)
@staticmethod
def version_info(gitinfo=False):
    """Return a dict describing the running ansible version.

    :param gitinfo: when True, derive the version string via opt_help.version()
        (expensive call, includes extra detail); otherwise use __version__.
    """
    if gitinfo:
        version_string = opt_help.version()
    else:
        version_string = __version__
    bare_version = version_string.split()[0]
    parts = bare_version.split('.')
    # coerce dotted components to ints where possible; empty pieces become 0
    # and non-numeric pieces (e.g. pre-release tags) are left as strings
    for idx, piece in enumerate(parts):
        if piece == "":
            parts[idx] = 0
        try:
            parts[idx] = int(parts[idx])
        except Exception:
            pass
    # guarantee at least major/minor/revision entries
    while len(parts) < 3:
        parts.append(0)
    return {'string': version_string.strip(),
            'full': bare_version,
            'major': parts[0],
            'minor': parts[1],
            'revision': parts[2]}
@staticmethod
def pager(text):
    """ find reasonable way to display text """
    # this is a much simpler form of what is in pydoc.py
    if not sys.stdout.isatty():
        # non-interactive output: never page, just print
        display.display(text, screen_only=True)
    elif CLI.PAGER:
        if sys.platform == 'win32':
            # no usable pager on win32; print directly
            display.display(text, screen_only=True)
        else:
            CLI.pager_pipe(text)
    else:
        # no PAGER configured: probe for 'less' before falling back to plain output
        p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        if p.returncode == 0:
            # NOTE(review): pager_pipe() is defined with a single parameter but is
            # called here with 'less' as a second argument — confirm the intended
            # signature; as written this call raises TypeError.
            CLI.pager_pipe(text, 'less')
        else:
            display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, pager=None):
    """Pipe ``text`` through a pager program.

    :param text: the text to display.
    :param pager: pager command line to run; defaults to ``CLI.PAGER``.
        This optional parameter is backward compatible and fixes the call
        site in :meth:`pager`, which invokes ``CLI.pager_pipe(text, 'less')``
        — previously a ``TypeError`` since this function accepted only one
        argument.

    Pager failures (or the user interrupting the pager) are deliberately
    ignored: paging output is best-effort.
    """
    if pager is None:
        pager = CLI.PAGER
    if 'less' in pager:
        # configure less via environment so long lines/colors behave
        os.environ['LESS'] = CLI.LESS_OPTS
    try:
        cmd = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
        cmd.communicate(input=to_bytes(text))
    except (OSError, KeyboardInterrupt):
        pass
def _play_prereqs(self):
    """Build the (loader, inventory, variable_manager) trio shared by play-running CLIs."""
    # TODO: evaluate moving all of the code that touches ``AnsibleCollectionConfig``
    # into ``init_plugin_loader`` so that we can specifically remove
    # ``AnsibleCollectionConfig.playbook_paths`` to make it immutable after instantiation
    options = context.CLIARGS

    # all needs loader
    loader = DataLoader()

    basedir = options.get('basedir', False)
    if basedir:
        loader.set_basedir(basedir)
        add_all_plugin_dirs(basedir)
        AnsibleCollectionConfig.playbook_paths = basedir
        # running from inside a collection checkout implies a default collection
        default_collection = _get_collection_name_from_path(basedir)
        if default_collection:
            display.warning(u'running with default collection {0}'.format(default_collection))
            AnsibleCollectionConfig.default_collection = default_collection

    # configured default vault identities take precedence over CLI-supplied ones
    vault_ids = list(options['vault_ids'])
    default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
    vault_ids = default_vault_ids + vault_ids

    vault_secrets = CLI.setup_vault_secrets(loader,
                                            vault_ids=vault_ids,
                                            vault_password_files=list(options['vault_password_files']),
                                            ask_vault_pass=options['ask_vault_pass'],
                                            auto_prompt=False)
    loader.set_vault_secrets(vault_secrets)

    if self.USES_CONNECTION:
        _agent_launch.launch_ssh_agent()

    # create the inventory, and filter it based on the subset specified (if any)
    inventory = InventoryManager(loader=loader, sources=options['inventory'], cache=(not options.get('flush_cache')))

    # create the variable manager, which will be shared throughout
    # the code, ensuring a consistent view of global variables
    variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))

    # flush fact cache if requested
    if options['flush_cache']:
        CLI._flush_cache(inventory, variable_manager)

    return loader, inventory, variable_manager
@staticmethod
def _flush_cache(inventory, variable_manager):
    """Drop all cached facts: the implicit localhost plus every inventory host."""
    # the implicit localhost is not returned by list_hosts(); clear it explicitly
    variable_manager.clear_facts('localhost')
    for host in inventory.list_hosts():
        variable_manager.clear_facts(host.get_name())
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
    """Apply --limit and return the hosts matching ``pattern``, erroring when none remain.

    An entirely empty inventory only warns (implicit localhost still exists);
    a non-empty inventory that matches nothing is an error.
    """
    inventory_was_empty = not inventory.list_hosts()
    if inventory_was_empty and C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
        display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")

    inventory.subset(subset)

    hosts = inventory.list_hosts(pattern)
    if not hosts and not inventory_was_empty:
        raise AnsibleError("Specified inventory, host pattern and/or --limit leaves us with no hosts to target.")

    return hosts
@staticmethod
def get_password_from_file(pwd_file: str) -> str:
    """Obtain a password from a file, an executable script, or stdin (``-``)."""
    b_pwd_file = to_bytes(pwd_file)
    if b_pwd_file == b'-':
        # '-' means: read the password from stdin, as raw bytes
        secret = sys.stdin.buffer.read()
    elif not os.path.exists(b_pwd_file):
        raise AnsibleError("The password file %s was not found" % pwd_file)
    elif is_executable(b_pwd_file):
        # an executable file is run, and its stdout becomes the password
        display.vvvv(u'The password file %s is a script.' % to_text(pwd_file))
        try:
            proc = subprocess.Popen([b_pwd_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as e:
            raise AnsibleError("Problem occurred when trying to run the password script %s (%s)."
                               " If this is not a script, remove the executable bit from the file." % (pwd_file, e))
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise AnsibleError("The password script %s returned an error (rc=%s): %s" % (pwd_file, proc.returncode, to_text(stderr)))
        secret = stdout
    else:
        # a plain file: its stripped content is the password
        try:
            with open(b_pwd_file, "rb") as password_file:
                secret = password_file.read().strip()
        except OSError as ex:
            raise AnsibleError(f"Could not read password file {pwd_file!r}.") from ex

    # normalize trailing newlines from all sources
    secret = secret.strip(b'\r\n')
    if not secret:
        raise AnsibleError('Empty password was provided from file (%s)' % pwd_file)
    return to_text(secret)
@classmethod
def cli_executor(cls, args=None):
    """Console-script entry point: build the CLI, run it, and exit with a status code."""
    if args is None:
        args = sys.argv

    try:
        display.debug("starting run")

        ansible_dir = Path(C.ANSIBLE_HOME).expanduser()
        try:
            ansible_dir.mkdir(mode=0o700, exist_ok=True)
        except OSError as ex:
            # a missing/uncreatable ANSIBLE_HOME is not fatal; warn and continue
            display.error_as_warning(f"Failed to create the directory {ansible_dir!r}.", ex)
        else:
            display.debug("Created the '%s' directory" % ansible_dir)

        cli = cls(args)
        exit_code = cli.run()
    except AnsibleError as ex:
        display.error(ex)
        exit_code = ex._exit_code
    except KeyboardInterrupt:
        display.error("User interrupted execution")
        exit_code = ExitCode.KEYBOARD_INTERRUPT
    except Exception as ex:
        # wrap unexpected exceptions so they are reported consistently,
        # chaining the original exception as the cause
        try:
            raise AnsibleError("Unexpected Exception, this is probably a bug.") from ex
        except AnsibleError as ex2:
            # DTFIX-FUTURE: clean this up so we're not hacking the internals- re-wrap in an AnsibleCLIUnhandledError that always shows TB, or?
            from ansible.module_utils._internal import _traceback
            # force traceback display for unexpected (bug-level) errors
            _traceback._is_traceback_enabled = lambda *_args, **_kwargs: True
            display.error(ex2)
            exit_code = ExitCode.UNKNOWN_ERROR

    sys.exit(exit_code)
| CLI |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_poly_loading.py | {
"start": 44525,
"end": 49815
class ____(fixtures.DeclarativeMappedTest):
    """test #7304 and related cases

    in this case we trigger the subclass attribute load, while at the same
    time there will be a deferred loader option present in the state's
    options that was established by the previous loader.

    test both that the option takes effect (i.e. raiseload) and that a deferred
    loader doesn't interfere with the mapper's load of the attribute.
    """

    @classmethod
    def setup_classes(cls):
        # Parent -> Entity (polymorphic base) -> SubEntity, where SubEntity
        # adds a `name` column on its own table (joined-table inheritance),
        # so loading `name` requires a trip to the sub_entity table.
        Base = cls.DeclarativeBasic

        class Parent(Base):
            __tablename__ = "parent"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )
            entity_id = Column(ForeignKey("entity.id"))
            entity = relationship("Entity")

        class Entity(Base):
            __tablename__ = "entity"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )
            type = Column(String(32))
            __mapper_args__ = {
                "polymorphic_on": type,
                "polymorphic_identity": "entity",
            }

        class SubEntity(Entity):
            __tablename__ = "sub_entity"
            id = Column(ForeignKey(Entity.id), primary_key=True)
            name = Column(String(32))
            __mapper_args__ = {"polymorphic_identity": "entity_two"}

    @classmethod
    def insert_data(cls, connection):
        # a single Parent whose related entity is a SubEntity
        Parent, SubEntity = cls.classes("Parent", "SubEntity")

        with Session(connection) as session:
            session.add(Parent(entity=SubEntity(name="some name")))
            session.commit()

    @testing.combinations(
        defaultload,
        joinedload,
        selectinload,
        lazyload,
        argnames="first_option",
    )
    @testing.combinations(
        ("load_only", "id", True),
        ("defer", "name", True),
        ("undefer", "name", True),
        ("raise", "name", False),
        (None, None, True),
        # these don't seem possible at the moment as the "type" column
        # doesn't load and it can't recognize the polymorphic identity.
        # we assume load_only() is smart enough to include this column
        # ("defer", '*', True),
        # ("undefer", '*', True),
        # ("raise", '*', False),
        argnames="second_option,second_argument,expect_load",
    )
    def test_subclass_loadattr(
        self, first_option, second_option, second_argument, expect_load
    ):
        Parent, Entity, SubEntity = self.classes(
            "Parent", "Entity", "SubEntity"
        )

        stmt = select(Parent)

        # defaultload/lazyload emit a separate SELECT for Parent.entity;
        # joinedload/selectinload load it up front
        will_lazyload = first_option in (defaultload, lazyload)

        if second_argument == "name":
            second_argument = SubEntity.name
            opt = first_option(Parent.entity.of_type(SubEntity))
        elif second_argument == "id":
            opt = first_option(Parent.entity)
            second_argument = Entity.id
        else:
            opt = first_option(Parent.entity)

        if second_option is None:
            sub_opt = opt
        elif second_option == "raise":
            sub_opt = opt.defer(second_argument, raiseload=True)
        else:
            sub_opt = getattr(opt, second_option)(second_argument)

        stmt = stmt.options(sub_opt)

        session = fixture_session()
        result = session.execute(stmt).scalars()

        parent_obj = result.first()

        entity_id = parent_obj.__dict__["entity_id"]

        # touching .name triggers (or refuses, for raiseload) the
        # subclass-attribute load under the options established above
        with assertsql.assert_engine(testing.db) as asserter_:
            if expect_load:
                eq_(parent_obj.entity.name, "some name")
            else:
                with expect_raises_message(
                    exc.InvalidRequestError,
                    "'SubEntity.name' is not available due to raiseload=True",
                ):
                    parent_obj.entity.name

        expected = []

        if will_lazyload:
            expected.append(
                CompiledSQL(
                    "SELECT entity.id, "
                    "entity.type FROM entity "
                    "WHERE entity.id = :pk_1",
                    [{"pk_1": entity_id}],
                )
            )

        if second_option in ("load_only", None) or (
            second_option == "undefer"
            and first_option in (defaultload, lazyload)
        ):
            # load will be a mapper optimized load for the name alone
            expected.append(
                CompiledSQL(
                    "SELECT sub_entity.name AS sub_entity_name "
                    "FROM sub_entity "
                    "WHERE :param_1 = sub_entity.id",
                    [{"param_1": entity_id}],
                )
            )
        elif second_option == "defer":
            # load will be a deferred load. this is because the explicit
            # call to the deferred load put a deferred loader on the attribute
            expected.append(
                CompiledSQL(
                    "SELECT sub_entity.name AS sub_entity_name "
                    "FROM sub_entity "
                    "WHERE :param_1 = sub_entity.id",
                    [{"param_1": entity_id}],
                )
            )

        asserter_.assert_(*expected)
| IgnoreOptionsOnSubclassAttrLoad |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/base.py | {
"start": 15812,
"end": 19153
class ____:
    """
    Draw a border around any container, optionally with a title text.

    The ``title`` and ``body`` attributes may be reassigned at runtime to
    change what the frame displays.

    :param body: Another container object.
    :param title: Text to be displayed in the top of the frame (can be formatted text).
    :param style: Style string to be applied to this widget.
    """

    def __init__(
        self,
        body: AnyContainer,
        title: AnyFormattedText = "",
        style: str = "",
        width: AnyDimension = None,
        height: AnyDimension = None,
        key_bindings: KeyBindings | None = None,
        modal: bool = False,
    ) -> None:
        self.title = title
        self.body = body

        border = partial(Window, style="class:frame.border")
        style = "class:frame " + style

        # Top border variant that embeds the (dynamically evaluated) title.
        titled_top = VSplit(
            [
                border(width=1, height=1, char=Border.TOP_LEFT),
                border(char=Border.HORIZONTAL),
                border(width=1, height=1, char="|"),
                # `Template` is used because `self.title` can be formatted
                # text such as an `HTML` object.
                Label(
                    lambda: Template(" {} ").format(self.title),
                    style="class:frame.label",
                    dont_extend_width=True,
                ),
                border(width=1, height=1, char="|"),
                border(char=Border.HORIZONTAL),
                border(width=1, height=1, char=Border.TOP_RIGHT),
            ],
            height=1,
        )

        # Plain top border, shown when there is no title.
        plain_top = VSplit(
            [
                border(width=1, height=1, char=Border.TOP_LEFT),
                border(char=Border.HORIZONTAL),
                border(width=1, height=1, char=Border.TOP_RIGHT),
            ],
            height=1,
        )

        @Condition
        def has_title() -> bool:
            return bool(self.title)

        self.container = HSplit(
            [
                ConditionalContainer(
                    content=titled_top,
                    filter=has_title,
                    alternative_content=plain_top,
                ),
                VSplit(
                    [
                        border(width=1, char=Border.VERTICAL),
                        DynamicContainer(lambda: self.body),
                        border(width=1, char=Border.VERTICAL),
                        # Zero padding keeps the right border aligned even
                        # when the body content is too small.
                    ],
                    padding=0,
                ),
                VSplit(
                    [
                        border(width=1, height=1, char=Border.BOTTOM_LEFT),
                        border(char=Border.HORIZONTAL),
                        border(width=1, height=1, char=Border.BOTTOM_RIGHT),
                    ],
                    # an explicit height speeds up rendering
                    height=1,
                ),
            ],
            width=width,
            height=height,
            style=style,
            key_bindings=key_bindings,
            modal=modal,
        )

    def __pt_container__(self) -> Container:
        return self.container
| Frame |
python | tox-dev__tox | src/tox/journal/env.py | {
"start": 181,
"end": 2152
class ____:
    """Report the status of a tox environment."""

    def __init__(self, enabled: bool, name: str) -> None:  # noqa: FBT001
        self._enabled = enabled
        self.name = name
        # explicit entries set via __setitem__
        self._content: dict[str, Any] = {}
        # recorded (run_id, outcome) pairs, in execution order
        self._executes: list[tuple[str, Outcome]] = []

    def __setitem__(self, key: str, value: Any) -> None:
        """
        Add a new entry under key into the event journal.

        :param key: the key under what to add the data
        :param value: the data to add
        """
        self._content[key] = value

    def __bool__(self) -> bool:
        """:return: a flag indicating if the event journal is on or not"""
        return self._enabled

    def add_execute(self, outcome: Outcome, run_id: str) -> None:
        """
        Add a command execution to the journal.

        :param outcome: the execution outcome
        :param run_id: the execution id
        """
        self._executes.append((run_id, outcome))

    @property
    def content(self) -> dict[str, Any]:
        """:return: the env journal content (merges explicit keys and execution commands)"""
        tests: list[dict[str, Any]] = []
        setup: list[dict[str, Any]] = []
        for run_id, outcome in self._executes:
            entry = {
                "command": outcome.cmd,
                "output": outcome.out,
                "err": outcome.err,
                "retcode": outcome.exit_code,
                "elapsed": outcome.elapsed,
                "show_on_standard": outcome.show_on_standard,
                "run_id": run_id,
                "start": outcome.start,
                "end": outcome.end,
            }
            # executions from the commands/build phases count as tests,
            # everything else is environment setup
            bucket = tests if run_id.startswith(("commands", "build")) else setup
            bucket.append(entry)
        if tests:
            self["test"] = tests
        if setup:
            self["setup"] = setup
        return self._content
__all__ = ("EnvJournal",)
| EnvJournal |
python | realpython__materials | python-enum/inheritance.py | {
"start": 201,
"end": 347
class ____(BaseTextEnum):
    # Enum members: each value holds the full alphabet in one case.
    LOWERCASE = string.ascii_lowercase
    UPPERCASE = string.ascii_uppercase


# NOTE(review): this refers to the enum by the name `Alphabet` — confirm the
# class above is meant to be named `Alphabet` (as written here its name is
# the placeholder `____`, so this call would raise NameError).
print(Alphabet.LOWERCASE.as_list())
| Alphabet |
python | kubernetes-client__python | kubernetes/client/models/v1_deployment_spec.py | {
"start": 383,
"end": 11289
class ____(object):
    """V1DeploymentSpec — Kubernetes API model.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech). Do not edit the class manually.

    Attributes:
        openapi_types (dict): attribute name -> declared attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    openapi_types = {
        'min_ready_seconds': 'int',
        'paused': 'bool',
        'progress_deadline_seconds': 'int',
        'replicas': 'int',
        'revision_history_limit': 'int',
        'selector': 'V1LabelSelector',
        'strategy': 'V1DeploymentStrategy',
        'template': 'V1PodTemplateSpec'
    }

    attribute_map = {
        'min_ready_seconds': 'minReadySeconds',
        'paused': 'paused',
        'progress_deadline_seconds': 'progressDeadlineSeconds',
        'replicas': 'replicas',
        'revision_history_limit': 'revisionHistoryLimit',
        'selector': 'selector',
        'strategy': 'strategy',
        'template': 'template'
    }

    def __init__(self, min_ready_seconds=None, paused=None, progress_deadline_seconds=None, replicas=None, revision_history_limit=None, selector=None, strategy=None, template=None, local_vars_configuration=None):  # noqa: E501
        """V1DeploymentSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # private storage backing the properties below
        self._min_ready_seconds = None
        self._paused = None
        self._progress_deadline_seconds = None
        self._replicas = None
        self._revision_history_limit = None
        self._selector = None
        self._strategy = None
        self._template = None
        self.discriminator = None

        # optional fields are only assigned when explicitly provided
        if min_ready_seconds is not None:
            self.min_ready_seconds = min_ready_seconds
        if paused is not None:
            self.paused = paused
        if progress_deadline_seconds is not None:
            self.progress_deadline_seconds = progress_deadline_seconds
        if replicas is not None:
            self.replicas = replicas
        if revision_history_limit is not None:
            self.revision_history_limit = revision_history_limit
        # selector/template are required and always routed through their
        # (validating) setters; strategy is optional
        self.selector = selector
        if strategy is not None:
            self.strategy = strategy
        self.template = template

    @property
    def min_ready_seconds(self):
        """int: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)."""
        return self._min_ready_seconds

    @min_ready_seconds.setter
    def min_ready_seconds(self, min_ready_seconds):
        """Set min_ready_seconds (int)."""
        self._min_ready_seconds = min_ready_seconds

    @property
    def paused(self):
        """bool: Indicates that the deployment is paused."""
        return self._paused

    @paused.setter
    def paused(self, paused):
        """Set paused (bool)."""
        self._paused = paused

    @property
    def progress_deadline_seconds(self):
        """int: The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s."""
        return self._progress_deadline_seconds

    @progress_deadline_seconds.setter
    def progress_deadline_seconds(self, progress_deadline_seconds):
        """Set progress_deadline_seconds (int)."""
        self._progress_deadline_seconds = progress_deadline_seconds

    @property
    def replicas(self):
        """int: Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1."""
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Set replicas (int)."""
        self._replicas = replicas

    @property
    def revision_history_limit(self):
        """int: The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10."""
        return self._revision_history_limit

    @revision_history_limit.setter
    def revision_history_limit(self, revision_history_limit):
        """Set revision_history_limit (int)."""
        self._revision_history_limit = revision_history_limit

    @property
    def selector(self):
        """V1LabelSelector: the label selector for this deployment (required)."""
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Set selector (V1LabelSelector); required, may not be None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and selector is None:  # noqa: E501
            raise ValueError("Invalid value for `selector`, must not be `None`")  # noqa: E501

        self._selector = selector

    @property
    def strategy(self):
        """V1DeploymentStrategy: the deployment strategy to use."""
        return self._strategy

    @strategy.setter
    def strategy(self, strategy):
        """Set strategy (V1DeploymentStrategy)."""
        self._strategy = strategy

    @property
    def template(self):
        """V1PodTemplateSpec: the pod template (required)."""
        return self._template

    @template.setter
    def template(self, template):
        """Set template (V1PodTemplateSpec); required, may not be None when client-side validation is on."""
        if self.local_vars_configuration.client_side_validation and template is None:  # noqa: E501
            raise ValueError("Invalid value for `template`, must not be `None`")  # noqa: E501

        self._template = template

    def to_dict(self):
        """Return the model properties as a dict, recursively serializing nested models."""

        def _serialize(value):
            # lists and dicts may hold nested models; anything exposing
            # to_dict() is serialized recursively
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v) for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when both objects are of this type and serialize identically."""
        if not isinstance(other, V1DeploymentSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Return True when the objects are not equal."""
        return not self == other
| V1DeploymentSpec |
python | keras-team__keras | keras/src/layers/convolutional/base_separable_conv.py | {
"start": 573,
"end": 12634
} | class ____(Layer):
"""Abstract base layer for separable convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels. If
`use_bias` is True and a bias initializer is provided, it adds a bias vector
to the output.
Args:
rank: int, the rank of the convolution, e.g. 2 for 2D convolution.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `input_channel * depth_multiplier`.
filters: int, the dimensionality of the output space (i.e. the number
of filters in the pointwise convolution).
kernel_size: int or tuple/list of `rank` integers, specifying the size
of the depthwise convolution window.
strides: int or tuple/list of `rank` integers, specifying the stride
length of the depthwise convolution. If only one int is specified,
the same stride size will be used for all dimensions.
`stride value != 1` is incompatible with `dilation_rate != 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input. When `padding="same"` and
`strides=1`, the output has the same size as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of `rank` integers, specifying the
dilation rate to use for dilated convolution. If only one int is
specified, the same dilation rate will be used for all dimensions.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
depthwise_initializer: An initializer for the depthwise convolution
kernel. If None, then the default initializer (`"glorot_uniform"`)
will be used.
pointwise_initializer: An initializer for the pointwise convolution
kernel. If None, then the default initializer (`"glorot_uniform"`)
will be used.
bias_initializer: An initializer for the bias vector. If None, the
default initializer ('"zeros"') will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used
for norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape).
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
"""
def __init__(
self,
rank,
depth_multiplier,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
depthwise_initializer="glorot_uniform",
pointwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs,
):
super().__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs,
)
self.rank = rank
self.depth_multiplier = depth_multiplier
self.filters = filters
self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
self.strides = standardize_tuple(strides, rank, "strides")
self.dilation_rate = standardize_tuple(
dilation_rate, rank, "dilation_rate"
)
self.padding = standardize_padding(padding)
self.data_format = standardize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.pointwise_initializer = initializers.get(pointwise_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.pointwise_constraint = constraints.get(pointwise_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.data_format = self.data_format
self.input_spec = InputSpec(min_ndim=self.rank + 2)
if self.depth_multiplier is not None and self.depth_multiplier <= 0:
raise ValueError(
"Invalid value for argument `depth_multiplier`. Expected a "
"strictly positive value. Received "
f"depth_multiplier={self.depth_multiplier}."
)
if self.filters is not None and self.filters <= 0:
raise ValueError(
"Invalid value for argument `filters`. Expected a strictly "
f"positive value. Received filters={self.filters}."
)
if not all(self.kernel_size):
raise ValueError(
"The argument `kernel_size` cannot contain 0. Received: "
f"kernel_size={self.kernel_size}."
)
if not all(self.strides):
raise ValueError(
"The argument `strides` cannot contains 0(s). Received: "
f"strides={self.strides}"
)
if max(self.strides) > 1 and max(self.dilation_rate) > 1:
raise ValueError(
"`strides > 1` not supported in conjunction with "
f"`dilation_rate > 1`. Received: strides={self.strides} and "
f"dilation_rate={self.dilation_rate}"
)
def build(self, input_shape):
if self.data_format == "channels_last":
channel_axis = -1
input_channel = input_shape[-1]
else:
channel_axis = 1
input_channel = input_shape[1]
self.input_spec = InputSpec(
min_ndim=self.rank + 2, axes={channel_axis: input_channel}
)
depthwise_kernel_shape = self.kernel_size + (
input_channel,
self.depth_multiplier,
)
pointwise_kernel_shape = (1,) * self.rank + (
self.depth_multiplier * input_channel,
self.filters,
)
self.depthwise_kernel = self.add_weight(
name="depthwise_kernel",
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype,
)
self.pointwise_kernel = self.add_weight(
name="pointwise_kernel",
shape=pointwise_kernel_shape,
initializer=self.pointwise_initializer,
regularizer=self.pointwise_regularizer,
constraint=self.pointwise_constraint,
trainable=True,
dtype=self.dtype,
)
if self.use_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype,
)
else:
self.bias = None
def call(self, inputs):
outputs = ops.separable_conv(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format,
)
if self.use_bias:
if self.data_format == "channels_last":
bias_shape = (1,) * (self.rank + 1) + (self.filters,)
else:
bias_shape = (1, self.filters) + (1,) * self.rank
bias = ops.reshape(self.bias, bias_shape)
outputs = ops.add(outputs, bias)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
return compute_conv_output_shape(
input_shape,
self.filters,
self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
def get_config(self):
config = super().get_config()
config.update(
{
"depth_multiplier": self.depth_multiplier,
"filters": self.filters,
"kernel_size": self.kernel_size,
"strides": self.strides,
"padding": self.padding,
"data_format": self.data_format,
"dilation_rate": self.dilation_rate,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"depthwise_initializer": initializers.serialize(
self.depthwise_initializer
),
"pointwise_initializer": initializers.serialize(
self.pointwise_initializer
),
"bias_initializer": initializers.serialize(
self.bias_initializer
),
"depthwise_regularizer": regularizers.serialize(
self.depthwise_regularizer
),
"pointwise_regularizer": regularizers.serialize(
self.pointwise_regularizer
),
"bias_regularizer": regularizers.serialize(
self.bias_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"depthwise_constraint": constraints.serialize(
self.depthwise_constraint
),
"pointwise_constraint": constraints.serialize(
self.pointwise_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
)
return config
| BaseSeparableConv |
python | pallets__flask | src/flask/debughelpers.py | {
"start": 330,
"end": 509
} | class ____(AssertionError, UnicodeError):
"""Raised in places where we want some better error reporting for
unexpected unicode or binary data.
"""
| UnexpectedUnicodeError |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 96337,
"end": 98567
} | class ____(SingleContinuousDistribution):
_argnames = ('mu', 's')
@property
def set(self):
return Interval(self.mu - self.s, self.mu + self.s)
@staticmethod
def check(mu, s):
_value_check(s > 0, "s must be positive")
def pdf(self, x):
mu, s = self.mu, self.s
return Piecewise(
((1+cos(pi*(x-mu)/s)) / (2*s), And(mu-s<=x, x<=mu+s)),
(S.Zero, True))
def _characteristic_function(self, t):
mu, s = self.mu, self.s
return Piecewise((exp(-I*pi*mu/s)/2, Eq(t, -pi/s)),
(exp(I*pi*mu/s)/2, Eq(t, pi/s)),
(pi**2*sin(s*t)*exp(I*mu*t) / (s*t*(pi**2 - s**2*t**2)), True))
def _moment_generating_function(self, t):
mu, s = self.mu, self.s
return pi**2 * sinh(s*t) * exp(mu*t) / (s*t*(pi**2 + s**2*t**2))
def RaisedCosine(name, mu, s):
r"""
Create a Continuous Random Variable with a raised cosine distribution.
Explanation
===========
The density of the raised cosine distribution is given by
.. math::
f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right)
with :math:`x \in [\mu-s,\mu+s]`.
Parameters
==========
mu : Real number
s : Real number, `s > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import RaisedCosine, density
>>> from sympy import Symbol, pprint
>>> mu = Symbol("mu", real=True)
>>> s = Symbol("s", positive=True)
>>> z = Symbol("z")
>>> X = RaisedCosine("x", mu, s)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
/ /pi*(-mu + z)\
|cos|------------| + 1
| \ s /
<--------------------- for And(z >= mu - s, z <= mu + s)
| 2*s
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Raised_cosine_distribution
"""
return rv(name, RaisedCosineDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Rayleigh distribution --------------------------------------------------------
| RaisedCosineDistribution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_streams.py | {
"start": 1008,
"end": 1140
} | class ____(FBMarketingStream):
def list_objects(self, params: Mapping[str, Any]) -> Iterable:
yield from []
| SomeTestStream |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_metaclass.py | {
"start": 890,
"end": 1099
} | class ____(metaclass=InvalidAsMetaclass()): # [invalid-metaclass]
pass
def invalid_metaclass_1(name, bases, attrs):
return int
def invalid_metaclass_2(name, bases, attrs):
return 1
| FourthInvalid |
python | google__jax | tests/custom_partitioning_sharding_rule_test.py | {
"start": 3794,
"end": 10603
} | class ____(jtu.JaxTestCase):
def test_rule_is_not_a_str(self):
with self.assertRaisesRegex(TypeError, "rule must be a str"):
str_to_sdy_sharding_rule(1)
def test_factor_sizes_is_not_a_proper_dict(self):
with self.assertRaisesRegex(
TypeError, "factor_sizes must be a dict of str to int"):
str_to_sdy_sharding_rule("i->j", i="j")
def test_sharding_rule_ellipsis_not_complete(self):
with self.assertRaisesRegex(
ValueError, "Character '.' must be used inside ellipsis '...'"):
str_to_sdy_sharding_rule(".i -> j")
def test_sharding_rule_invalid_factor_name(self):
with self.assertRaisesRegex(ValueError, "Factor names have to start with a letter"):
str_to_sdy_sharding_rule("2i -> j")
def test_sharding_rule_missing_results(self):
with self.assertRaisesRegex(ValueError, "There is no -> in rule"):
str_to_sdy_sharding_rule("i")
def test_sharding_rule_inbalenced_brackets(self):
with self.assertRaisesRegex(ValueError, "Brackets are not balanced"):
str_to_sdy_sharding_rule("i j, k)->j")
def test_sharding_rule_inbalenced_brackets2(self):
with self.assertRaisesRegex(ValueError, "Brackets are not balanced"):
str_to_sdy_sharding_rule("i (j k->j")
def test_sharding_rule_empty_compound_dim(self):
with self.assertRaisesRegex(
ValueError, "Brackets should contain at least two factors"):
str_to_sdy_sharding_rule("i ( ) j k->j")
def test_sharding_rule_one_factorcompound_dim(self):
with self.assertRaisesRegex(
ValueError, "Brackets should contain at least two factors"):
str_to_sdy_sharding_rule("i (j ) k->j")
def test_sharding_rule_nested_brackets(self):
with self.assertRaisesRegex(
ValueError, "Compound factors should be one level"):
str_to_sdy_sharding_rule("i (j (k))->j")
def test_sharding_rule_unknown_char(self):
with self.assertRaisesRegex(ValueError, "Unknown character"):
str_to_sdy_sharding_rule("i; j->j")
def test_sharding_rule_unknown_single_char_ellipse(self):
with self.assertRaisesRegex(ValueError, "Unknown character"):
str_to_sdy_sharding_rule("…j->…j")
def test_sharding_rule_ellipsis_not_leading_dim(self):
with self.assertRaisesRegex(
ValueError, "Ellipsis can only be used at the beginning of a dimension"):
str_to_sdy_sharding_rule("i ... -> j")
def test_sharding_rule_ellipsis_inside_compound_dim(self):
with self.assertRaisesRegex(
ValueError, "Ellipsis can only be used at the beginning of a dimension"):
str_to_sdy_sharding_rule("i, (..., j) -> j")
def test_sharding_rule_redcution_factors_is_not_used(self):
with self.assertRaisesRegex(
ValueError, "Factor k in reduction_factors is not used"):
str_to_sdy_sharding_rule("i -> j", reduction_factors=("k",))
def test_sharding_rule_need_replication_factors_is_not_used(self):
with self.assertRaisesRegex(
ValueError, "Factor k in need_replication_factors is not used"):
str_to_sdy_sharding_rule("(i, j) -> (j, i)", need_replication_factors=("k",), i=10, j=20)
def test_sharding_rule_permutation_factors_must_be_a_tuple_of_factors(self):
with self.assertRaisesRegex(
ValueError, "permutation_factors must be a tuple of factors"):
str_to_sdy_sharding_rule("i j -> j", permutation_factors=3)
def test_sharding_rule_factor_used_in_multiple_special_factors(self):
with self.assertRaisesRegex(
ValueError, "Factor i can only be in one of the reduction, need "
"replication, or permutation factor sets"):
str_to_sdy_sharding_rule("i -> j", reduction_factors=("i",), need_replication_factors=("i",))
def test_sharding_rule_duplicated_factors_in_special_factors(self):
with self.assertRaisesRegex(
ValueError, "reduction_factors contains duplicated factors"):
str_to_sdy_sharding_rule("i -> j", reduction_factors=("i", "j", "i"))
def test_sharding_rule_scalar_operand_scalar_result(self):
rule = str_to_sdy_sharding_rule("->")
self.assertEqual(str(rule), "SdyShardingRule(((),), ((),), {})")
def test_sharding_rule_one_scalar_operand(self):
rule = str_to_sdy_sharding_rule("i j, , k->j")
self.assertEqual(
str(rule), "SdyShardingRule((('i', 'j'), (), ('k',)), (('j',),), {})")
def test_sharding_rule_factor_elementwise_add(self):
# An ellipsis without a number ... is treated as the same as ...0.
rule = str_to_sdy_sharding_rule("...0 i j, ...1 i j -> ...i j")
self.assertEqual(
str(rule),
"SdyShardingRule((('…0', 'i', 'j'), ('…1', 'i', 'j')), (('…0', 'i',"
" 'j'),), {})")
def test_sharding_rule_factor_vector_scalar_add(self):
rule = str_to_sdy_sharding_rule("...87 i, -> ...87 i")
self.assertEqual(
str(rule),
"SdyShardingRule((('…87', 'i'), ()), (('…87', 'i'),), {})")
def test_sharding_rule_factor_reshape_combining(self):
rule = str_to_sdy_sharding_rule("i j -> (i j)")
self.assertEqual(
str(rule), "SdyShardingRule((('i', 'j'),), ((('i', 'j'),),), {})")
def test_sharding_rule_factor_reshape_reordering(self):
rule = str_to_sdy_sharding_rule("(j i) -> (i j)", i=10, j=20)
self.assertEqual(
str(rule),
"SdyShardingRule(((('j', 'i'),),), ((('i', 'j'),),), {'i': 10, 'j':"
" 20})")
def test_sharding_rule_factor_compound_then_individual(self):
rule = str_to_sdy_sharding_rule("(i j) (j k) i -> j k")
self.assertEqual(
str(rule),
"SdyShardingRule(((('i', 'j'), ('j', 'k'), 'i'),), (('j', 'k'),), {})")
def test_sharding_rule_factor_individual_then_compound(self):
rule = str_to_sdy_sharding_rule("i j k -> (i j) (j k)")
self.assertEqual(
str(rule),
"SdyShardingRule((('i', 'j', 'k'),), ((('i', 'j'), ('j', 'k')),), {})")
def test_sharding_rule_factor_infer_k(self):
rule = str_to_sdy_sharding_rule("i_ (j k)-> j foo (m bar_24)", k=10, m=10, bar_24=20)
self.assertEqual(
str(rule),
"SdyShardingRule((('i_', ('j', 'k')),), (('j', 'foo', ('m', 'bar_24'))"
",), {'k': 10, 'm': 10, 'bar_24': 20})")
def test_sharding_rule_with_special_factors(self):
rule = str_to_sdy_sharding_rule("i_ (j k)-> j foo (m bar_24)", k=10, m=10, bar_24=20,
need_replication_factors=("m",),
permutation_factors=("j",),
reduction_factors=("k", "bar_24"))
self.assertEqual(
str(rule),
"SdyShardingRule((('i_', ('j', 'k')),), (('j', 'foo', ('m', 'bar_24'))"
",), {'k': 10, 'm': 10, 'bar_24': 20} "
"reduction_factors=('k', 'bar_24') "
"need_replication_factors=('m',) permutation_factors=('j',))")
| StrToSdyShardingRuleTest |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_team_details.py | {
"start": 440,
"end": 2769
} | class ____(ProjectTeamDetailsTest):
method = "post"
def test_add_team(self) -> None:
project = self.create_project()
team = self.create_team()
self.get_success_response(
project.organization.slug,
project.slug,
team.slug,
status_code=status.HTTP_201_CREATED,
)
team = self.create_team()
self.get_success_response(
project.organization.slug,
project.slug,
team.id,
status_code=status.HTTP_201_CREATED,
)
assert ProjectTeam.objects.filter(project=project, team=team).exists()
def test_add_team_not_found(self) -> None:
project = self.create_project()
response = self.get_error_response(
project.organization.slug,
project.slug,
"not-a-team",
status_code=status.HTTP_404_NOT_FOUND,
)
assert response.data["detail"] == "Team does not exist."
@with_feature("organizations:team-roles")
def test_add_team_with_team_role(self) -> None:
user = self.create_user(username="foo")
team_to_add = self.create_team(organization=self.organization)
team_1 = self.create_team(organization=self.organization, slug="admin-team")
team_2 = self.create_team(organization=self.organization, slug="contri-team")
project_1 = self.create_project(organization=self.organization, teams=[team_1])
project_2 = self.create_project(organization=self.organization, teams=[team_2])
self.create_member(user=user, organization=self.organization, role="member")
self.create_team_membership(user=user, team=team_1, role="admin")
self.create_team_membership(user=user, team=team_2)
self.login_as(user=user)
# Team Admin grant access to other teams
self.get_success_response(
self.organization.slug,
project_1.slug,
team_to_add.slug,
status_code=status.HTTP_201_CREATED,
)
# Team Contributor cannot grant access to other teams
self.get_error_response(
self.organization.slug,
project_2.slug,
team_to_add.slug,
status_code=status.HTTP_403_FORBIDDEN,
)
| ProjectTeamDetailsPostTest |
python | PrefectHQ__prefect | scripts/generate_cli_docs.py | {
"start": 496,
"end": 614
} | class ____(TypedDict):
"""A dictionary representing a command argument."""
name: str
help: str
| ArgumentDict |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 690878,
"end": 691608
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for Mannequin."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("MannequinEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Mannequin"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| MannequinConnection |
python | wandb__wandb | wandb/sdk/internal/sender.py | {
"start": 4697,
"end": 5586
} | class ____:
_stopped: threading.Event
_queue: queue.Queue
_emulator: redirect.TerminalEmulator
_writer_thr: threading.Thread
_reader_thr: threading.Thread
def __init__(self, stream: str, sm: "SendManager"):
self._stopped = threading.Event()
self._queue = queue.Queue()
self._emulator = redirect.TerminalEmulator()
self._writer_thr = threading.Thread(
target=sm._output_raw_writer_thread,
kwargs=dict(stream=stream),
daemon=True,
name=f"OutRawWr-{stream}",
)
self._reader_thr = threading.Thread(
target=sm._output_raw_reader_thread,
kwargs=dict(stream=stream),
daemon=True,
name=f"OutRawRd-{stream}",
)
def start(self) -> None:
self._writer_thr.start()
self._reader_thr.start()
| _OutputRawStream |
python | chroma-core__chroma | chromadb/types.py | {
"start": 6520,
"end": 6592
} | class ____(TypedDict):
id: UUID
name: str
tenant: str
| Database |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 8995,
"end": 9415
} | class ____(SearchField):
field_type = "integer"
def __init__(self, **kwargs):
if kwargs.get("facet_class") is None:
kwargs["facet_class"] = FacetIntegerField
super().__init__(**kwargs)
def prepare(self, obj):
return self.convert(super().prepare(obj))
def convert(self, value):
if value is None:
return None
return int(value)
| IntegerField |
python | keras-team__keras | keras/src/metrics/hinge_metrics_test.py | {
"start": 2760,
"end": 4801
} | class ____(testing.TestCase):
def test_config(self):
cat_hinge_obj = hinge_metrics.CategoricalHinge(
name="cat_hinge", dtype="int32"
)
self.assertEqual(cat_hinge_obj.name, "cat_hinge")
self.assertEqual(cat_hinge_obj._dtype, "int32")
# Check save and restore config
cat_hinge_obj2 = hinge_metrics.CategoricalHinge.from_config(
cat_hinge_obj.get_config()
)
self.assertEqual(cat_hinge_obj2.name, "cat_hinge")
self.assertEqual(len(cat_hinge_obj2.variables), 2)
self.assertEqual(cat_hinge_obj2._dtype, "int32")
def test_unweighted(self):
cat_hinge_obj = hinge_metrics.CategoricalHinge()
y_true = np.array(
(
(0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1),
),
dtype="float32",
)
y_pred = np.array(
(
(0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1),
),
dtype="float32",
)
cat_hinge_obj.update_state(y_true, y_pred)
result = cat_hinge_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
cat_hinge_obj = hinge_metrics.CategoricalHinge()
y_true = np.array(
(
(0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1),
),
dtype="float32",
)
y_pred = np.array(
(
(0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1),
),
dtype="float32",
)
sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.5, result, atol=1e-5)
| CategoricalHingeTest |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 14699,
"end": 14841
} | class ____(str, Enum):
NAV = "nav"
DAG = "dag"
DAG_RUN = "dag_run"
TASK = "task"
TASK_INSTANCE = "task_instance"
| Destination |
python | huggingface__transformers | benchmark_v2/framework/hardware_metrics.py | {
"start": 4550,
"end": 6806
} | class ____:
"""Monitor GPU utilization during benchmark execution."""
def __init__(self, sample_interval_sec: float = 0.1, logger: Logger | None = None):
self.sample_interval_sec = sample_interval_sec
self.logger = logger if logger is not None else logging.getLogger(__name__)
self.num_available_gpus = torch.cuda.device_count()
if self.num_available_gpus == 0:
raise RuntimeError("No GPUs detected by torch.cuda.device_count().")
self.gpu_stats_getter = GPUStatsCollector()
def start(self):
"""Start monitoring GPU metrics."""
# Clear the stop event to enable monitoring
self.stop_event = threading.Event()
self.gpu_utilization = []
self.gpu_memory_used = []
self.timestamps = []
self.thread = threading.Thread(target=self._monitor_loop)
self.thread.start()
self.logger.debug("GPU monitoring started")
def stop_and_collect(self) -> GPURawMetrics:
"""Stop monitoring and return collected metrics."""
self.stop_event.set()
self.thread.join()
if self.gpu_utilization:
timestamp_0 = self.timestamps[0]
metrics = GPURawMetrics(
utilization=self.gpu_utilization,
memory_used=self.gpu_memory_used,
timestamps=[t - timestamp_0 for t in self.timestamps],
timestamp_0=timestamp_0,
monitoring_status=GPUMonitoringStatus.SUCCESS,
)
self.logger.debug(f"GPU monitoring completed: {len(self.gpu_utilization)} samples collected")
else:
metrics = GPURawMetrics(monitoring_status=GPUMonitoringStatus.NO_SAMPLES_COLLECTED)
return metrics
def _monitor_loop(self):
"""Background monitoring loop using threading.Event for communication."""
while not self.stop_event.is_set():
utilization, memory_used = self.gpu_stats_getter.get_utilization_and_memory_used()
self.gpu_utilization.append(utilization)
self.gpu_memory_used.append(memory_used)
self.timestamps.append(time.time())
if self.stop_event.wait(timeout=self.sample_interval_sec):
break
| GPUMonitor |
python | docker__docker-py | docker/errors.py | {
"start": 2956,
"end": 3334
} | class ____(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg + (". TLS configurations should map the Docker CLI "
"client configurations. See "
"https://docs.docker.com/engine/articles/https/ "
"for API details.")
| TLSParameterError |
python | tensorflow__tensorflow | tensorflow/python/framework/device_spec.py | {
"start": 1723,
"end": 13870
} | class ____(object):
"""Represents a (possibly partial) specification for a TensorFlow device.
`DeviceSpec`s are used throughout TensorFlow to describe where state is stored
and computations occur. Using `DeviceSpec` allows you to parse device spec
strings to verify their validity, merge them or compose them programmatically.
Example:
```python
# Place the operations on device "GPU:0" in the "ps" job.
device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
with tf.device(device_spec.to_string()):
# Both my_var and squared_var will be placed on /job:ps/device:GPU:0.
my_var = tf.Variable(..., name="my_variable")
squared_var = tf.square(my_var)
```
With eager execution disabled (by default in TensorFlow 1.x and by calling
disable_eager_execution() in TensorFlow 2.x), the following syntax
can be used:
```python
tf.compat.v1.disable_eager_execution()
# Same as previous
device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
# No need of .to_string() method.
with tf.device(device_spec):
my_var = tf.Variable(..., name="my_variable")
squared_var = tf.square(my_var)
```
If a `DeviceSpec` is partially specified, it will be merged with other
`DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`
components defined in inner scopes take precedence over those defined in
outer scopes.
```python
gpu0_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
with tf.device(DeviceSpec(job="train").to_string()):
with tf.device(gpu0_spec.to_string()):
# Nodes created here will be assigned to /job:ps/device:GPU:0.
with tf.device(DeviceSpec(device_type="GPU", device_index=1).to_string()):
# Nodes created here will be assigned to /job:train/device:GPU:1.
```
A `DeviceSpec` consists of 5 components -- each of
which is optionally specified:
* Job: The job name.
* Replica: The replica index.
* Task: The task index.
* Device type: The device type string (e.g. "CPU" or "GPU").
* Device index: The device index.
"""
__slots__ = ("_job", "_replica", "_task", "_device_type", "_device_index",
"_as_string", "_hash")
def __init__(self,
job=None,
replica=None,
task=None,
device_type=None,
device_index=None):
"""Create a new `DeviceSpec` object.
Args:
job: string. Optional job name.
replica: int. Optional replica index.
task: int. Optional task index.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left unspecified, device
represents 'any' device_index.
"""
self._job = _as_str_or_none(job)
self._replica = _as_int_or_none(replica)
self._task = _as_int_or_none(task)
self._device_type = _as_device_str_or_none(device_type)
self._device_index = _as_int_or_none(device_index)
self._as_string = self._components_to_string(
job=self._job,
replica=self._replica,
task=self._task,
device_type=self._device_type,
device_index=self._device_index)
self._hash = hash(self.to_string())
def to_string(self):
"""Return a string representation of this `DeviceSpec`.
Returns:
a string of the form
/job:<name>/replica:<id>/task:<id>/device:<device_type>:<id>.
"""
return self._as_string
@classmethod
def from_string(cls, spec):
"""Construct a `DeviceSpec` from a string.
Args:
spec: a string of the form
/job:<name>/replica:<id>/task:<id>/device:CPU:<id> or
/job:<name>/replica:<id>/task:<id>/device:GPU:<id> as cpu and gpu are
mutually exclusive. All entries are optional.
Returns:
A DeviceSpec.
"""
return cls(*cls._string_to_components(spec))
def parse_from_string(self, spec):
"""Parse a `DeviceSpec` name into its components.
**2.x behavior change**:
In TensorFlow 1.x, this function mutates its own state and returns itself.
In 2.x, DeviceSpecs are immutable, and this function will return a
DeviceSpec which contains the spec.
* Recommended:
```
# my_spec and my_updated_spec are unrelated.
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_updated_spec = tf.DeviceSpec.from_string("/GPU:0")
with tf.device(my_updated_spec):
...
```
* Will work in 1.x and 2.x (though deprecated in 2.x):
```
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_updated_spec = my_spec.parse_from_string("/GPU:0")
with tf.device(my_updated_spec):
...
```
* Will NOT work in 2.x:
```
my_spec = tf.DeviceSpec.from_string("/CPU:0")
my_spec.parse_from_string("/GPU:0") # <== Will not update my_spec
with tf.device(my_spec):
...
```
In general, `DeviceSpec.from_string` should completely replace
`DeviceSpec.parse_from_string`, and `DeviceSpec.replace` should
completely replace setting attributes directly.
Args:
spec: an optional string of the form
/job:<name>/replica:<id>/task:<id>/device:CPU:<id> or
/job:<name>/replica:<id>/task:<id>/device:GPU:<id> as cpu and gpu are
mutually exclusive. All entries are optional.
Returns:
The `DeviceSpec`.
Raises:
ValueError: if the spec was not valid.
"""
return self.from_string(spec)
def make_merged_spec(self, dev):
"""Returns a new DeviceSpec which incorporates `dev`.
When combining specs, `dev` will take precedence over the current spec.
So for instance:
```
first_spec = tf.DeviceSpec(job=0, device_type="CPU")
second_spec = tf.DeviceSpec(device_type="GPU")
combined_spec = first_spec.make_merged_spec(second_spec)
```
is equivalent to:
```
combined_spec = tf.DeviceSpec(job=0, device_type="GPU")
```
Args:
dev: a `DeviceSpec`
Returns:
A new `DeviceSpec` which combines `self` and `dev`
"""
return self.__class__(*self._get_combined_properties(dev))
def replace(self, **kwargs):
"""Convenience method for making a new DeviceSpec by overriding fields.
For instance:
```
my_spec = DeviceSpec=(job="my_job", device="CPU")
my_updated_spec = my_spec.replace(device="GPU")
my_other_spec = my_spec.replace(device=None)
```
Args:
**kwargs: This method takes the same args as the DeviceSpec constructor
Returns:
A DeviceSpec with the fields specified in kwargs overridden.
"""
init_kwargs = dict(
job=self.job,
replica=self.replica,
task=self.task,
device_type=self.device_type,
device_index=self.device_index)
# Explicitly provided kwargs take precedence.
init_kwargs.update(kwargs)
return self.__class__(**init_kwargs)
@property
def job(self):
return self._job
@property
def replica(self):
return self._replica
@property
def task(self):
return self._task
@property
def device_type(self):
return self._device_type
@property
def device_index(self):
return self._device_index
def _get_combined_properties(self, dev):
"""Combine the current DeviceSpec with another DeviceSpec.
The combination of DeviceSpecs is will give priority to dev.
Args:
dev: a `DeviceSpec`
Returns:
A tuple of (job, replica, task, device_type, device_index) which
represents the combination of self and dev.
"""
return (
dev.job if dev.job is not None else self.job,
dev.replica if dev.replica is not None else self.replica,
dev.task if dev.task is not None else self.task,
dev.device_type if dev.device_type is not None else self.device_type,
dev.device_index if dev.device_index is not None else self.device_index,
)
@staticmethod
def _get_valid_device_types():
valid_device_types = set({})
physical_devices = pywrap_tfe.TF_ListPluggablePhysicalDevices()
for device in physical_devices:
valid_device_types.add(device.decode().split(":")[1])
valid_device_types = valid_device_types | _VALID_DEVICE_TYPES
return valid_device_types
@staticmethod
def _string_to_components(spec=None):
"""Stateless portion of device spec string parsing.
Args:
spec: An optional string specifying a device specification.
Returns:
The parsed components of `spec`. Note that the result of this function
must go through attribute setters of DeviceSpec, and should therefore NOT
be used directly.
"""
cached_result = _STRING_TO_COMPONENTS_CACHE.get(spec)
if cached_result is not None:
return cached_result
raw_spec = spec # keep a copy of the original to update the cache
job, replica, task, device_type, device_index = None, None, None, None, None
spec = spec or ""
splits = [x.split(":") for x in spec.split("/")]
valid_device_types = DeviceSpecV2._get_valid_device_types()
for y in splits:
ly = len(y)
if y:
# NOTE(taylorrobie): these will go through setters later.
if ly == 2 and y[0] == "job":
job = y[1]
elif ly == 2 and y[0] == "replica":
replica = y[1]
elif ly == 2 and y[0] == "task":
task = y[1]
elif ((ly == 1 or ly == 2) and (y[0].upper() in valid_device_types)):
if device_type is not None:
raise ValueError(f"Multiple device types are not allowed "
f"while parsing the device spec: {spec}.")
device_type = y[0].upper()
if ly == 2 and y[1] != "*":
device_index = int(y[1])
elif ly == 3 and y[0] == "device":
if device_type is not None:
raise ValueError(f"Multiple device types are not allowed "
f"while parsing the device spec: {spec}.")
device_type = y[1]
if y[2] != "*":
device_index = int(y[2])
elif ly and y[0] != "": # pylint: disable=g-explicit-bool-comparison
raise ValueError(f"Unknown attribute '{y[0]}' is encountered "
f"while parsing the device spec: '{spec}'.")
output = (job, replica, task, device_type, device_index)
_STRING_TO_COMPONENTS_CACHE[raw_spec] = output
return output
@staticmethod
def _components_to_string(job, replica, task, device_type, device_index):
"""Stateless portion of `to_string` (separated to allow caching)."""
key = (job, replica, task, device_type, device_index)
cached_result = _COMPONENTS_TO_STRING_CACHE.get(key)
if cached_result is not None:
return cached_result
output = []
if job is not None:
output.append("/job:" + job)
if replica is not None:
output.append("/replica:" + str(replica))
if task is not None:
output.append("/task:" + str(task))
if device_type is not None:
device_index_string = "*"
if device_index is not None:
# Unlike the others, device_index is stored as an int.
device_index_string = str(device_index)
output.append("/device:%s:%s" % (device_type, device_index_string))
output = "".join(output)
_COMPONENTS_TO_STRING_CACHE[key] = output
return output
def __eq__(self, other):
"""Checks if the `other` DeviceSpec is same as the current instance, eg have
same value for all the internal fields.
Args:
other: Another DeviceSpec
Returns:
Return `True` if `other` is also a DeviceSpec instance and has same value
as the current instance.
Return `False` otherwise.
"""
return (isinstance(other, self.__class__) and
self.to_string() == other.to_string())
def __hash__(self):
return self._hash
def __repr__(self):
return (
f"<DeviceSpec(job={self.job}, replica={self.replica}, task={self.task}, "
f"device_type={self.device_type}, device_index={self.device_index})>")
@tf_export(v1=["DeviceSpec"]) # pylint: disable=missing-docstring
| DeviceSpecV2 |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/networks.py | {
"start": 27653,
"end": 28725
} | class ____(SimpleActor, Critic):
def __init__(
self,
observation_specs: List[ObservationSpec],
network_settings: NetworkSettings,
action_spec: ActionSpec,
stream_names: List[str],
conditional_sigma: bool = False,
tanh_squash: bool = False,
):
self.use_lstm = network_settings.memory is not None
super().__init__(
observation_specs,
network_settings,
action_spec,
conditional_sigma,
tanh_squash,
)
self.stream_names = stream_names
self.value_heads = ValueHeads(stream_names, self.encoding_size)
def critic_pass(
self,
inputs: List[torch.Tensor],
memories: Optional[torch.Tensor] = None,
sequence_length: int = 1,
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
encoding, memories_out = self.network_body(
inputs, memories=memories, sequence_length=sequence_length
)
return self.value_heads(encoding), memories_out
| SharedActorCritic |
python | pytorch__pytorch | torch/_dynamo/types.py | {
"start": 3663,
"end": 3782
} | class ____(Protocol):
def __call__(
self,
cache_hit: bool,
) -> bool: ...
| DynamoGuardCompleteHook |
python | pytorch__pytorch | scripts/release_notes/commitlist.py | {
"start": 1269,
"end": 22125
} | class ____:
# NB: Private ctor. Use `from_existing` or `create_new`.
def __init__(self, path: str, commits: List[Commit]):
self.path = path
self.commits = commits
@staticmethod
def from_existing(path):
commits = CommitList.read_from_disk(path)
return CommitList(path, commits)
@staticmethod
def create_new(path, base_version, new_version):
if os.path.exists(path):
raise ValueError(
"Attempted to create a new commitlist but one exists already!"
)
commits = CommitList.get_commits_between(base_version, new_version)
return CommitList(path, commits)
@staticmethod
def read_from_disk(path) -> List[Commit]:
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
rows = []
for row in reader:
if row.get("new_title", "") != "":
row["title"] = row["new_title"]
filtered_rows = {k: row.get(k, "") for k in commit_fields}
rows.append(Commit(**filtered_rows))
return rows
def write_result(self):
self.write_to_disk_static(self.path, self.commits)
@staticmethod
def write_to_disk_static(path, commit_list):
os.makedirs(Path(path).parent, exist_ok=True)
with open(path, "w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(commit_fields)
for commit in commit_list:
writer.writerow(dataclasses.astuple(commit))
@staticmethod
def keywordInFile(file, keywords):
for key in keywords:
if key in file:
return True
return False
@staticmethod
def gen_commit(commit_hash):
feature_item = get_commit_data_cache().get(commit_hash)
features = features_to_dict(feature_item)
category, topic = CommitList.categorize(features)
a1, a2, a3 = (features["accepters"] + ("", "", ""))[:3]
if features["pr_number"] is not None:
pr_link = f"https://github.com/pytorch/pytorch/pull/{features['pr_number']}"
else:
pr_link = None
files_changed_str = " ".join(features["files_changed"])
return Commit(
commit_hash,
category,
topic,
features["title"],
files_changed_str,
pr_link,
features["author"],
a1,
a2,
a3,
)
@staticmethod
def category_remapper(category: str) -> str:
if category in frontend_categories:
category = category + "_frontend"
return category
if category == "Meta API":
category = "composability"
return category
if category in common.quantization.categories:
category = common.quantization.name
return category
if category in common.distributed.categories:
category = common.distributed.name
return category
return category
@staticmethod
def bracket_category_matcher(title: str):
"""Categorize a commit based on the presence of a bracketed category in the title.
Args:
title (str): title to seaarch
Returns:
optional[str]
"""
pairs = [
("[dynamo]", "dynamo"),
("[torchdynamo]", "dynamo"),
("[torchinductor]", "inductor"),
("[inductor]", "inductor"),
("[codemod", "skip"),
("[profiler]", "profiler"),
("[functorch]", "functorch"),
("[autograd]", "autograd_frontend"),
("[quantization]", "quantization"),
("[nn]", "nn_frontend"),
("[complex]", "complex_frontend"),
("[mps]", "mps"),
("[optimizer]", "optimizer_frontend"),
("[xla]", "xla"),
]
title_lower = title.lower()
for bracket, category in pairs:
if bracket in title_lower:
return category
return None
@staticmethod
def categorize(features):
title = features["title"]
labels = features["labels"]
category = "Uncategorized"
topic = "Untopiced"
# Revert commits are merged directly to master with no associated PR number
if features["pr_number"] is None:
if title.startswith("Revert"):
return "skip", topic
# We ask contributors to label their PR's appropriately
# when they're first landed.
# Check if the labels are there first.
already_categorized = already_topiced = False
for label in labels:
if label.startswith("release notes: "):
category = label.split("release notes: ", 1)[1]
category = CommitList.category_remapper(category)
already_categorized = True
if label.startswith("topic: "):
topic = label.split("topic: ", 1)[1]
already_topiced = True
if already_categorized and already_topiced:
return category, topic
# update this to check if each file starts with caffe2
if "caffe2" in title:
return "caffe2", topic
if "Reverted" in labels:
return "skip", topic
if "module: deprecation" in labels:
topic = "deprecation"
found_bracket_category = CommitList.bracket_category_matcher(title)
if found_bracket_category:
return found_bracket_category, topic
files_changed = features["files_changed"]
for file in files_changed:
file_lowercase = file.lower()
if CommitList.keywordInFile(
file,
[
"docker/",
".circleci",
".github",
".jenkins",
".ci",
".azure_pipelines",
],
):
category = "releng"
break
# datapipe(s), torch/utils/data, test_{dataloader, datapipe}
if CommitList.keywordInFile(
file, ["torch/utils/data", "test_dataloader", "test_datapipe"]
):
category = "dataloader_frontend"
break
if CommitList.keywordInFile(file, ["torch/csrc/api", "test/cpp/api"]):
category = "cpp_frontend"
break
if CommitList.keywordInFile(file, ["distributed", "c10d"]):
category = "distributed"
break
if "vulkan" in file_lowercase:
category = "vulkan"
break
if "Foreach" in file_lowercase:
category = "foreach_frontend"
break
if "onnx" in file_lowercase:
category = "onnx"
break
if CommitList.keywordInFile(file, ["torch/fx", "test_fx"]):
category = "fx"
break
if CommitList.keywordInFile(file, ["torch/ao", "test/ao"]):
category = common.quantization.name
break
# torch/quantization, test/quantization, aten/src/ATen/native/quantized, torch/nn/{quantized, quantizable}
if CommitList.keywordInFile(
file,
[
"torch/quantization",
"test/quantization",
"aten/src/ATen/native/quantized",
"torch/nn/quantiz",
],
):
category = common.quantization.name
break
if CommitList.keywordInFile(file, ["torch/package", "test/package"]):
category = "package"
break
if CommitList.keywordInFile(
file,
[
"torch/csrc/jit/mobile",
"aten/src/ATen/native/metal",
"test/mobile",
"torch/backends/_nnapi/",
"test/test_nnapi.py",
],
):
category = "mobile"
break
if CommitList.keywordInFile(
file,
[
"aten/src/ATen/native/LinearAlgebra.cpp",
"test/test_linalg.py",
"torch/linalg",
],
):
category = "linalg_frontend"
break
if CommitList.keywordInFile(
file,
[
"torch/sparse",
"aten/src/ATen/native/sparse",
"torch/_masked/__init__.py",
],
):
category = "sparse_frontend"
break
if CommitList.keywordInFile(file, ["tools/autograd"]):
category = "autograd_frontend"
break
if CommitList.keywordInFile(
file,
[
"test/test_nn.py",
"test/test_module.py",
"torch/nn/modules",
"torch/nn/functional.py",
],
):
category = "nn_frontend"
break
if CommitList.keywordInFile(file, ["torch/csrc/jit", "torch/jit"]):
category = "jit"
break
if CommitList.keywordInFile(
file,
[
"torch/_meta_registrations.py",
"torch/_decomp",
"torch/_prims",
"torch/_refs",
],
):
category = "composability"
break
if CommitList.keywordInFile(file, ["torch/_dynamo"]):
category = "dynamo"
break
if CommitList.keywordInFile(file, ["torch/_inductor"]):
category = "inductor"
break
else:
# Below are some extra quick checks that aren't necessarily file-path related,
# but I found that to catch a decent number of extra commits.
if len(files_changed) > 0 and all(
f_name.endswith((".cu", ".cuh")) for f_name in files_changed
):
category = "cuda"
elif "[PyTorch Edge]" in title:
category = "mobile"
elif (
len(files_changed) == 1
and "torch/testing/_internal/common_methods_invocations.py"
in files_changed[0]
):
# when this is the only file changed, it's almost always an OpInfo change.
category = "python_frontend"
elif len(files_changed) == 1 and "torch/_torch_docs.py" in files_changed[0]:
# individual torch_docs changes are usually for python ops
category = "python_frontend"
# If we couldn't find a category but the topic is not user facing we can skip these:
if category == "Uncategorized" and topic == "not user facing":
category = "skip"
return category, topic
@staticmethod
def get_commits_between(base_version, new_version):
cmd = f"git merge-base {base_version} {new_version}"
rc, merge_base, _ = run(cmd)
assert rc == 0
# Returns a list of something like
# b33e38ec47 Allow a higher-precision step type for Vec256::arange (#34555)
cmd = f"git log --reverse --oneline {merge_base}..{new_version}"
rc, commits, _ = run(cmd)
assert rc == 0
log_lines = commits.split("\n")
hashes, titles = zip(*[log_line.split(" ", 1) for log_line in log_lines])
return [CommitList.gen_commit(commit_hash) for commit_hash in hashes]
def filter(self, *, category=None, topic=None):
commits = self.commits
if category is not None:
commits = [commit for commit in commits if commit.category == category]
if topic is not None:
commits = [commit for commit in commits if commit.topic == topic]
return commits
def update_to(self, new_version):
last_hash = self.commits[-1].commit_hash
new_commits = CommitList.get_commits_between(last_hash, new_version)
self.commits += new_commits
def stat(self):
counts = defaultdict(lambda: defaultdict(int))
for commit in self.commits:
counts[commit.category][commit.topic] += 1
return counts
def create_new(path, base_version, new_version):
commits = CommitList.create_new(path, base_version, new_version)
commits.write_result()
def update_existing(path, new_version):
commits = CommitList.from_existing(path)
commits.update_to(new_version)
commits.write_result()
def rerun_with_new_filters(path):
current_commits = CommitList.from_existing(path)
for i, commit in enumerate(current_commits.commits):
current_category = commit.category
if (
current_category == "Uncategorized"
or current_category not in common.categories
):
feature_item = get_commit_data_cache().get(commit.commit_hash)
features = features_to_dict(feature_item)
category, topic = CommitList.categorize(features)
current_commits.commits[i] = dataclasses.replace(
commit, category=category, topic=topic
)
current_commits.write_result()
def get_hash_or_pr_url(commit: Commit):
# cdc = get_commit_data_cache()
pr_link = commit.pr_link
if pr_link is None:
return commit.commit_hash
else:
regex = r"https://github.com/pytorch/pytorch/pull/([0-9]+)"
matches = re.findall(regex, pr_link)
if len(matches) == 0:
return commit.commit_hash
return f"[#{matches[0]}]({pr_link})"
def to_markdown(commit_list: CommitList, category):
def cleanup_title(commit):
match = re.match(r"(.*) \(#\d+\)", commit.title)
if match is None:
return commit.title
return match.group(1)
merge_mapping = defaultdict(list)
for commit in commit_list.commits:
if commit.merge_into:
merge_mapping[commit.merge_into].append(commit)
cdc = get_commit_data_cache()
lines = [f"\n## {category}\n"]
for topic in topics:
lines.append(f"### {topic}\n")
commits = commit_list.filter(category=category, topic=topic)
if "_" in topic:
commits.extend(
commit_list.filter(category=category, topic=topic.replace("_", " "))
)
if " " in topic:
commits.extend(
commit_list.filter(category=category, topic=topic.replace(" ", "_"))
)
for commit in commits:
if commit.merge_into:
continue
all_related_commits = merge_mapping[commit.commit_hash] + [commit]
commit_list_md = ", ".join(
get_hash_or_pr_url(c) for c in all_related_commits
)
result = f"- {cleanup_title(commit)} ({commit_list_md})\n"
lines.append(result)
return lines
def get_markdown_header(category):
header = f"""
# Release Notes worksheet {category}
The main goal of this process is to rephrase all the commit messages below to make them **clear and easy to read** by the end user. You should follow the following instructions to do so:
* **Please clean up and format commit titles to be readable by the general PyTorch user.** Make sure you're [following the guidance here](https://docs.google.com/document/d/14OmgGBr1w6gl1VO47GGGdwrIaUNr92DFhQbY_NEk8mQ/edit)! Your resulting notes must be consistent and easy to read.
* Please sort commits into the following categories (you should not rename the categories!), I tried to pre-sort these to ease your work, feel free to move commits around if the current categorization is not good.
* Anything that is not public facing needs to be removed.
* If anything is miscategorized/belongs to another domain, move it to `miscategorized.md`.
* Please scan through `miscategorized.md` and handle any commits that belong within your domain according to these instructions.
* We place a lot of emphasis on the “BC-breaking” and “deprecation” sections. Those should be where the most effort goes in. The “improvements” and “bug fixes” for Python API should be nice as well.
* Once you are finished, move this very file from `todo/` to `done/` and submit a pull request.
The categories below are as follows:
* BC breaking: All commits that are BC-breaking. These are the most important commits. If any pre-sorted commit is actually BC-breaking, do move it to this section. Each commit should contain a paragraph explaining the rational behind the change as well as an example for how to update user code [BC-Guidelines](https://docs.google.com/document/d/14OmgGBr1w6gl1VO47GGGdwrIaUNr92DFhQbY_NEk8mQ/edit#heading=h.a9htwgvvec1m).
* Deprecations: All commits introducing deprecation. Each commit should include a small example explaining what should be done to update user code.
* new_features: All commits introducing a new feature (new functions, new submodule, new supported platform etc)
* improvements: All commits providing improvements to existing feature should be here (new backend for a function, new argument, better numerical stability)
* bug fixes: All commits that fix bugs and behaviors that do not match the documentation
* performance: All commits that are added mainly for performance (we separate this from improvements above to make it easier for users to look for it)
* documentation: All commits that add/update documentation
* Developers: All commits that are not end-user facing but still impact people that compile from source, develop into pytorch, extend pytorch, etc
* not user facing: All commits that are not public end-user facing and hence should be dropped from the release notes
"""
return [header]
def main():
parser = argparse.ArgumentParser(description="Tool to create a commit list")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--create-new", "--create_new", nargs=2)
group.add_argument("--update-to", "--update_to")
# I found this flag useful when experimenting with adding new auto-categorizing filters.
# After running commitlist.py the first time, if you add any new filters in this file,
# re-running with "rerun_with_new_filters" will update the existing commitlist.csv file,
# but only affect the rows that were previously marked as "Uncategorized"
group.add_argument(
"--rerun-with-new-filters", "--rerun_with_new_filters", action="store_true"
)
group.add_argument("--stat", action="store_true")
group.add_argument("--export-markdown", "--export_markdown", action="store_true")
group.add_argument(
"--export-csv-categories", "--export_csv_categories", action="store_true"
)
parser.add_argument("--path", default="results/commitlist.csv")
args = parser.parse_args()
if args.create_new:
create_new(args.path, args.create_new[0], args.create_new[1])
print(
"Finished creating new commit list. Results have been saved to results/commitlist.csv"
)
return
if args.update_to:
update_existing(args.path, args.update_to)
return
if args.rerun_with_new_filters:
rerun_with_new_filters(args.path)
return
if args.stat:
commits = CommitList.from_existing(args.path)
stats = commits.stat()
pprint.pprint(stats)
return
if args.export_csv_categories:
commits = CommitList.from_existing(args.path)
categories = list(commits.stat().keys())
for category in categories:
print(f"Exporting {category}...")
filename = f"results/export/result_{category}.csv"
CommitList.write_to_disk_static(filename, commits.filter(category=category))
return
if args.export_markdown:
commits = CommitList.from_existing(args.path)
categories = list(commits.stat().keys())
for category in categories:
print(f"Exporting {category}...")
lines = get_markdown_header(category)
lines += to_markdown(commits, category)
filename = f"results/export/result_{category}.md"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
f.writelines(lines)
return
raise AssertionError
if __name__ == "__main__":
main()
| CommitList |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.