language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/libraries/dagster-powerbi/dagster_powerbi/translator.py | {
"start": 2509,
"end": 2749
} | class ____:
"""A record representing a piece of content in PowerBI.
Includes the content's type and data as returned from the API.
"""
content_type: PowerBIContentType
properties: dict[str, Any]
@record
| PowerBIContentData |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/transformer_test.py | {
"start": 1009,
"end": 9589
} | class ____(test.TestCase):
def _simple_context(self):
entity_info = transformer.EntityInfo(
name='Test_fn',
source_code=None,
source_file=None,
future_features=(),
namespace=None)
return transformer.Context(entity_info, None, None)
def assertSameAnno(self, first, second, key):
self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
def assertDifferentAnno(self, first, second, key):
self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
def test_state_tracking(self):
class LoopState(object):
pass
class CondState(object):
pass
class TestTransformer(transformer.Base):
def visit(self, node):
anno.setanno(node, 'loop_state', self.state[LoopState].value)
anno.setanno(node, 'cond_state', self.state[CondState].value)
return super(TestTransformer, self).visit(node)
def visit_While(self, node):
self.state[LoopState].enter()
node = self.generic_visit(node)
self.state[LoopState].exit()
return node
def visit_If(self, node):
self.state[CondState].enter()
node = self.generic_visit(node)
self.state[CondState].exit()
return node
tr = TestTransformer(self._simple_context())
def test_function(a):
a = 1
while a:
_ = 'a'
if a > 2:
_ = 'b'
while True:
raise '1'
if a > 3:
_ = 'c'
while True:
raise '1'
node, _ = parser.parse_entity(test_function, future_features=())
node = tr.visit(node)
fn_body = node.body
outer_while_body = fn_body[1].body
self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')
first_if_body = outer_while_body[1].body
self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
'cond_state')
self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')
first_inner_while_body = first_if_body[1].body
self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
'cond_state')
self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
'loop_state')
second_if_body = outer_while_body[2].body
self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')
second_inner_while_body = second_if_body[1].body
self.assertDifferentAnno(first_inner_while_body[0],
second_inner_while_body[0], 'cond_state')
self.assertDifferentAnno(first_inner_while_body[0],
second_inner_while_body[0], 'loop_state')
def test_state_tracking_context_manager(self):
class CondState(object):
pass
class TestTransformer(transformer.Base):
def visit(self, node):
anno.setanno(node, 'cond_state', self.state[CondState].value)
return super(TestTransformer, self).visit(node)
def visit_If(self, node):
with self.state[CondState]:
return self.generic_visit(node)
tr = TestTransformer(self._simple_context())
def test_function(a):
a = 1
if a > 2:
_ = 'b'
if a < 5:
_ = 'c'
_ = 'd'
node, _ = parser.parse_entity(test_function, future_features=())
node = tr.visit(node)
fn_body = node.body
outer_if_body = fn_body[1].body
self.assertDifferentAnno(fn_body[0], outer_if_body[0], 'cond_state')
self.assertSameAnno(outer_if_body[0], outer_if_body[2], 'cond_state')
inner_if_body = outer_if_body[1].body
self.assertDifferentAnno(inner_if_body[0], outer_if_body[0], 'cond_state')
def test_visit_block_postprocessing(self):
class TestTransformer(transformer.Base):
def _process_body_item(self, node):
if isinstance(node, gast.Assign) and (node.value.id == 'y'):
if_node = gast.If(
gast.Name(
'x', ctx=gast.Load(), annotation=None, type_comment=None),
[node], [])
return if_node, if_node.body
return node, None
def visit_FunctionDef(self, node):
node.body = self.visit_block(
node.body, after_visit=self._process_body_item)
return node
def test_function(x, y):
z = x
z = y
return z
tr = TestTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function, future_features=())
node = tr.visit(node)
self.assertEqual(len(node.body), 2)
self.assertIsInstance(node.body[0], gast.Assign)
self.assertIsInstance(node.body[1], gast.If)
self.assertIsInstance(node.body[1].body[0], gast.Assign)
self.assertIsInstance(node.body[1].body[1], gast.Return)
def test_robust_error_on_list_visit(self):
class BrokenTransformer(transformer.Base):
def visit_If(self, node):
# This is broken because visit expects a single node, not a list, and
# the body of an if is a list.
# Importantly, the default error handling in visit also expects a single
# node. Therefore, mistakes like this need to trigger a type error
# before the visit called here installs its error handler.
# That type error can then be caught by the enclosing call to visit,
# and correctly blame the If node.
self.visit(node.body)
return node
def test_function(x):
if x > 0:
return x
tr = BrokenTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function, future_features=())
with self.assertRaises(ValueError) as cm:
node = tr.visit(node)
obtained_message = str(cm.exception)
expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
self.assertRegex(obtained_message, expected_message)
def test_robust_error_on_ast_corruption(self):
# A child class should not be able to be so broken that it causes the error
# handling in `transformer.Base` to raise an exception. Why not? Because
# then the original error location is dropped, and an error handler higher
# up in the call stack gives misleading information.
# Here we test that the error handling in `visit` completes, and blames the
# correct original exception, even if the AST gets corrupted.
class NotANode(object):
pass
class BrokenTransformer(transformer.Base):
def visit_If(self, node):
node.body = NotANode()
raise ValueError('I blew up')
def test_function(x):
if x > 0:
return x
tr = BrokenTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function, future_features=())
with self.assertRaises(ValueError) as cm:
node = tr.visit(node)
obtained_message = str(cm.exception)
# The message should reference the exception actually raised, not anything
# from the exception handler.
expected_substring = 'I blew up'
self.assertIn(expected_substring, obtained_message)
def test_origin_info_propagated_to_new_nodes(self):
class TestTransformer(transformer.Base):
def visit_If(self, node):
return gast.Pass()
tr = TestTransformer(self._simple_context())
def test_fn():
x = 1
if x > 0:
x = 1
return x
node, source = parser.parse_entity(test_fn, future_features=())
origin_info.resolve(node, source, 'test_file', 100, 0)
node = tr.visit(node)
created_pass_node = node.body[1]
# Takes the line number of the if statement.
self.assertEqual(
anno.getanno(created_pass_node, anno.Basic.ORIGIN).loc.lineno, 102)
def test_origin_info_preserved_in_moved_nodes(self):
class TestTransformer(transformer.Base):
def visit_If(self, node):
return node.body
tr = TestTransformer(self._simple_context())
def test_fn():
x = 1
if x > 0:
x = 1
x += 3
return x
node, source = parser.parse_entity(test_fn, future_features=())
origin_info.resolve(node, source, 'test_file', 100, 0)
node = tr.visit(node)
assign_node = node.body[1]
aug_assign_node = node.body[2]
# Keep their original line numbers.
self.assertEqual(
anno.getanno(assign_node, anno.Basic.ORIGIN).loc.lineno, 103)
self.assertEqual(
anno.getanno(aug_assign_node, anno.Basic.ORIGIN).loc.lineno, 104)
| TransformerTest |
python | pypa__pip | src/pip/_internal/utils/deprecation.py | {
"start": 357,
"end": 3696
} | class ____(Warning):
pass
_original_showwarning: Any = None
# Warnings <-> Logging Integration
def _showwarning(
message: Warning | str,
category: type[Warning],
filename: str,
lineno: int,
file: TextIO | None = None,
line: str | None = None,
) -> None:
if file is not None:
if _original_showwarning is not None:
_original_showwarning(message, category, filename, lineno, file, line)
elif issubclass(category, PipDeprecationWarning):
# We use a specially named logger which will handle all of the
# deprecation messages for pip.
logger = logging.getLogger("pip._internal.deprecations")
logger.warning(message)
else:
_original_showwarning(message, category, filename, lineno, file, line)
def install_warning_logger() -> None:
# Enable our Deprecation Warnings
warnings.simplefilter("default", PipDeprecationWarning, append=True)
global _original_showwarning
if _original_showwarning is None:
_original_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
def deprecated(
*,
reason: str,
replacement: str | None,
gone_in: str | None,
feature_flag: str | None = None,
issue: int | None = None,
) -> None:
"""Helper to deprecate existing functionality.
reason:
Textual reason shown to the user about why this functionality has
been deprecated. Should be a complete sentence.
replacement:
Textual suggestion shown to the user about what alternative
functionality they can use.
gone_in:
The version of pip does this functionality should get removed in.
Raises an error if pip's current version is greater than or equal to
this.
feature_flag:
Command-line flag of the form --use-feature={feature_flag} for testing
upcoming functionality.
issue:
Issue number on the tracker that would serve as a useful place for
users to find related discussion and provide feedback.
"""
# Determine whether or not the feature is already gone in this version.
is_gone = gone_in is not None and parse(current_version) >= parse(gone_in)
message_parts = [
(reason, f"{DEPRECATION_MSG_PREFIX}{{}}"),
(
gone_in,
(
"pip {} will enforce this behaviour change."
if not is_gone
else "Since pip {}, this is no longer supported."
),
),
(
replacement,
"A possible replacement is {}.",
),
(
feature_flag,
(
"You can use the flag --use-feature={} to test the upcoming behaviour."
if not is_gone
else None
),
),
(
issue,
"Discussion can be found at https://github.com/pypa/pip/issues/{}",
),
]
message = " ".join(
format_str.format(value)
for value, format_str in message_parts
if format_str is not None and value is not None
)
# Raise as an error if this behaviour is deprecated.
if is_gone:
raise PipDeprecationWarning(message)
warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
| PipDeprecationWarning |
python | doocs__leetcode | solution/2200-2299/2290.Minimum Obstacle Removal to Reach Corner/Solution.py | {
"start": 0,
"end": 686
} | class ____:
def minimumObstacles(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
q = deque([(0, 0, 0)])
vis = set()
dirs = (-1, 0, 1, 0, -1)
while 1:
i, j, k = q.popleft()
if i == m - 1 and j == n - 1:
return k
if (i, j) in vis:
continue
vis.add((i, j))
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n:
if grid[x][y] == 0:
q.appendleft((x, y, k))
else:
q.append((x, y, k + 1))
| Solution |
python | pytorch__pytorch | benchmarks/dynamo/pr_time_benchmarks/benchmarks/dynamo_inline.py | {
"start": 697,
"end": 2153
} | class ____(BenchmarkBase):
def __init__(
self,
ModuleClass,
backend="eager",
is_gpu=False,
dynamic=False,
):
self.ModuleClass = ModuleClass
self._name = ModuleClass.__name__
self._is_gpu = is_gpu
super().__init__(
category="basic",
backend=backend,
device="cuda" if self._is_gpu else "cpu",
dynamic=dynamic,
)
def name(self):
prefix = f"{self.category()}_{self._name}_{self.backend()}"
return prefix
def _prepare_once(self):
self.m = self.ModuleClass()
torch.set_float32_matmul_precision("high")
self.input = torch.ones(10, device=self.device())
def _prepare(self):
torch._dynamo.reset()
def _work(self):
# enable_cpp_symbolic_shape_guards has impact on this benchmark
# Keep using False value for consistency.
with (
fresh_cache(),
):
opt_m = torch.compile(backend=self.backend(), dynamic=self.is_dynamic())(
self.m.cuda() if self._is_gpu else self.m
)
opt_m(self.input)
def main():
result_path = sys.argv[1]
benchmarks = [
Benchmark(InlineMod),
]
for b in benchmarks:
b.enable_compile_time_instruction_count().collect_all().append_results(
result_path
)
if __name__ == "__main__":
main()
| Benchmark |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_authorizations.py | {
"start": 694,
"end": 13881
} | class ____(APITestCase):
endpoint = "sentry-api-0-sentry-app-installation-authorizations"
method = "post"
def setUp(self) -> None:
self.sentry_app = self.create_sentry_app(
name="nulldb",
organization=self.create_organization(),
scopes=["org:read"],
webhook_url="http://example.com",
)
self.other_sentry_app = self.create_sentry_app(
name="slowdb",
organization=self.create_organization(),
scopes=[],
webhook_url="http://example.com",
)
self.install = self.create_sentry_app_installation(
organization=self.organization,
slug="nulldb",
user=self.user,
prevent_token_exchange=True,
)
def get_response(self, *args: int | str, **params: int | str) -> Response:
"""Overriding `get_response` with some default data."""
return super().get_response(
self.install.uuid,
**{
"client_id": self.sentry_app.application.client_id,
"client_secret": self.sentry_app.application.client_secret,
"grant_type": GrantTypes.AUTHORIZATION,
"code": self.install.api_grant.code,
**params,
},
)
def test_exchanges_for_token_successfully(self) -> None:
expected_expires_at = (timezone.now() + timedelta(hours=8)).replace(second=0, microsecond=0)
response = self.get_success_response()
token = ApiToken.objects.get(application=self.sentry_app.application)
assert response.data["scopes"] == self.sentry_app.scope_list
assert response.data["token"] == token.token
assert response.data["refreshToken"] == token.refresh_token
expires_at = response.data["expiresAt"].replace(second=0, microsecond=0)
assert expires_at == expected_expires_at
def test_exchange_for_token_missing_data(self) -> None:
response = self.get_error_response(code=None)
assert response.status_code == 400
# This is rejected by the base `SentryAppAuthorizationBaseEndpoint`
# class's authentication, so expect an unauthorized error.
response = self.get_error_response(client_id=None)
assert response.status_code == 401
def test_incorrect_grant_type(self) -> None:
self.get_error_response(grant_type="notit", status_code=403)
def test_invalid_installation(self) -> None:
self.install = self.create_sentry_app_installation(
organization=self.organization,
slug="slowdb",
user=self.user,
prevent_token_exchange=True,
)
# URL with this new Install's uuid in it
self.get_error_response(self.install.uuid, status_code=403)
def test_non_sentry_app_user(self) -> None:
app = ApiApplication.objects.create(owner=self.create_user())
self.get_error_response(
client_id=app.client_id, client_secret=app.client_secret, status_code=401
)
def test_invalid_grant(self) -> None:
self.get_error_response(code="123", status_code=401)
def test_expired_grant(self) -> None:
self.install.api_grant.update(expires_at=timezone.now() - timedelta(minutes=2))
response = self.get_error_response(status_code=401)
assert response.data["detail"] == "Grant has already expired"
def test_request_with_exchanged_access_token(self) -> None:
response = self.get_response()
token = response.data["token"]
url = reverse("sentry-api-0-organization-details", args=[self.organization.slug])
with assume_test_silo_mode(SiloMode.REGION):
response = self.client.get(url, HTTP_AUTHORIZATION=f"Bearer {token}")
assert response.status_code == 200
assert response.data["id"] == str(self.organization.id)
def test_state(self) -> None:
response = self.get_success_response(state="abc123")
assert response.data["state"] == "abc123"
def test_refresh_token_exchange(self) -> None:
response = self.get_success_response()
token_id = response.data["id"]
token = response.data["token"]
refresh_token = response.data["refreshToken"]
response = self.get_success_response(
code=None, refresh_token=refresh_token, grant_type="refresh_token"
)
assert response.data["token"] != token
assert response.data["refreshToken"] != refresh_token
assert response.data["expiresAt"] > timezone.now()
old_token = ApiToken.objects.filter(id=token_id)
assert not old_token.exists()
new_token = ApiToken.objects.filter(token=response.data["token"])
assert new_token.exists()
new_token = ApiToken.objects.filter(refresh_token=response.data["refreshToken"])
assert new_token.exists()
def test_refresh_token_exchange_with_missing_data(self) -> None:
response = self.get_success_response()
refresh_token = response.data["refreshToken"]
assert response.data["refreshToken"] is not None
response = self.get_error_response(
code=None, refresh_token=None, grant_type="refresh_token"
)
assert response.status_code == 400
# This is rejected by the base `SentryAppAuthorizationBaseEndpoint`
# class's authentication, so expect an unauthorized error.
response = self.get_error_response(
code=None, refresh_token=refresh_token, grant_type="refresh_token", client_id=None
)
assert response.status_code == 401
def _create_jwt(self, client_id: str, client_secret: str, exp: datetime | None = None) -> str:
"""Helper to create a JWT token for client_secret_jwt grant type"""
if exp is None:
exp = datetime.now(UTC) + timedelta(hours=1)
payload = {
"iss": client_id, # Issuer
"sub": client_id, # Subject
"iat": int(datetime.now(UTC).timestamp()), # Issued at
"exp": int(exp.timestamp()), # Expiration
"jti": str(uuid4()), # JWT ID (unique identifier)
}
return jwt.encode(payload, client_secret, algorithm="HS256")
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_exchange_success(self) -> None:
# First exchange the grant for a token
self.get_success_response()
# Now use client_secret_jwt to refresh
jwt_token = self._create_jwt(
self.sentry_app.application.client_id, self.sentry_app.application.client_secret
)
response = self.get_success_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=201,
)
assert response.data["scopes"] == self.sentry_app.scope_list
assert response.data["token"] is not None
assert response.data["refreshToken"] is not None
assert response.data["expiresAt"] > timezone.now()
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_deletes_old_token(self) -> None:
# First exchange the grant for a token
initial_response = self.get_success_response()
old_token_id = initial_response.data["id"]
# Now use client_secret_jwt to refresh
jwt_token = self._create_jwt(
self.sentry_app.application.client_id, self.sentry_app.application.client_secret
)
response = self.get_success_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=201,
)
assert not ApiToken.objects.filter(id=old_token_id).exists()
new_token = ApiToken.objects.filter(token=response.data["token"])
assert new_token.exists()
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_missing_authorization_header(self) -> None:
# First exchange the grant for a token
self.get_success_response()
response = self.get_error_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
status_code=401,
)
assert "Header is in invalid form" in response.data["detail"]
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_expired_token(self) -> None:
# First exchange the grant for a token
self.get_success_response()
# Create an expired JWT
expired_time = datetime.now(UTC) - timedelta(hours=1)
jwt_token = self._create_jwt(
self.sentry_app.application.client_id,
self.sentry_app.application.client_secret,
exp=expired_time,
)
response = self.get_error_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=401,
)
assert "Could not validate JWT" in response.data["detail"]
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_invalid_signature(self) -> None:
# First exchange the grant for a token
self.get_success_response()
# Create a JWT with wrong secret
jwt_token = self._create_jwt(self.sentry_app.application.client_id, "wrong-secret")
response = self.get_error_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=401,
)
assert "Could not validate JWT" in response.data["detail"]
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_wrong_client_id(self) -> None:
# First exchange the grant for a token
self.get_success_response()
jwt_token = self._create_jwt("wrong-client-id", self.sentry_app.application.client_secret)
response = self.get_error_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=401,
)
assert "JWT is not valid for this application" in response.data["detail"]
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_requires_existing_token(self) -> None:
# CLIENT_SECRET_JWT should only be used to refresh existing tokens
# Attempting to use it without first exchanging the grant should fail
jwt_token = self._create_jwt(
self.sentry_app.application.client_id, self.sentry_app.application.client_secret
)
response = self.get_error_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=401,
)
# Should fail because there's no existing token to refresh
assert response.data["detail"] == "Installation does not have a token"
def test_client_secret_jwt_requires_feature_flag(self) -> None:
self.get_success_response()
jwt_token = self._create_jwt(
self.sentry_app.application.client_id, self.sentry_app.application.client_secret
)
response = self.get_error_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=403,
)
assert (
response.data["detail"] == "Manual token refresh is not enabled for this organization"
)
@with_feature("organizations:sentry-app-manual-token-refresh")
def test_client_secret_jwt_request_with_new_token(self) -> None:
# First exchange the grant for a token
self.get_success_response()
# Use client_secret_jwt to get a new token
jwt_token = self._create_jwt(
self.sentry_app.application.client_id, self.sentry_app.application.client_secret
)
response = self.get_success_response(
self.install.uuid,
grant_type=GrantTypes.CLIENT_SECRET_JWT,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {jwt_token}"},
status_code=201,
)
new_token = response.data["token"]
# Verify the new token works for API requests
url = reverse("sentry-api-0-organization-details", args=[self.organization.slug])
with assume_test_silo_mode(SiloMode.REGION):
response = self.client.get(url, HTTP_AUTHORIZATION=f"Bearer {new_token}")
assert response.status_code == 200
assert response.data["id"] == str(self.organization.id)
| TestSentryAppAuthorizations |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 289017,
"end": 292166
} | class ____(Request):
"""
Signal a task has stopped
:param force: If not true, call fails if the task status is not 'stopped'
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "stopped"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'stopped'",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(StoppedRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| StoppedRequest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/relu_op_test.py | {
"start": 7493,
"end": 9986
} | class ____(test.TestCase):
def _npRelu6(self, np_features):
sixes = np.copy(np_features)
sixes.fill(6.0)
return np.minimum(
np.maximum(np_features, np.zeros(np_features.shape)), sixes)
def testNpRelu6(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
self._npRelu6(
np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7,
0.9]])))
def _testRelu6(self, np_features):
np_relu6 = self._npRelu6(np_features)
tf_relu6 = nn_ops.relu6(np_features)
self.assertAllClose(np_relu6, tf_relu6)
self.assertShapeEqual(np_relu6, tf_relu6)
def testNumbersCPU(self):
for t in [
np.int32, np.int64, np.float16, np.float32, np.float64,
dtypes.bfloat16.as_numpy_dtype
]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [
np.float16,
np.float32,
np.float64,
dtypes.bfloat16.as_numpy_dtype,
]:
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t))
@test_util.disable_xla("b/157978028: Does not yet pass with XLA")
def testNaNPropagation(self):
for t in [np.float16, np.float32, np.float64]:
self._testRelu6(np.array([-1, np.nan, 1, 7, np.nan]).astype(t))
# The gradient test for ReLU6 is a bit tricky as the derivative is
# not well defined at around zero and six and we want to avoid that
# in terms of input values.
def testGradientFloat32(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float32,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float64,
order="F")
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x]))
self.assertLess(err, 1e-10)
| Relu6Test |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ScatterPlotItem.py | {
"start": 42440,
"end": 46373
} | class ____(object):
"""
Class referring to individual spots in a scatter plot.
These can be retrieved by calling ScatterPlotItem.points() or
by connecting to the ScatterPlotItem's click signals.
"""
def __init__(self, data, plot, index):
self._data = data
self._index = index
# SpotItems are kept in plot.data["items"] numpy object array which
# does not support cyclic garbage collection (numpy issue 6581).
# Keeping a strong ref to plot here would leak the cycle
self.__plot_ref = weakref.ref(plot)
@property
def _plot(self):
return self.__plot_ref()
def data(self):
"""Return the user data associated with this spot."""
return self._data['data']
def index(self):
"""Return the index of this point as given in the scatter plot data."""
return self._index
def size(self):
"""Return the size of this spot.
If the spot has no explicit size set, then return the ScatterPlotItem's default size instead."""
if self._data['size'] == -1:
return self._plot.opts['size']
else:
return self._data['size']
def pos(self):
return Point(self._data['x'], self._data['y'])
def viewPos(self):
return self._plot.mapToView(self.pos())
def setSize(self, size):
"""Set the size of this spot.
If the size is set to -1, then the ScatterPlotItem's default size
will be used instead."""
self._data['size'] = size
self.updateItem()
def symbol(self):
"""Return the symbol of this spot.
If the spot has no explicit symbol set, then return the ScatterPlotItem's default symbol instead.
"""
symbol = self._data['symbol']
if symbol is None:
symbol = self._plot.opts['symbol']
try:
n = int(symbol)
symbol = list(Symbols.keys())[n % len(Symbols)]
except:
pass
return symbol
def setSymbol(self, symbol):
"""Set the symbol for this spot.
If the symbol is set to '', then the ScatterPlotItem's default symbol will be used instead."""
self._data['symbol'] = symbol
self.updateItem()
def pen(self):
pen = self._data['pen']
if pen is None:
pen = self._plot.opts['pen']
return fn.mkPen(pen)
def setPen(self, *args, **kargs):
"""Set the outline pen for this spot"""
self._data['pen'] = _mkPen(*args, **kargs)
self.updateItem()
def resetPen(self):
"""Remove the pen set for this spot; the scatter plot's default pen will be used instead."""
self._data['pen'] = None ## Note this is NOT the same as calling setPen(None)
self.updateItem()
def brush(self):
brush = self._data['brush']
if brush is None:
brush = self._plot.opts['brush']
return fn.mkBrush(brush)
def setBrush(self, *args, **kargs):
"""Set the fill brush for this spot"""
self._data['brush'] = _mkBrush(*args, **kargs)
self.updateItem()
def resetBrush(self):
"""Remove the brush set for this spot; the scatter plot's default brush will be used instead."""
self._data['brush'] = None ## Note this is NOT the same as calling setBrush(None)
self.updateItem()
def isVisible(self):
return self._data['visible']
def setVisible(self, visible):
"""Set whether or not this spot is visible."""
self._data['visible'] = visible
self.updateItem()
def setData(self, data):
"""Set the user-data associated with this spot"""
self._data['data'] = data
def updateItem(self):
self._data['sourceRect'] = (0, 0, 0, 0) # numpy <=1.13.1 won't let us set this with a single zero
self._plot.updateSpots(self._data.reshape(1))
| SpotItem |
python | tornadoweb__tornado | tornado/httpclient.py | {
"start": 13299,
"end": 24608
} | class ____:
"""HTTP client request object."""
_headers = None # type: Union[Dict[str, str], httputil.HTTPHeaders]
# Default values for HTTPRequest parameters.
# Merged with the values on the request object by AsyncHTTPClient
# implementations.
_DEFAULTS = dict(
connect_timeout=20.0,
request_timeout=20.0,
follow_redirects=True,
max_redirects=5,
decompress_response=True,
proxy_password="",
allow_nonstandard_methods=False,
validate_cert=True,
)
def __init__(
self,
url: str,
method: str = "GET",
headers: Optional[Union[Dict[str, str], httputil.HTTPHeaders]] = None,
body: Optional[Union[bytes, str]] = None,
auth_username: Optional[str] = None,
auth_password: Optional[str] = None,
auth_mode: Optional[str] = None,
connect_timeout: Optional[float] = None,
request_timeout: Optional[float] = None,
if_modified_since: Optional[Union[float, datetime.datetime]] = None,
follow_redirects: Optional[bool] = None,
max_redirects: Optional[int] = None,
user_agent: Optional[str] = None,
use_gzip: Optional[bool] = None,
network_interface: Optional[str] = None,
streaming_callback: Optional[
Callable[[bytes], Optional[Awaitable[None]]]
] = None,
header_callback: Optional[Callable[[str], None]] = None,
prepare_curl_callback: Optional[Callable[[Any], None]] = None,
proxy_host: Optional[str] = None,
proxy_port: Optional[int] = None,
proxy_username: Optional[str] = None,
proxy_password: Optional[str] = None,
proxy_auth_mode: Optional[str] = None,
allow_nonstandard_methods: Optional[bool] = None,
validate_cert: Optional[bool] = None,
ca_certs: Optional[str] = None,
allow_ipv6: Optional[bool] = None,
client_key: Optional[str] = None,
client_cert: Optional[str] = None,
body_producer: Optional[
Callable[[Callable[[bytes], None]], "Future[None]"]
] = None,
expect_100_continue: bool = False,
decompress_response: Optional[bool] = None,
ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
) -> None:
r"""All parameters except ``url`` are optional.
:arg str url: URL to fetch
:arg str method: HTTP method, e.g. "GET" or "POST"
:arg headers: Additional HTTP headers to pass on the request
:type headers: `~tornado.httputil.HTTPHeaders` or `dict`
:arg body: HTTP request body as a string (byte or unicode; if unicode
the utf-8 encoding will be used)
:type body: `str` or `bytes`
:arg collections.abc.Callable body_producer: Callable used for
lazy/asynchronous request bodies.
It is called with one argument, a ``write`` function, and should
return a `.Future`. It should call the write function with new
data as it becomes available. The write function returns a
`.Future` which can be used for flow control.
Only one of ``body`` and ``body_producer`` may
be specified. ``body_producer`` is not supported on
``curl_httpclient``. When using ``body_producer`` it is recommended
to pass a ``Content-Length`` in the headers as otherwise chunked
encoding will be used, and many servers do not support chunked
encoding on requests. New in Tornado 4.0
:arg str auth_username: Username for HTTP authentication
:arg str auth_password: Password for HTTP authentication
:arg str auth_mode: Authentication mode; default is "basic".
Allowed values are implementation-defined; ``curl_httpclient``
supports "basic" and "digest"; ``simple_httpclient`` only supports
"basic"
:arg float connect_timeout: Timeout for initial connection in seconds,
default 20 seconds (0 means no timeout)
:arg float request_timeout: Timeout for entire request in seconds,
default 20 seconds (0 means no timeout)
:arg if_modified_since: Timestamp for ``If-Modified-Since`` header
:type if_modified_since: `datetime` or `float`
:arg bool follow_redirects: Should redirects be followed automatically
or return the 3xx response? Default True.
:arg int max_redirects: Limit for ``follow_redirects``, default 5.
:arg str user_agent: String to send as ``User-Agent`` header
:arg bool decompress_response: Request a compressed response from
the server and decompress it after downloading. Default is True.
New in Tornado 4.0.
:arg bool use_gzip: Deprecated alias for ``decompress_response``
since Tornado 4.0.
:arg str network_interface: Network interface or source IP to use for request.
See ``curl_httpclient`` note below.
:arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
be run with each chunk of data as it is received, and
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
the final response.
:arg collections.abc.Callable header_callback: If set, ``header_callback`` will
be run with each header line as it is received (including the
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
containing only ``\r\n``. All lines include the trailing newline
characters). ``HTTPResponse.headers`` will be empty in the final
response. This is most useful in conjunction with
``streaming_callback``, because it's the only way to get access to
header data while the request is in progress.
:arg collections.abc.Callable prepare_curl_callback: If set, will be called with
a ``pycurl.Curl`` object to allow the application to make additional
``setopt`` calls.
:arg str proxy_host: HTTP proxy hostname. To use proxies,
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
``proxy_pass`` and ``proxy_auth_mode`` are optional. Proxies are
currently only supported with ``curl_httpclient``.
:arg int proxy_port: HTTP proxy port
:arg str proxy_username: HTTP proxy username
:arg str proxy_password: HTTP proxy password
:arg str proxy_auth_mode: HTTP proxy Authentication mode;
default is "basic". supports "basic" and "digest"
:arg bool allow_nonstandard_methods: Allow unknown values for ``method``
argument? Default is False.
:arg bool validate_cert: For HTTPS requests, validate the server's
certificate? Default is True.
:arg str ca_certs: filename of CA certificates in PEM format,
or None to use defaults. See note below when used with
``curl_httpclient``.
:arg str client_key: Filename for client SSL key, if any. See
note below when used with ``curl_httpclient``.
:arg str client_cert: Filename for client SSL certificate, if any.
See note below when used with ``curl_httpclient``.
:arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
``simple_httpclient`` (unsupported by ``curl_httpclient``).
Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
and ``client_cert``.
:arg bool allow_ipv6: Use IPv6 when available? Default is True.
:arg bool expect_100_continue: If true, send the
``Expect: 100-continue`` header and wait for a continue response
before sending the request body. Only supported with
``simple_httpclient``.
.. note::
When using ``curl_httpclient`` certain options may be
inherited by subsequent fetches because ``pycurl`` does
not allow them to be cleanly reset. This applies to the
``ca_certs``, ``client_key``, ``client_cert``, and
``network_interface`` arguments. If you use these
options, you should pass them on every request (you don't
have to always use the same values, but it's not possible
to mix requests that specify these options with ones that
use the defaults).
.. versionadded:: 3.1
The ``auth_mode`` argument.
.. versionadded:: 4.0
The ``body_producer`` and ``expect_100_continue`` arguments.
.. versionadded:: 4.2
The ``ssl_options`` argument.
.. versionadded:: 4.5
The ``proxy_auth_mode`` argument.
"""
# Note that some of these attributes go through property setters
# defined below.
self.headers = headers # type: ignore
if if_modified_since:
self.headers["If-Modified-Since"] = httputil.format_timestamp(
if_modified_since
)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.proxy_auth_mode = proxy_auth_mode
self.url = url
self.method = method
self.body = body # type: ignore
self.body_producer = body_producer
self.auth_username = auth_username
self.auth_password = auth_password
self.auth_mode = auth_mode
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects
self.user_agent = user_agent
if decompress_response is not None:
self.decompress_response = decompress_response # type: Optional[bool]
else:
self.decompress_response = use_gzip
self.network_interface = network_interface
self.streaming_callback = streaming_callback
self.header_callback = header_callback
self.prepare_curl_callback = prepare_curl_callback
self.allow_nonstandard_methods = allow_nonstandard_methods
self.validate_cert = validate_cert
self.ca_certs = ca_certs
self.allow_ipv6 = allow_ipv6
self.client_key = client_key
self.client_cert = client_cert
self.ssl_options = ssl_options
self.expect_100_continue = expect_100_continue
self.start_time = time.time()
@property
def headers(self) -> httputil.HTTPHeaders:
# TODO: headers may actually be a plain dict until fairly late in
# the process (AsyncHTTPClient.fetch), but practically speaking,
# whenever the property is used they're already HTTPHeaders.
return self._headers # type: ignore
@headers.setter
def headers(self, value: Union[Dict[str, str], httputil.HTTPHeaders]) -> None:
if value is None:
self._headers = httputil.HTTPHeaders()
else:
self._headers = value # type: ignore
@property
def body(self) -> bytes:
return self._body
@body.setter
def body(self, value: Union[bytes, str]) -> None:
self._body = utf8(value)
| HTTPRequest |
python | Textualize__textual | src/textual/drivers/win32.py | {
"start": 2015,
"end": 2301
} | class ____(Structure):
"""https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str"""
_fields_ = [
("dwMousePosition", COORD),
("dwButtonState", DWORD),
("dwControlKeyState", DWORD),
("dwEventFlags", DWORD),
]
| MOUSE_EVENT_RECORD |
python | tensorflow__tensorflow | tensorflow/python/framework/test_util.py | {
"start": 72934,
"end": 74645
} | class ____:
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders we should be able to
call self.evaluate(), however this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
| FakeEagerSession |
python | wandb__wandb | wandb/automations/_generated/delete_automation.py | {
"start": 225,
"end": 319
} | class ____(GQLResult):
success: bool
DeleteAutomation.model_rebuild()
| DeleteAutomationResult |
python | TheAlgorithms__Python | data_structures/linked_list/skip_list.py | {
"start": 312,
"end": 1116
} | class ____[KT, VT]:
def __init__(self, key: KT | str = "root", value: VT | None = None):
self.key = key
self.value = value
self.forward: list[Node[KT, VT]] = []
def __repr__(self) -> str:
"""
:return: Visual representation of Node
>>> node = Node("Key", 2)
>>> repr(node)
'Node(Key: 2)'
"""
return f"Node({self.key}: {self.value})"
@property
def level(self) -> int:
"""
:return: Number of forward references
>>> node = Node("Key", 2)
>>> node.level
0
>>> node.forward.append(Node("Key2", 4))
>>> node.level
1
>>> node.forward.append(Node("Key3", 6))
>>> node.level
2
"""
return len(self.forward)
| Node |
python | huggingface__transformers | src/transformers/models/arcee/modeling_arcee.py | {
"start": 2166,
"end": 2758
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.up_proj(x)))
@use_kernel_forward_from_hub("RMSNorm")
| ArceeMLP |
python | ray-project__ray | release/llm_tests/benchmark/load_test.py | {
"start": 17658,
"end": 39435
} | class ____(HttpUser):
# no wait time, so every user creates a continuous load, sending requests as quickly as possible
def on_start(self):
try:
self._on_start()
except Exception as e:
print(f"Failed to initialize: {repr(e)}")
print(traceback.format_exc())
sys.exit(1)
def _guess_provider(self):
self.model = self.environment.parsed_options.model
self.provider = self.environment.parsed_options.provider
# guess based on URL
if self.provider is None:
if "fireworks.ai" in self.host:
self.provider = "fireworks"
elif "together" in self.host:
self.provider = "together"
elif "openai" in self.host:
self.provider = "openai"
elif "anyscale" in self.host:
self.provider = "anyscale"
if (
self.model is None
and self.provider is not None
and PROVIDER_CLASS_MAP[self.provider].DEFAULT_MODEL_NAME is not None
):
self.model = PROVIDER_CLASS_MAP[self.provider].DEFAULT_MODEL_NAME
if self.model and self.provider:
return
# vllm doesn't support /model/<name> endpoint, so iterate over all models
try:
resp = self.client.get("/v1/models")
resp.raise_for_status()
resp = resp.json()
except Exception as e:
raise ValueError(
"Argument --model or --provider was not specified and /v1/models failed"
) from e
models = resp["data"]
assert len(models) > 0, "No models found in /v1/models"
owned_by = None
# pick the first model
for m in models:
if self.model is None or m["id"] == self.model:
self.model = m["id"]
owned_by = m["owned_by"]
break
if self.provider is None:
if not owned_by:
raise ValueError(
f"Model {self.model} not found in /v1/models. Specify --provider explicitly"
)
if owned_by in PROVIDER_CLASS_MAP:
self.provider = owned_by
else:
raise ValueError(
f"Can't detect provider, specify it explicitly with --provider, owned_by={owned_by}"
)
def _on_start(self):
self.client.headers["Content-Type"] = "application/json"
if self.environment.parsed_options.api_key:
self.client.headers["Authorization"] = (
"Bearer " + self.environment.parsed_options.api_key
)
if self.environment.parsed_options.header:
for header in self.environment.parsed_options.header:
key, val = header.split(":", 1)
self.client.headers[key] = val
self._guess_provider()
print(f" Provider {self.provider} using model {self.model} ".center(80, "*"))
self.provider_formatter = PROVIDER_CLASS_MAP[self.provider](
self.model, self.environment.parsed_options
)
self.stream = self.environment.parsed_options.stream
prompt_chars = self.environment.parsed_options.prompt_chars
if self.environment.parsed_options.prompt_text:
self.input = _load_curl_like_data(
self.environment.parsed_options.prompt_text
)
elif prompt_chars:
self.input = (
PROMPT_PREFIX_TOKEN * (prompt_chars // len(PROMPT_PREFIX_TOKEN) + 1)
+ PROMPT_SUFFIX
)[:prompt_chars]
else:
assert (
self.environment.parsed_options.prompt_tokens >= PROMPT_SUFFIX_TOKENS
), f"Minimal prompt length is {PROMPT_SUFFIX_TOKENS}"
self.input = (
PROMPT_PREFIX_TOKEN
* (self.environment.parsed_options.prompt_tokens - PROMPT_SUFFIX_TOKENS)
+ PROMPT_SUFFIX
)
self.max_tokens_sampler = LengthSampler(
distribution=self.environment.parsed_options.max_tokens_distribution,
mean=self.environment.parsed_options.max_tokens,
cap=self.environment.parsed_options.max_tokens_cap,
alpha=self.environment.parsed_options.max_tokens_range,
)
self.temperature = self.environment.parsed_options.temperature
logging_params = {
# TODO: add some server info with git version
"provider": self.provider,
"model": self.model,
"prompt_tokens": self.environment.parsed_options.prompt_tokens, # might be overwritten based on metric
"generation_tokens": str(self.max_tokens_sampler),
"stream": self.stream,
"temperature": self.temperature,
"logprobs": self.environment.parsed_options.logprobs,
}
InitTracker.notify_init(self.environment, logging_params)
self.tokenizer = InitTracker.load_tokenizer(
self.environment.parsed_options.tokenizer
)
if self.tokenizer:
self.prompt_tokenizer_tokens = len(
self.tokenizer.encode(self._get_input()[0])
)
else:
self.prompt_tokenizer_tokens = None
if self.environment.parsed_options.qps is not None:
if self.environment.parsed_options.burst:
raise ValueError("Burst and QPS modes are mutually exclusive")
pacer = FixedQPSPacer.instance(
self.environment.parsed_options.qps,
self.environment.parsed_options.qps_distribution,
)
# it will be called by Locust after each task
self.wait_time = pacer.wait_time_till_next
self.wait()
elif self.environment.parsed_options.burst:
self.wait_time = partial(
constant_pacing(self.environment.parsed_options.burst), self
)
else:
# introduce initial delay to avoid all users hitting the service at the same time
time.sleep(random.random())
self.first_done = False
def _get_input(self):
def _maybe_randomize(prompt):
if not self.environment.parsed_options.prompt_randomize:
return prompt
# single letters are single tokens
num_random_tokens = (len(prompt) - len(PROMPT_SUFFIX)) // len(
PROMPT_PREFIX_TOKEN
)
return (
" ".join(
chr(ord("a") + random.randint(0, 25))
for _ in range(num_random_tokens)
)
+ " "
+ prompt[-len(PROMPT_SUFFIX) :]
)
if isinstance(self.input, str):
return _maybe_randomize(self.input), None
else:
item = self.input[random.randint(0, len(self.input) - 1)]
assert "prompt" in item
return _maybe_randomize(item["prompt"]), item.get("images", None)
@task
def generate_text(self):
max_tokens = self.max_tokens_sampler.sample()
prompt, images = self._get_input()
data = self.provider_formatter.format_payload(prompt, max_tokens, images)
t_start = time.perf_counter()
with self.client.post(
self.provider_formatter.get_url(),
data=json.dumps(data),
stream=True,
catch_response=True,
) as response:
combined_text = ""
done = False
prompt_usage_tokens = self.prompt_tokenizer_tokens
total_usage_tokens = None
total_logprob_tokens = None
try:
response.raise_for_status()
except Exception as e:
raise RuntimeError(f"Error in response: {response.text}") from e
t_first_token = None
for chunk in response.iter_lines(delimiter=b"\n\n"):
if len(chunk) == 0:
continue # come providers send empty lines between data chunks
if done:
if chunk != b"data: [DONE]":
print(f"WARNING: Received more chunks after [DONE]: {chunk}")
try:
now = time.perf_counter()
if self.stream:
assert chunk.startswith(
b"data:"
), f"Unexpected chunk not starting with 'data': {chunk}"
chunk = chunk[len(b"data:") :]
if chunk.strip() == b"[DONE]":
done = True
continue
data = orjson.loads(chunk)
out = self.provider_formatter.parse_output_json(data, prompt)
if out.usage_tokens:
total_usage_tokens = (
total_usage_tokens or 0
) + out.usage_tokens
if out.prompt_usage_tokens:
prompt_usage_tokens = out.prompt_usage_tokens
combined_text += out.text
# some providers (SGLang) send an empty chunk first skewing the TTFT
if combined_text and t_first_token is None:
t_first_token = now
if out.logprob_tokens:
total_logprob_tokens = (
total_logprob_tokens or 0
) + out.logprob_tokens
except Exception as e:
print(f"Failed to parse response: {chunk} with error {repr(e)}")
response.failure(e)
return
assert t_first_token is not None, "empty response received"
if (
(total_logprob_tokens is not None)
and (total_usage_tokens is not None)
and total_logprob_tokens != total_usage_tokens
):
print(
f"WARNING: usage_tokens {total_usage_tokens} != logprob_tokens {total_logprob_tokens}"
)
if total_logprob_tokens is not None:
num_tokens = total_logprob_tokens
else:
num_tokens = total_usage_tokens
if self.tokenizer:
num_tokenizer_tokens = len(self.tokenizer.encode(combined_text))
if num_tokens is None:
num_tokens = num_tokenizer_tokens
elif num_tokens != num_tokenizer_tokens:
print(
f"WARNING: tokenizer token count {num_tokenizer_tokens} != {num_tokens} received from server"
)
num_tokens = num_tokens or 0
num_chars = len(combined_text)
now = time.perf_counter()
dur_total = now - t_start
dur_generation = now - t_first_token
dur_first_token = t_first_token - t_start
print(
f"Response received: total {dur_total*1000:.2f} ms, first token {dur_first_token*1000:.2f} ms, {num_chars} chars, {num_tokens} tokens"
)
if self.environment.parsed_options.show_response:
print("---")
print(combined_text)
print("---")
if num_chars:
add_custom_metric(
"latency_per_char",
dur_generation / num_chars * 1000,
num_chars,
)
if self.stream:
add_custom_metric("time_to_first_token", dur_first_token * 1000)
add_custom_metric("total_latency", dur_total * 1000)
if num_tokens:
if num_tokens != max_tokens:
print(
f"WARNING: wrong number of tokens: {num_tokens}, expected {max_tokens}"
)
add_custom_metric("num_tokens", num_tokens)
add_custom_metric(
"latency_per_token",
dur_generation / num_tokens * 1000,
num_tokens,
)
add_custom_metric(
"overall_latency_per_token",
dur_total / num_tokens * 1000,
num_tokens,
)
if (
prompt_usage_tokens is not None
and self.prompt_tokenizer_tokens is not None
and prompt_usage_tokens != self.prompt_tokenizer_tokens
):
print(
f"WARNING: prompt usage tokens {prompt_usage_tokens} != {self.prompt_tokenizer_tokens} derived from local tokenizer"
)
prompt_tokens = prompt_usage_tokens or self.prompt_tokenizer_tokens
if prompt_tokens:
add_custom_metric("prompt_tokens", prompt_tokens)
if not self.first_done:
self.first_done = True
InitTracker.notify_first_request()
@events.init_command_line_parser.add_listener
def init_parser(parser):
parser.add_argument(
"--provider",
choices=list(PROVIDER_CLASS_MAP.keys()),
type=str,
help="Which flavor of API to use. If not specified, we'll try to guess based on the URL and /v1/models output",
)
parser.add_argument(
"-m",
"--model",
env_var="MODEL",
type=str,
help="The model to use for generating text. If not specified we will pick the first model from the service as returned by /v1/models",
)
parser.add_argument(
"--chat",
action=argparse.BooleanOptionalAction,
default=False,
help="Use /v1/chat/completions API",
)
parser.add_argument(
"-p",
"--prompt-tokens",
env_var="PROMPT_TOKENS",
type=int,
default=512,
help="Length of the prompt in tokens. Default 512",
)
parser.add_argument(
"--prompt-chars",
env_var="PROMPT_CHARS",
type=int,
help="Length of the prompt in characters.",
)
parser.add_argument(
"--prompt-text",
env_var="PROMPT_TEXT",
type=str,
help="Prompt text to use instead of generating one. It can be a file reference starting with an ampersand, e.g. `@prompt.txt`",
)
parser.add_argument(
"--prompt-randomize",
action=argparse.BooleanOptionalAction,
default=False,
help="Include a few random numbers in the generated prompt to avoid caching",
)
parser.add_argument(
"-o",
"--max-tokens",
env_var="MAX_TOKENS",
type=int,
default=64,
help="Max number of tokens to generate. If --max-tokens-distribution is non-constant this is going to be the mean. Defaults to 64",
)
parser.add_argument(
"--max-tokens-cap",
env_var="MAX_TOKENS_CAP",
type=int,
help="If --max-tokens-distribution is non-constant, this truncates the distribition at the specified limit",
)
parser.add_argument(
"--max-tokens-distribution",
env_var="MAX_TOKENS_DISTRIBUTION",
type=str,
choices=["constant", "uniform", "exponential", "normal"],
default="constant",
help="How to sample `max-tokens` on each request",
)
parser.add_argument(
"--max-tokens-range",
env_var="MAX_TOKENS_RANGE",
type=float,
default=0.3,
help="Specifies the width of the distribution. Specified value `alpha` is relative to `max-tokens`. For uniform distribution we'd sample from [max_tokens - max_tokens * alpha, max_tokens + max_tokens * alpha]. For normal distribution we'd sample from `N(max_tokens, max_tokens * alpha)`. Defaults to 0.3",
)
parser.add_argument(
"--stream",
dest="stream",
action=argparse.BooleanOptionalAction,
default=True,
help="Use the streaming API",
)
parser.add_argument(
"-k",
"--api-key",
env_var="API_KEY",
help="Auth for the API",
)
parser.add_argument(
"--temperature",
env_var="TEMPERATURE",
type=float,
default=1.0,
help="Temperature parameter for the API",
)
parser.add_argument(
"--logprobs",
type=int,
default=None,
help="Whether to ask for logprobs, it makes things slower for some providers but is necessary for token count in streaming (unless it's Fireworks API that returns usage in streaming mode)",
)
parser.add_argument(
"--summary-file",
type=str,
help="Append the line with the summary to the specified CSV file. Useful for generating a spreadsheet with perf sweep results. If the file doesn't exist, writes out the header first",
)
parser.add_argument(
"--qps",
type=float,
default=None,
help="Enabled 'fixed QPS' mode where requests are issues at the specified rate regardless of how long the processing takes. In this case --users and --spawn-rate need to be set to a sufficiently high value (e.g. 100)",
)
parser.add_argument(
"--qps-distribution",
type=str,
choices=["constant", "uniform", "exponential"],
default="constant",
help="Must be used with --qps. Specifies how to space out requests: equally ('constant') or by sampling wait times from a distribution ('uniform' or 'exponential'). Expected QPS is going to match --qps",
)
parser.add_argument(
"--burst",
type=float,
default=None,
help="Makes requests to arrive in bursts every specified number of seconds. Note that burst duration has to be longer than maximum time of the response. Size of the burst is controlled by --users. The spawn rate -r is best set to a high value",
)
parser.add_argument(
"--tokenizer",
type=str,
help="Specify HF tokenizer to use for validating the output of the model. It's optional, we're going to rely on 'usage' or 'logprobs' field to get token count information",
)
parser.add_argument(
"--show-response",
action=argparse.BooleanOptionalAction,
default=False,
help="Print the result of each generation",
)
parser.add_argument(
"-pcml",
"--prompt-cache-max-len",
env_var="PROMPT_CACHE_MAX_LEN",
type=int,
default=0,
help="Maximum length of the prompt cache to use. Defaults to 0 (no caching).",
)
parser.add_argument(
"--header",
action="append",
default=[],
help="Arbitrary headers to add to the inference request. Can be used multiple times. For example, --header header1:value1 --header header2:value2",
)
parser.add_argument(
"-n",
"--n",
default=1,
type=int,
help="How many sequences to generate (makes sense to use with non-zero temperature).",
)
@events.quitting.add_listener
# ADDED A NAME TO THE FUNCTION
def collect_metrics(environment, **kw):
total_latency = environment.stats.entries[("total_latency", "METRIC")]
if environment.stats.total.num_failures > 0 or total_latency.num_requests == 0:
print("Test failed due to failed requests")
environment.process_exit_code = 1
return
entries = copy.copy(InitTracker.logging_params)
if environment.parsed_options.qps is not None:
entries[
"concurrency"
] = f"QPS {environment.parsed_options.qps} {environment.parsed_options.qps_distribution}"
else:
entries["concurrency"] = InitTracker.users
for metric_name in [
"time_to_first_token",
"latency_per_token",
"num_tokens",
"total_latency",
"prompt_tokens", # might overwrite the static value based on server side tokenization
]:
entries[metric_name] = environment.stats.entries[
(metric_name, "METRIC")
].avg_response_time
if not environment.parsed_options.stream:
# if there's no streaming these metrics are meaningless
entries["time_to_first_token"] = ""
entries["latency_per_token"] = ""
entries["num_requests"] = total_latency.num_requests
entries["qps"] = total_latency.total_rps
percentile_to_report = [50, 90, 99, 99.9]
percentile_metrics = ["time_to_first_token", "total_latency"]
for percentile_metric in percentile_metrics:
metrics = environment.stats.entries[percentile_metric, "METRIC"]
for percentile in percentile_to_report:
name = f"P{percentile}_{percentile_metric}"
entries[name] = metrics.get_response_time_percentile(percentile / 100)
# Pretty print the entries
def pretty_name(s):
return " ".join([w.capitalize() for w in s.split("_")])
entries = {pretty_name(k): v for k, v in entries.items()}
# print in the final event handler to make sure our output is the last one
@events.quit.add_listener
def exit_printer(**kw):
entries = environment.stats.entries
max_width = max(len(k) for k in entries.keys())
print(" Summary ".center(80, "="))
for k, v in entries.items():
print(f"{k:<{max_width}}: {v}")
print("=" * 80)
if environment.parsed_options.summary_file:
with open(environment.parsed_options.summary_file, "a") as f:
writer = csv.DictWriter(f, fieldnames=entries.keys())
if f.tell() == 0:
writer.writeheader()
writer.writerow(entries)
return entries
| LLMUser |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer.py | {
"start": 62956,
"end": 65370
} | class ____(nn.Module):
def __init__(self, config: MaskFormerConfig):
"""
Pixel Level Module proposed in [Per-Pixel Classification is Not All You Need for Semantic
Segmentation](https://huggingface.co/papers/2107.06278). It runs the input image through a backbone and a pixel
decoder, generating an image feature map and pixel embeddings.
Args:
config ([`MaskFormerConfig`]):
The configuration used to instantiate this model.
"""
super().__init__()
if getattr(config, "backbone_config") is not None and config.backbone_config.model_type == "swin":
# for backwards compatibility
backbone_config = config.backbone_config
backbone_config = MaskFormerSwinConfig.from_dict(backbone_config.to_dict())
backbone_config.out_features = ["stage1", "stage2", "stage3", "stage4"]
config.backbone_config = backbone_config
self.encoder = load_backbone(config)
feature_channels = self.encoder.channels
self.decoder = MaskFormerPixelDecoder(
in_features=feature_channels[-1],
feature_size=config.fpn_feature_size,
mask_feature_size=config.mask_feature_size,
lateral_widths=feature_channels[:-1],
)
def forward(
self, pixel_values: Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> MaskFormerPixelLevelModuleOutput:
features = self.encoder(pixel_values).feature_maps
decoder_output = self.decoder(features, output_hidden_states, return_dict=return_dict)
if not return_dict:
last_hidden_state = decoder_output[0]
outputs = (features[-1], last_hidden_state)
if output_hidden_states:
hidden_states = decoder_output[1]
outputs = outputs + (tuple(features),) + (hidden_states,)
return outputs
return MaskFormerPixelLevelModuleOutput(
# the last feature is actually the output from the last layer
encoder_last_hidden_state=features[-1],
decoder_last_hidden_state=decoder_output.last_hidden_state,
encoder_hidden_states=tuple(features) if output_hidden_states else (),
decoder_hidden_states=decoder_output.hidden_states if output_hidden_states else (),
)
| MaskFormerPixelLevelModule |
python | ApeWorX__ape | src/ape_node/provider.py | {
"start": 15523,
"end": 17492
} | class ____(PluginConfig):
"""
Configure your ``node:`` in Ape, the default provider
plugin for live-network nodes. Also, ``ape node`` can
start-up a local development node for testing purposes.
"""
ethereum: EthereumNetworkConfig = EthereumNetworkConfig()
"""
Configure the Ethereum network settings for the ``ape node`` provider,
such as which URIs to use for each network.
"""
executable: Optional[list[str]] = None
"""
For starting nodes, select the executable. Defaults to using
``shutil.which("geth")``.
"""
data_dir: Optional[Path] = None
"""
For node-management, choose where the geth data directory shall
be located. Defaults to using a location within Ape's DATA_FOLDER.
"""
ipc_path: Optional[Path] = None
"""
For IPC connections, select the IPC path. If managing a process,
web3.py can determine the IPC w/o needing to manually configure.
"""
call_trace_approach: Optional[TraceApproach] = None
"""
Select the trace approach to use. Defaults to deducing one
based on your node's client-version and available RPCs.
"""
request_headers: dict = {}
"""
Optionally specify request headers to use whenever using this provider.
"""
rpc_api: Optional[list[str]] = None
"""
RPC APIs to enable. Defaults to all geth APIs.
"""
model_config = SettingsConfigDict(extra="allow", env_prefix="APE_NODE_")
@field_validator("call_trace_approach", mode="before")
@classmethod
def validate_trace_approach(cls, value):
# This handles nicer config values.
return None if value is None else TraceApproach.from_key(value)
@field_validator("executable", mode="before")
@classmethod
def validate_executable(cls, value):
if not value:
return None
elif isinstance(value, str):
return value.split(" ")
return value
| EthereumNodeConfig |
python | tensorflow__tensorflow | tensorflow/python/util/tf_inspect_test.py | {
"start": 21495,
"end": 25832
} | class ____(test.TestCase):
def testReturnsEmptyWhenUnboundFuncHasNoParameters(self):
def empty():
pass
self.assertEqual({}, tf_inspect.getcallargs(empty))
def testClashingParameterNames(self):
def func(positional, func=1, func_and_positional=2, kwargs=3):
return positional, func, func_and_positional, kwargs
kwargs = {}
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 1,
'func_and_positional': 2,
'kwargs': 3
})
kwargs = dict(func=4, func_and_positional=5, kwargs=6)
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 4,
'func_and_positional': 5,
'kwargs': 6
})
def testUnboundFuncWithOneParamPositional(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsPositional(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 10, 'b': 20}, tf_inspect.getcallargs(func, 10, 20))
def testUnboundFuncWithOneParamKeyword(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, a=5))
def testUnboundFuncWithTwoParamsKeyword(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 6, 'b': 7}, tf_inspect.getcallargs(func, a=6, b=7))
def testUnboundFuncWithOneParamDefault(self):
def func(a=13):
return a
self.assertEqual({'a': 13}, tf_inspect.getcallargs(func))
def testUnboundFuncWithOneParamDefaultOnePositional(self):
def func(a=0):
return a
self.assertEqual({'a': 1}, tf_inspect.getcallargs(func, 1))
def testUnboundFuncWithTwoParamsDefaultOnePositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 5, 'b': 2}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsDefaultTwoPositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, 3, 4))
def testUnboundFuncWithOneParamDefaultOneKeyword(self):
def func(a=1):
return a
self.assertEqual({'a': 3}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordFirst(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 2}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordSecond(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 1, 'b': 4}, tf_inspect.getcallargs(func, b=4))
def testUnboundFuncWithTwoParamsDefaultTwoKeywords(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, a=3, b=4))
def testBoundFuncWithOneParam(self):
class Test(object):
def bound(self):
pass
t = Test()
self.assertEqual({'self': t}, tf_inspect.getcallargs(t.bound))
def testBoundFuncWithManyParamsAndDefaults(self):
class Test(object):
def bound(self, a, b=2, c='Hello'):
return (a, b, c)
t = Test()
self.assertEqual({
'self': t,
'a': 3,
'b': 2,
'c': 'Goodbye'
}, tf_inspect.getcallargs(t.bound, 3, c='Goodbye'))
def testClassMethod(self):
class Test(object):
@classmethod
def test(cls, a, b=3, c='hello'):
return (a, b, c)
self.assertEqual({
'cls': Test,
'a': 5,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(Test.test, 5, c='goodbye'))
def testUsesOutermostDecoratorsArgSpec(self):
def func():
pass
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
decorated = tf_decorator.make_decorator(
func,
wrapper,
decorator_argspec=tf_inspect.FullArgSpec(
args=['a', 'b', 'c'],
varargs=None,
kwonlyargs={},
defaults=(3, 'hello'),
kwonlydefaults=None,
varkw=None,
annotations=None))
self.assertEqual({
'a': 4,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(decorated, 4, c='goodbye'))
if __name__ == '__main__':
test.main()
| TfInspectGetCallArgsTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass1.py | {
"start": 511,
"end": 611
} | class ____(Generic[T]):
__match_args__ = ("attr_a", "attr_b")
attr_a: T
attr_b: str
| ClassB |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 81508,
"end": 84098
} | class ____(_fixtures.FixtureTest):
run_inserts = None
def test_m2o_nonmatch(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties=dict(
user=relationship(User, lazy="select", uselist=False)
),
)
session = fixture_session()
def fixture():
return [
Address(email_address="a1", user=User(name="u1")),
Address(email_address="a2", user=User(name="u2")),
]
session.add_all(fixture())
self.assert_sql_execution(
testing.db,
session.flush,
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO users (name) VALUES (:name) "
"RETURNING users.id",
[{"name": "u1"}, {"name": "u2"}],
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address) "
"RETURNING addresses.id",
[
{"user_id": 1, "email_address": "a1"},
{"user_id": 2, "email_address": "a2"},
],
),
],
[
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
{"name": "u1"},
),
CompiledSQL(
"INSERT INTO users (name) VALUES (:name)",
{"name": "u2"},
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
{"user_id": 1, "email_address": "a1"},
),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
{"user_id": 2, "email_address": "a2"},
),
],
),
)
| SaveTest2 |
python | pydata__xarray | xarray/core/dataset.py | {
"start": 5254,
"end": 407196
} | class ____(
DataWithCoords,
DatasetAggregations,
DatasetArithmetic,
Mapping[Hashable, "DataArray"],
):
"""A multi-dimensional, in memory, array database.
A dataset resembles an in-memory representation of a NetCDF file,
and consists of variables, coordinates and attributes which
together form a self describing dataset.
Dataset implements the mapping interface with keys given by variable
names and values given by DataArray objects for each variable name.
By default, pandas indexes are created for one dimensional variables with
name equal to their dimension (i.e., :term:`Dimension coordinate`) so those
variables can be readily used as coordinates for label based indexing. When a
:py:class:`~xarray.Coordinates` object is passed to ``coords``, any existing
index(es) built from those coordinates will be added to the Dataset.
To load data from a file or file-like object, use the `open_dataset`
function.
Parameters
----------
data_vars : dict-like, optional
A mapping from variable names to :py:class:`~xarray.DataArray`
objects, :py:class:`~xarray.Variable` objects or to tuples of
the form ``(dims, data[, attrs])`` which can be used as
arguments to create a new ``Variable``. Each dimension must
have the same length in all variables in which it appears.
The following notations are accepted:
- mapping {var name: DataArray}
- mapping {var name: Variable}
- mapping {var name: (dimension name, array-like)}
- mapping {var name: (tuple of dimension names, array-like)}
- mapping {dimension name: array-like}
(if array-like is not a scalar it will be automatically moved to coords,
see below)
Each dimension must have the same length in all variables in
which it appears.
coords : :py:class:`~xarray.Coordinates` or dict-like, optional
A :py:class:`~xarray.Coordinates` object or another mapping in
similar form as the `data_vars` argument, except that each item
is saved on the dataset as a "coordinate".
These variables have an associated meaning: they describe
constant/fixed/independent quantities, unlike the
varying/measured/dependent quantities that belong in
`variables`.
The following notations are accepted for arbitrary mappings:
- mapping {coord name: DataArray}
- mapping {coord name: Variable}
- mapping {coord name: (dimension name, array-like)}
- mapping {coord name: (tuple of dimension names, array-like)}
- mapping {dimension name: array-like}
(the dimension name is implicitly set to be the same as the
coord name)
The last notation implies either that the coordinate value is a scalar
or that it is a 1-dimensional array and the coord name is the same as
the dimension name (i.e., a :term:`Dimension coordinate`). In the latter
case, the 1-dimensional array will be assumed to give index values
along the dimension with the same name.
Alternatively, a :py:class:`~xarray.Coordinates` object may be used in
order to explicitly pass indexes (e.g., a multi-index or any custom
Xarray index) or to bypass the creation of a default index for any
:term:`Dimension coordinate` included in that object.
attrs : dict-like, optional
Global attributes to save on this dataset.
(see FAQ, :ref:`approach to metadata`)
Examples
--------
In this example dataset, we will represent measurements of the temperature
and pressure that were made under various conditions:
* the measurements were made on four different days;
* they were made at two separate locations, which we will represent using
their latitude and longitude; and
* they were made using three instrument developed by three different
manufacturers, which we will refer to using the strings `'manufac1'`,
`'manufac2'`, and `'manufac3'`.
>>> np.random.seed(0)
>>> temperature = 15 + 8 * np.random.randn(2, 3, 4)
>>> precipitation = 10 * np.random.rand(2, 3, 4)
>>> lon = [-99.83, -99.32]
>>> lat = [42.25, 42.21]
>>> instruments = ["manufac1", "manufac2", "manufac3"]
>>> time = pd.date_range("2014-09-06", periods=4)
>>> reference_time = pd.Timestamp("2014-09-05")
Here, we initialize the dataset with multiple dimensions. We use the string
`"loc"` to represent the location dimension of the data, the string
`"instrument"` to represent the instrument manufacturer dimension, and the
string `"time"` for the time dimension.
>>> ds = xr.Dataset(
... data_vars=dict(
... temperature=(["loc", "instrument", "time"], temperature),
... precipitation=(["loc", "instrument", "time"], precipitation),
... ),
... coords=dict(
... lon=("loc", lon),
... lat=("loc", lat),
... instrument=instruments,
... time=time,
... reference_time=reference_time,
... ),
... attrs=dict(description="Weather related data."),
... )
>>> ds
<xarray.Dataset> Size: 552B
Dimensions: (loc: 2, instrument: 3, time: 4)
Coordinates:
* instrument (instrument) <U8 96B 'manufac1' 'manufac2' 'manufac3'
* time (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
lon (loc) float64 16B -99.83 -99.32
lat (loc) float64 16B 42.25 42.21
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: loc
Data variables:
temperature (loc, instrument, time) float64 192B 29.11 18.2 ... 9.063
precipitation (loc, instrument, time) float64 192B 4.562 5.684 ... 1.613
Attributes:
description: Weather related data.
Find out where the coldest temperature was and what values the
other variables had:
>>> ds.isel(ds.temperature.argmin(...))
<xarray.Dataset> Size: 80B
Dimensions: ()
Coordinates:
lon float64 8B -99.32
lat float64 8B 42.21
instrument <U8 32B 'manufac3'
time datetime64[ns] 8B 2014-09-06
reference_time datetime64[ns] 8B 2014-09-05
Data variables:
temperature float64 8B -5.424
precipitation float64 8B 9.884
Attributes:
description: Weather related data.
"""
_attrs: dict[Hashable, Any] | None
_cache: dict[str, Any]
_coord_names: set[Hashable]
_dims: dict[Hashable, int]
_encoding: dict[Hashable, Any] | None
_close: Callable[[], None] | None
_indexes: dict[Hashable, Index]
_variables: dict[Hashable, Variable]
__slots__ = (
"__weakref__",
"_attrs",
"_cache",
"_close",
"_coord_names",
"_dims",
"_encoding",
"_indexes",
"_variables",
)
def __init__(
self,
# could make a VariableArgs to use more generally, and refine these
# categories
data_vars: DataVars | None = None,
coords: Mapping[Any, Any] | None = None,
attrs: Mapping[Any, Any] | None = None,
) -> None:
if data_vars is None:
data_vars = {}
if coords is None:
coords = {}
both_data_and_coords = set(data_vars) & set(coords)
if both_data_and_coords:
raise ValueError(
f"variables {both_data_and_coords!r} are found in both data_vars and coords"
)
if isinstance(coords, Dataset):
coords = coords._variables
variables, coord_names, dims, indexes, _ = merge_data_and_coords(
data_vars, coords
)
self._attrs = dict(attrs) if attrs else None
self._close = None
self._encoding = None
self._variables = variables
self._coord_names = coord_names
self._dims = dims
self._indexes = indexes
# TODO: dirty workaround for mypy 1.5 error with inherited DatasetOpsMixin vs. Mapping
# related to https://github.com/python/mypy/issues/9319?
def __eq__(self, other: DsCompatible) -> Self: # type: ignore[override]
return super().__eq__(other)
@classmethod
def load_store(cls, store, decoder=None) -> Self:
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attributes = decoder(variables, attributes)
obj = cls(variables, attrs=attributes)
obj.set_close(store.close)
return obj
@property
def variables(self) -> Frozen[Hashable, Variable]:
"""Low level interface to Dataset contents as dict of Variable objects.
This ordered dictionary is frozen to prevent mutation that could
violate Dataset invariants. It contains all variable objects
constituting the Dataset, including both data variables and
coordinates.
"""
return Frozen(self._variables)
@property
def attrs(self) -> dict[Any, Any]:
"""Dictionary of global attributes on this dataset"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Any, Any]) -> None:
self._attrs = dict(value) if value else None
@property
def encoding(self) -> dict[Any, Any]:
"""Dictionary of global encoding attributes on this dataset"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value: Mapping[Any, Any]) -> None:
self._encoding = dict(value)
def reset_encoding(self) -> Self:
warnings.warn(
"reset_encoding is deprecated since 2023.11, use `drop_encoding` instead",
stacklevel=2,
)
return self.drop_encoding()
def drop_encoding(self) -> Self:
"""Return a new Dataset without encoding on the dataset or any of its
variables/coords."""
variables = {k: v.drop_encoding() for k, v in self.variables.items()}
return self._replace(variables=variables, encoding={})
@property
def dims(self) -> Frozen[Hashable, int]:
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
Note that type of this object differs from `DataArray.dims`.
See `Dataset.sizes` and `DataArray.sizes` for consistently named
properties. This property will be changed to return a type more consistent with
`DataArray.dims` in the future, i.e. a set of dimension names.
See Also
--------
Dataset.sizes
DataArray.dims
"""
return FrozenMappingWarningOnValuesAccess(self._dims)
@property
def sizes(self) -> Frozen[Hashable, int]:
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
This is an alias for `Dataset.dims` provided for the benefit of
consistency with `DataArray.sizes`.
See Also
--------
DataArray.sizes
"""
return Frozen(self._dims)
@property
def dtypes(self) -> Frozen[Hashable, np.dtype]:
"""Mapping from data variable names to dtypes.
Cannot be modified directly, but is updated when adding new variables.
See Also
--------
DataArray.dtype
"""
return Frozen(
{
n: v.dtype
for n, v in self._variables.items()
if n not in self._coord_names
}
)
def load(self, **kwargs) -> Self:
"""Trigger loading data into memory and return this dataset.
Data will be computed and/or loaded from disk or a remote source.
Unlike ``.compute``, the original dataset is modified and returned.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
Returns
-------
object : Dataset
Same object but with lazy data variables and coordinates as in-memory arrays.
See Also
--------
dask.compute
Dataset.compute
Dataset.load_async
DataArray.load
Variable.load
"""
# access .data to coerce everything to numpy or dask arrays
chunked_data = {
k: v._data for k, v in self.variables.items() if is_chunked_array(v._data)
}
if chunked_data:
chunkmanager = get_chunked_array_type(*chunked_data.values())
# evaluate all the chunked arrays simultaneously
evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute(
*chunked_data.values(), **kwargs
)
for k, data in zip(chunked_data, evaluated_data, strict=False):
self.variables[k].data = data
# load everything else sequentially
[v.load() for k, v in self.variables.items() if k not in chunked_data]
return self
async def load_async(self, **kwargs) -> Self:
"""Trigger and await asynchronous loading of data into memory and return this dataset.
Data will be computed and/or loaded from disk or a remote source.
Unlike ``.compute``, the original dataset is modified and returned.
Only works when opening data lazily from IO storage backends which support lazy asynchronous loading.
Otherwise will raise a NotImplementedError.
Note users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
Returns
-------
object : Dataset
Same object but with lazy data variables and coordinates as in-memory arrays.
See Also
--------
dask.compute
Dataset.compute
Dataset.load
DataArray.load_async
Variable.load_async
"""
# TODO refactor this to pull out the common chunked_data codepath
# this blocks on chunked arrays but not on lazily indexed arrays
# access .data to coerce everything to numpy or dask arrays
chunked_data = {
k: v._data for k, v in self.variables.items() if is_chunked_array(v._data)
}
if chunked_data:
chunkmanager = get_chunked_array_type(*chunked_data.values())
# evaluate all the chunked arrays simultaneously
evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute(
*chunked_data.values(), **kwargs
)
for k, data in zip(chunked_data, evaluated_data, strict=False):
self.variables[k].data = data
# load everything else concurrently
coros = [
v.load_async() for k, v in self.variables.items() if k not in chunked_data
]
await asyncio.gather(*coros)
return self
def __dask_tokenize__(self) -> object:
from dask.base import normalize_token
return normalize_token(
(type(self), self._variables, self._coord_names, self._attrs or None)
)
def __dask_graph__(self):
graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}
graphs = {k: v for k, v in graphs.items() if v is not None}
if not graphs:
return None
else:
try:
from dask.highlevelgraph import HighLevelGraph
return HighLevelGraph.merge(*graphs.values())
except ImportError:
from dask import sharedict
return sharedict.merge(*graphs.values())
def __dask_keys__(self):
import dask
return [
v.__dask_keys__()
for v in self.variables.values()
if dask.is_dask_collection(v)
]
def __dask_layers__(self):
import dask
return sum(
(
v.__dask_layers__()
for v in self.variables.values()
if dask.is_dask_collection(v)
),
(),
)
@property
def __dask_optimize__(self):
import dask.array as da
return da.Array.__dask_optimize__
@property
def __dask_scheduler__(self):
import dask.array as da
return da.Array.__dask_scheduler__
def __dask_postcompute__(self):
return self._dask_postcompute, ()
def __dask_postpersist__(self):
return self._dask_postpersist, ()
def _dask_postcompute(self, results: Iterable[Variable]) -> Self:
import dask
variables = {}
results_iter = iter(results)
for k, v in self._variables.items():
if dask.is_dask_collection(v):
rebuild, args = v.__dask_postcompute__()
v = rebuild(next(results_iter), *args)
variables[k] = v
return type(self)._construct_direct(
variables,
self._coord_names,
self._dims,
self._attrs,
self._indexes,
self._encoding,
self._close,
)
def _dask_postpersist(
self, dsk: Mapping, *, rename: Mapping[str, str] | None = None
) -> Self:
from dask import is_dask_collection
from dask.highlevelgraph import HighLevelGraph
from dask.optimization import cull
variables = {}
for k, v in self._variables.items():
if not is_dask_collection(v):
variables[k] = v
continue
if isinstance(dsk, HighLevelGraph):
# dask >= 2021.3
# __dask_postpersist__() was called by dask.highlevelgraph.
# Don't use dsk.cull(), as we need to prevent partial layers:
# https://github.com/dask/dask/issues/7137
layers = v.__dask_layers__()
if rename:
layers = [rename.get(k, k) for k in layers]
dsk2 = dsk.cull_layers(layers)
elif rename: # pragma: nocover
# At the moment of writing, this is only for forward compatibility.
# replace_name_in_key requires dask >= 2021.3.
from dask.base import flatten, replace_name_in_key
keys = [
replace_name_in_key(k, rename) for k in flatten(v.__dask_keys__())
]
dsk2, _ = cull(dsk, keys)
else:
# __dask_postpersist__() was called by dask.optimize or dask.persist
dsk2, _ = cull(dsk, v.__dask_keys__())
rebuild, args = v.__dask_postpersist__()
# rename was added in dask 2021.3
kwargs = {"rename": rename} if rename else {}
variables[k] = rebuild(dsk2, *args, **kwargs)
return type(self)._construct_direct(
variables,
self._coord_names,
self._dims,
self._attrs,
self._indexes,
self._encoding,
self._close,
)
def compute(self, **kwargs) -> Self:
"""Trigger loading data into memory and return a new dataset.
Data will be computed and/or loaded from disk or a remote source.
Unlike ``.load``, the original dataset is left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
Returns
-------
object : Dataset
New object with lazy data variables and coordinates as in-memory arrays.
See Also
--------
dask.compute
Dataset.load
Dataset.load_async
DataArray.compute
Variable.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def _persist_inplace(self, **kwargs) -> Self:
"""Persist all chunked arrays in memory."""
# access .data to coerce everything to numpy or dask arrays
lazy_data = {
k: v._data for k, v in self.variables.items() if is_chunked_array(v._data)
}
if lazy_data:
chunkmanager = get_chunked_array_type(*lazy_data.values())
# evaluate all the dask arrays simultaneously
evaluated_data = chunkmanager.persist(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data, strict=False):
self.variables[k].data = data
return self
def persist(self, **kwargs) -> Self:
"""Trigger computation, keeping data as chunked arrays.
This operation can be used to trigger computation on underlying dask
arrays, similar to ``.compute()`` or ``.load()``. However this
operation keeps the data as dask arrays. This is particularly useful
when using the dask.distributed scheduler and you want to load a large
amount of data into distributed memory.
Like compute (but unlike load), the original dataset is left unaltered.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.persist``.
Returns
-------
object : Dataset
New object with all dask-backed coordinates and data variables as persisted dask arrays.
See Also
--------
dask.persist
"""
new = self.copy(deep=False)
return new._persist_inplace(**kwargs)
@classmethod
def _construct_direct(
cls,
variables: dict[Any, Variable],
coord_names: set[Hashable],
dims: dict[Any, int] | None = None,
attrs: dict | None = None,
indexes: dict[Any, Index] | None = None,
encoding: dict | None = None,
close: Callable[[], None] | None = None,
) -> Self:
"""Shortcut around __init__ for internal use when we want to skip
costly validation
"""
if dims is None:
dims = calculate_dimensions(variables)
if indexes is None:
indexes = {}
obj = object.__new__(cls)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._indexes = indexes
obj._attrs = attrs
obj._close = close
obj._encoding = encoding
return obj
def _replace(
self,
variables: dict[Hashable, Variable] | None = None,
coord_names: set[Hashable] | None = None,
dims: dict[Any, int] | None = None,
attrs: dict[Hashable, Any] | Default | None = _default,
indexes: dict[Hashable, Index] | None = None,
encoding: dict | Default | None = _default,
inplace: bool = False,
) -> Self:
"""Fastpath constructor for internal use.
Returns an object with optionally with replaced attributes.
Explicitly passed arguments are *not* copied when placed on the new
dataset. It is up to the caller to ensure that they have the right type
and are not used elsewhere.
"""
if inplace:
if variables is not None:
self._variables = variables
if coord_names is not None:
self._coord_names = coord_names
if dims is not None:
self._dims = dims
if attrs is not _default:
self._attrs = attrs
if indexes is not None:
self._indexes = indexes
if encoding is not _default:
self._encoding = encoding
obj = self
else:
if variables is None:
variables = self._variables.copy()
if coord_names is None:
coord_names = self._coord_names.copy()
if dims is None:
dims = self._dims.copy()
if attrs is _default:
attrs = copy.copy(self._attrs)
if indexes is None:
indexes = self._indexes.copy()
if encoding is _default:
encoding = copy.copy(self._encoding)
obj = self._construct_direct(
variables, coord_names, dims, attrs, indexes, encoding
)
return obj
def _replace_with_new_dims(
self,
variables: dict[Hashable, Variable],
coord_names: set | None = None,
attrs: dict[Hashable, Any] | Default | None = _default,
indexes: dict[Hashable, Index] | None = None,
inplace: bool = False,
) -> Self:
"""Replace variables with recalculated dimensions."""
dims = calculate_dimensions(variables)
return self._replace(
variables, coord_names, dims, attrs, indexes, inplace=inplace
)
def _replace_vars_and_dims(
self,
variables: dict[Hashable, Variable],
coord_names: set | None = None,
dims: dict[Hashable, int] | None = None,
attrs: dict[Hashable, Any] | Default | None = _default,
inplace: bool = False,
) -> Self:
"""Deprecated version of _replace_with_new_dims().
Unlike _replace_with_new_dims(), this method always recalculates
indexes from variables.
"""
if dims is None:
dims = calculate_dimensions(variables)
return self._replace(
variables, coord_names, dims, attrs, indexes=None, inplace=inplace
)
def _overwrite_indexes(
self,
indexes: Mapping[Hashable, Index],
variables: Mapping[Hashable, Variable] | None = None,
drop_variables: list[Hashable] | None = None,
drop_indexes: list[Hashable] | None = None,
rename_dims: Mapping[Hashable, Hashable] | None = None,
) -> Self:
"""Maybe replace indexes.
This function may do a lot more depending on index query
results.
"""
if not indexes:
return self
if variables is None:
variables = {}
if drop_variables is None:
drop_variables = []
if drop_indexes is None:
drop_indexes = []
new_variables = self._variables.copy()
new_coord_names = self._coord_names.copy()
new_indexes = dict(self._indexes)
index_variables = {}
no_index_variables = {}
for name, var in variables.items():
old_var = self._variables.get(name)
if old_var is not None:
var.attrs.update(old_var.attrs)
var.encoding.update(old_var.encoding)
if name in indexes:
index_variables[name] = var
else:
no_index_variables[name] = var
for name in indexes:
new_indexes[name] = indexes[name]
for name, var in index_variables.items():
new_coord_names.add(name)
new_variables[name] = var
# append no-index variables at the end
for k in no_index_variables:
new_variables.pop(k)
new_variables.update(no_index_variables)
for name in drop_indexes:
new_indexes.pop(name)
for name in drop_variables:
new_variables.pop(name)
new_indexes.pop(name, None)
new_coord_names.remove(name)
replaced = self._replace(
variables=new_variables, coord_names=new_coord_names, indexes=new_indexes
)
if rename_dims:
# skip rename indexes: they should already have the right name(s)
dims = replaced._rename_dims(rename_dims)
new_variables, new_coord_names = replaced._rename_vars({}, rename_dims)
return replaced._replace(
variables=new_variables, coord_names=new_coord_names, dims=dims
)
else:
return replaced
def copy(self, deep: bool = False, data: DataVars | None = None) -> Self:
"""Returns a copy of this dataset.
If `deep=True`, a deep copy is made of each of the component variables.
Otherwise, a shallow copy of each of the component variable is made, so
that the underlying memory region of the new dataset is the same as in
the original dataset.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, default: False
Whether each component variable is loaded into memory and copied onto
the new object. Default is False.
data : dict-like or None, optional
Data to use in the new object. Each item in `data` must have same
shape as corresponding data variable in original. When `data` is
used, `deep` is ignored for the data variables and only used for
coords.
Returns
-------
object : Dataset
New object with dimensions, attributes, coordinates, name, encoding,
and optionally data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset(
... {"foo": da, "bar": ("x", [-1, 2])},
... coords={"x": ["one", "two"]},
... )
>>> ds.copy()
<xarray.Dataset> Size: 88B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 24B 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 16B -1 2
>>> ds_0 = ds.copy(deep=False)
>>> ds_0["foo"][0, 0] = 7
>>> ds_0
<xarray.Dataset> Size: 88B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 24B 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 48B 7.0 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 16B -1 2
>>> ds
<xarray.Dataset> Size: 88B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 24B 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 48B 7.0 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 16B -1 2
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> ds.copy(data={"foo": np.arange(6).reshape(2, 3), "bar": ["a", "b"]})
<xarray.Dataset> Size: 80B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 24B 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) int64 48B 0 1 2 3 4 5
bar (x) <U1 8B 'a' 'b'
>>> ds
<xarray.Dataset> Size: 88B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 24B 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 48B 7.0 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 16B -1 2
See Also
--------
pandas.DataFrame.copy
"""
return self._copy(deep=deep, data=data)
def _copy(
self,
deep: bool = False,
data: DataVars | None = None,
memo: dict[int, Any] | None = None,
) -> Self:
if data is None:
data = {}
elif not utils.is_dict_like(data):
raise ValueError("Data must be dict-like")
if data:
var_keys = set(self.data_vars.keys())
data_keys = set(data.keys())
keys_not_in_vars = data_keys - var_keys
if keys_not_in_vars:
raise ValueError(
"Data must only contain variables in original "
f"dataset. Extra variables: {keys_not_in_vars}"
)
keys_missing_from_data = var_keys - data_keys
if keys_missing_from_data:
raise ValueError(
"Data must contain all variables in original "
f"dataset. Data is missing {keys_missing_from_data}"
)
indexes, index_vars = self.xindexes.copy_indexes(deep=deep)
variables = {}
for k, v in self._variables.items():
if k in index_vars:
variables[k] = index_vars[k]
else:
variables[k] = v._copy(deep=deep, data=data.get(k), memo=memo)
attrs = copy.deepcopy(self._attrs, memo) if deep else copy.copy(self._attrs)
encoding = (
copy.deepcopy(self._encoding, memo) if deep else copy.copy(self._encoding)
)
return self._replace(variables, indexes=indexes, attrs=attrs, encoding=encoding)
def __copy__(self) -> Self:
return self._copy(deep=False)
def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self:
return self._copy(deep=True, memo=memo)
def as_numpy(self) -> Self:
"""
Coerces wrapped data and coordinates into numpy arrays, returning a Dataset.
See also
--------
DataArray.as_numpy
DataArray.to_numpy : Returns only the data as a numpy.ndarray object.
"""
numpy_variables = {k: v.as_numpy() for k, v in self.variables.items()}
return self._replace(variables=numpy_variables)
    def _copy_listed(self, names: Iterable[Hashable]) -> Self:
        """Create a new Dataset with the listed variables from this dataset and
        all relevant coordinates. Skips all validation.
        """
        variables: dict[Hashable, Variable] = {}
        coord_names = set()
        indexes: dict[Hashable, Index] = {}

        for name in names:
            try:
                variables[name] = self._variables[name]
            except KeyError:
                # Not a stored variable: fall back to a "virtual" variable
                # derived from an existing one (see _get_virtual_variable).
                ref_name, var_name, var = _get_virtual_variable(
                    self._variables, name, self.sizes
                )
                variables[var_name] = var
                if ref_name in self._coord_names or ref_name in self.dims:
                    coord_names.add(var_name)
                if (var_name,) == var.dims:
                    # 1-D variable named after its own dimension: create a
                    # default index so label-based selection keeps working.
                    index, index_vars = create_default_index_implicit(var, names)
                    indexes.update(dict.fromkeys(index_vars, index))
                    variables.update(index_vars)
                    coord_names.update(index_vars)

        # Collect the dimensions actually used by the selected variables.
        needed_dims: OrderedSet[Hashable] = OrderedSet()
        for v in variables.values():
            needed_dims.update(v.dims)

        dims = {k: self.sizes[k] for k in needed_dims}

        # preserves ordering of coordinates (iterate _variables, not names)
        for k in self._variables:
            if k not in self._coord_names:
                continue

            if set(self.variables[k].dims) <= needed_dims:
                # Coordinate fits entirely within the needed dimensions.
                variables[k] = self._variables[k]
                coord_names.add(k)

        indexes.update(filter_indexes_from_coords(self._indexes, coord_names))

        return self._replace(variables, coord_names, dims, indexes=indexes)
    def _construct_dataarray(self, name: Hashable) -> DataArray:
        """Construct a DataArray by indexing this dataset"""
        from xarray.core.dataarray import DataArray

        try:
            variable = self._variables[name]
        except KeyError:
            # Fall back to a "virtual" variable derived from an existing one.
            _, name, variable = _get_virtual_variable(self._variables, name, self.sizes)

        needed_dims = set(variable.dims)

        coords: dict[Hashable, Variable] = {}
        # preserve ordering
        for k in self._variables:
            if k in self._indexes:
                # Indexed coordinate: let the index decide whether its
                # coordinate belongs on the resulting array.
                add_coord = self._indexes[k].should_add_coord_to_array(
                    k, self._variables[k], needed_dims
                )
            else:
                # Non-indexed coordinate: attach it when its dimensions are a
                # subset of the array's dimensions.
                var_dims = set(self._variables[k].dims)
                add_coord = k in self._coord_names and var_dims <= needed_dims

            if add_coord:
                coords[k] = self._variables[k]

        indexes = filter_indexes_from_coords(self._indexes, set(coords))

        return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True)
@property
def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for attribute-style access"""
yield from self._item_sources
yield self.attrs
@property
def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for key-completion"""
yield self.data_vars
yield FilteredMapping(keys=self._coord_names, mapping=self.coords)
# virtual coordinates
yield FilteredMapping(keys=self.sizes, mapping=self)
def __contains__(self, key: object) -> bool:
"""The 'in' operator will return true or false depending on whether
'key' is an array in the dataset or not.
"""
return key in self._variables
def __len__(self) -> int:
return len(self.data_vars)
def __bool__(self) -> bool:
return bool(self.data_vars)
def __iter__(self) -> Iterator[Hashable]:
return iter(self.data_vars)
    if TYPE_CHECKING:
        # needed because __getattr__ is returning Any and otherwise
        # this class counts as part of the SupportsArray Protocol
        __array__ = None  # type: ignore[var-annotated,unused-ignore]

    else:

        def __array__(self, dtype=None, copy=None):
            # Raise instead of letting numpy silently build an object array
            # out of the Dataset; users must convert to a DataArray first.
            raise TypeError(
                "cannot directly convert an xarray.Dataset into a "
                "numpy array. Instead, create an xarray.DataArray "
                "first, either with indexing on the Dataset or by "
                "invoking the `to_dataarray()` method."
            )
@property
def nbytes(self) -> int:
"""
Total bytes consumed by the data arrays of all variables in this dataset.
If the backend array for any variable does not include ``nbytes``, estimates
the total bytes for that array based on the ``size`` and ``dtype``.
"""
return sum(v.nbytes for v in self.variables.values())
    @property
    def loc(self) -> _LocIndexer[Self]:
        """Attribute for location based indexing. Only supports __getitem__,
        and only when the key is a dict of the form {dim: labels}.
        """
        # A fresh indexer wrapping this dataset is built on every access.
        return _LocIndexer(self)
    @overload
    def __getitem__(self, key: Hashable) -> DataArray: ...

    # Mapping is Iterable
    @overload
    def __getitem__(self, key: Iterable[Hashable]) -> Self: ...

    def __getitem__(
        self, key: Mapping[Any, Any] | Hashable | Iterable[Hashable]
    ) -> Self | DataArray:
        """Access variables or coordinates of this dataset as a
        :py:class:`~xarray.DataArray` or a subset of variables or an indexed dataset.

        Indexing with a list of names will return a new ``Dataset`` object.
        """
        from xarray.core.formatting import shorten_list_repr

        if utils.is_dict_like(key):
            # dict-like key -> positional indexing, e.g. ds[{"x": 0}]
            return self.isel(**key)
        if utils.hashable(key):
            # single name -> extract one variable as a DataArray
            try:
                return self._construct_dataarray(key)
            except KeyError as e:
                # Build a helpful error: a "did you mean" suggestion when one
                # exists, otherwise a preview of the available variable names.
                message = f"No variable named {key!r}."

                best_guess = utils.did_you_mean(key, self.variables.keys())
                if best_guess:
                    message += f" {best_guess}"
                else:
                    message += f" Variables on the dataset include {shorten_list_repr(list(self.variables.keys()), max_items=10)}"

                # If someone attempts `ds['foo' , 'bar']` instead of `ds[['foo', 'bar']]`
                if isinstance(key, tuple):
                    message += f"\nHint: use a list to select multiple variables, for example `ds[{list(key)}]`"
                raise KeyError(message) from e

        if utils.iterable_of_hashable(key):
            # list of names -> subset Dataset
            return self._copy_listed(key)
        raise ValueError(f"Unsupported key-type {type(key)}")
    def __setitem__(
        self, key: Hashable | Iterable[Hashable] | Mapping, value: Any
    ) -> None:
        """Add an array to this dataset.

        Multiple arrays can be added at the same time, in which case each of
        the following operations is applied to the respective value.

        If key is dict-like, update all variables in the dataset
        one by one with the given value at the given location.
        If the given value is also a dataset, select corresponding variables
        in the given value and in the dataset to be changed.

        If value is a `DataArray`, call its `select_vars()` method, rename it
        to `key` and merge the contents of the resulting dataset into this
        dataset.

        If value is a `Variable` object (or tuple of form
        ``(dims, data[, attrs])``), add it to this dataset as a new
        variable.
        """
        from xarray.core.dataarray import DataArray

        if utils.is_dict_like(key):
            # check for consistency and convert value to dataset
            value = self._setitem_check(key, value)
            # loop over dataset variables and set new values
            processed = []
            for name, var in self.items():
                try:
                    var[key] = value[name]
                    processed.append(name)
                except Exception as e:
                    # Report which variables were already updated so the user
                    # knows the dataset may be in a partially-updated state.
                    if processed:
                        raise RuntimeError(
                            "An error occurred while setting values of the"
                            f" variable '{name}'. The following variables have"
                            f" been successfully updated:\n{processed}"
                        ) from e
                    else:
                        raise e

        elif utils.hashable(key):
            # single name -> store one DataArray / Variable
            if isinstance(value, Dataset):
                raise TypeError(
                    "Cannot assign a Dataset to a single key - only a DataArray or Variable "
                    "object can be stored under a single key."
                )
            self.update({key: value})

        elif utils.iterable_of_hashable(key):
            # list of names -> fan the value(s) out across multiple variables
            keylist = list(key)
            if len(keylist) == 0:
                raise ValueError("Empty list of variables to be set")
            if len(keylist) == 1:
                self.update({keylist[0]: value})
            else:
                if len(keylist) != len(value):
                    raise ValueError(
                        f"Different lengths of variables to be set "
                        f"({len(keylist)}) and data used as input for "
                        f"setting ({len(value)})"
                    )
                if isinstance(value, Dataset):
                    self.update(
                        dict(zip(keylist, value.data_vars.values(), strict=True))
                    )
                elif isinstance(value, DataArray):
                    raise ValueError("Cannot assign single DataArray to multiple keys")
                else:
                    self.update(dict(zip(keylist, value, strict=True)))

        else:
            raise ValueError(f"Unsupported key-type {type(key)}")
    def _setitem_check(self, key, value):
        """Consistency check for __setitem__

        When assigning values to a subset of a Dataset, do consistency check beforehand
        to avoid leaving the dataset in a partially updated state when an error occurs.
        """
        from xarray.core.dataarray import DataArray

        if isinstance(value, Dataset):
            # Every variable in the new values must exist in this dataset.
            missing_vars = [
                name for name in value.data_vars if name not in self.data_vars
            ]
            if missing_vars:
                raise ValueError(
                    f"Variables {missing_vars} in new values"
                    f" not available in original dataset:\n{self}"
                )
        elif not any(isinstance(value, t) for t in [DataArray, Number, str]):
            raise TypeError(
                "Dataset assignment only accepts DataArrays, Datasets, and scalars."
            )

        # Build (but do not apply) the converted values; __setitem__ applies them.
        new_value = Dataset()
        for name, var in self.items():
            # test indexing
            try:
                var_k = var[key]
            except Exception as e:
                raise ValueError(
                    f"Variable '{name}': indexer {key} not available"
                ) from e

            if isinstance(value, Dataset):
                val = value[name]
            else:
                val = value

            if isinstance(val, DataArray):
                # check consistency of dimensions
                for dim in val.dims:
                    if dim not in var_k.dims:
                        raise KeyError(
                            f"Variable '{name}': dimension '{dim}' appears in new values "
                            f"but not in the indexed original data"
                        )
                dims = tuple(dim for dim in var_k.dims if dim in val.dims)
                if dims != val.dims:
                    raise ValueError(
                        f"Variable '{name}': dimension order differs between"
                        f" original and new data:\n{dims}\nvs.\n{val.dims}"
                    )
            else:
                val = np.array(val)

            # type conversion
            new_value[name] = duck_array_ops.astype(val, dtype=var_k.dtype, copy=False)

        # check consistency of dimension sizes and dimension coordinates
        if isinstance(value, DataArray | Dataset):
            align(self[key], value, join="exact", copy=False)

        return new_value
def __delitem__(self, key: Hashable) -> None:
"""Remove a variable from this dataset."""
assert_no_index_corrupted(self.xindexes, {key})
if key in self._indexes:
del self._indexes[key]
del self._variables[key]
self._coord_names.discard(key)
self._dims = calculate_dimensions(self._variables)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore[assignment]
def _all_compat(
self, other: Self, compat: str | Callable[[Variable, Variable], bool]
) -> bool:
"""Helper function for equals and identical"""
if not callable(compat):
compat_str = compat
# some stores (e.g., scipy) do not seem to preserve order, so don't
# require matching order for equality
def compat(x: Variable, y: Variable) -> bool:
return getattr(x, compat_str)(y)
return self._coord_names == other._coord_names and utils.dict_equiv(
self._variables, other._variables, compat=compat
)
def broadcast_equals(self, other: Self) -> bool:
"""Two Datasets are broadcast equal if they are equal after
broadcasting all variables against each other.
For example, variables that are scalar in one dataset but non-scalar in
the other dataset can still be broadcast equal if the the non-scalar
variable is a constant.
Examples
--------
# 2D array with shape (1, 3)
>>> data = np.array([[1, 2, 3]])
>>> a = xr.Dataset(
... {"variable_name": (("space", "time"), data)},
... coords={"space": [0], "time": [0, 1, 2]},
... )
>>> a
<xarray.Dataset> Size: 56B
Dimensions: (space: 1, time: 3)
Coordinates:
* space (space) int64 8B 0
* time (time) int64 24B 0 1 2
Data variables:
variable_name (space, time) int64 24B 1 2 3
# 2D array with shape (3, 1)
>>> data = np.array([[1], [2], [3]])
>>> b = xr.Dataset(
... {"variable_name": (("time", "space"), data)},
... coords={"time": [0, 1, 2], "space": [0]},
... )
>>> b
<xarray.Dataset> Size: 56B
Dimensions: (time: 3, space: 1)
Coordinates:
* time (time) int64 24B 0 1 2
* space (space) int64 8B 0
Data variables:
variable_name (time, space) int64 24B 1 2 3
.equals returns True if two Datasets have the same values, dimensions, and coordinates. .broadcast_equals returns True if the
results of broadcasting two Datasets against each other have the same values, dimensions, and coordinates.
>>> a.equals(b)
False
>>> a.broadcast_equals(b)
True
>>> a2, b2 = xr.broadcast(a, b)
>>> a2.equals(b2)
True
See Also
--------
Dataset.equals
Dataset.identical
Dataset.broadcast
"""
try:
return self._all_compat(other, "broadcast_equals")
except (TypeError, AttributeError):
return False
def equals(self, other: Self) -> bool:
"""Two Datasets are equal if they have matching variables and
coordinates, all of which are equal.
Datasets can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``Dataset``
does element-wise comparisons (like numpy.ndarrays).
Examples
--------
# 2D array with shape (1, 3)
>>> data = np.array([[1, 2, 3]])
>>> dataset1 = xr.Dataset(
... {"variable_name": (("space", "time"), data)},
... coords={"space": [0], "time": [0, 1, 2]},
... )
>>> dataset1
<xarray.Dataset> Size: 56B
Dimensions: (space: 1, time: 3)
Coordinates:
* space (space) int64 8B 0
* time (time) int64 24B 0 1 2
Data variables:
variable_name (space, time) int64 24B 1 2 3
# 2D array with shape (3, 1)
>>> data = np.array([[1], [2], [3]])
>>> dataset2 = xr.Dataset(
... {"variable_name": (("time", "space"), data)},
... coords={"time": [0, 1, 2], "space": [0]},
... )
>>> dataset2
<xarray.Dataset> Size: 56B
Dimensions: (time: 3, space: 1)
Coordinates:
* time (time) int64 24B 0 1 2
* space (space) int64 8B 0
Data variables:
variable_name (time, space) int64 24B 1 2 3
>>> dataset1.equals(dataset2)
False
>>> dataset1.broadcast_equals(dataset2)
True
.equals returns True if two Datasets have the same values, dimensions, and coordinates. .broadcast_equals returns True if the
results of broadcasting two Datasets against each other have the same values, dimensions, and coordinates.
Similar for missing values too:
>>> ds1 = xr.Dataset(
... {
... "temperature": (["x", "y"], [[1, np.nan], [3, 4]]),
... },
... coords={"x": [0, 1], "y": [0, 1]},
... )
>>> ds2 = xr.Dataset(
... {
... "temperature": (["x", "y"], [[1, np.nan], [3, 4]]),
... },
... coords={"x": [0, 1], "y": [0, 1]},
... )
>>> ds1.equals(ds2)
True
See Also
--------
Dataset.broadcast_equals
Dataset.identical
"""
try:
return self._all_compat(other, "equals")
except (TypeError, AttributeError):
return False
def identical(self, other: Self) -> bool:
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
Example
-------
>>> a = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "m"},
... )
>>> b = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "m"},
... )
>>> c = xr.Dataset(
... {"Width": ("X", [1, 2, 3])},
... coords={"X": [1, 2, 3]},
... attrs={"units": "ft"},
... )
>>> a
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: m
>>> b
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: m
>>> c
<xarray.Dataset> Size: 48B
Dimensions: (X: 3)
Coordinates:
* X (X) int64 24B 1 2 3
Data variables:
Width (X) int64 24B 1 2 3
Attributes:
units: ft
>>> a.equals(b)
True
>>> a.identical(b)
True
>>> a.equals(c)
True
>>> a.identical(c)
False
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(
other, "identical"
)
except (TypeError, AttributeError):
return False
@property
def indexes(self) -> Indexes[pd.Index]:
"""Mapping of pandas.Index objects used for label based indexing.
Raises an error if this Dataset has indexes that cannot be coerced
to pandas.Index objects.
See Also
--------
Dataset.xindexes
"""
return self.xindexes.to_pandas_indexes()
@property
def xindexes(self) -> Indexes[Index]:
"""Mapping of :py:class:`~xarray.indexes.Index` objects
used for label based indexing.
"""
return Indexes(self._indexes, {k: self._variables[k] for k in self._indexes})
    @property
    def coords(self) -> DatasetCoordinates:
        """Mapping of :py:class:`~xarray.DataArray` objects corresponding to
        coordinate variables.

        See Also
        --------
        Coordinates
        """
        # A fresh view wrapping this dataset is constructed on every access.
        return DatasetCoordinates(self)
    @property
    def data_vars(self) -> DataVariables:
        """Dictionary of DataArray objects corresponding to data variables"""
        # A fresh view wrapping this dataset is constructed on every access.
        return DataVariables(self)
def set_coords(self, names: Hashable | Iterable[Hashable]) -> Self:
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : hashable or iterable of hashable
Name(s) of variables in this dataset to convert into coordinates.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "pressure": ("time", [1.013, 1.2, 3.5]),
... "time": pd.date_range("2023-01-01", periods=3),
... }
... )
>>> dataset
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03
Data variables:
pressure (time) float64 24B 1.013 1.2 3.5
>>> dataset.set_coords("pressure")
<xarray.Dataset> Size: 48B
Dimensions: (time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03
pressure (time) float64 24B 1.013 1.2 3.5
Data variables:
*empty*
On calling ``set_coords`` , these data variables are converted to coordinates, as shown in the final dataset.
Returns
-------
Dataset
See Also
--------
Dataset.swap_dims
Dataset.assign_coords
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
# nb. check in self._variables, not self.data_vars to insure that the
# operation is idempotent
if isinstance(names, str) or not isinstance(names, Iterable):
names = [names]
else:
names = list(names)
self._assert_all_in_dataset(names)
obj = self.copy()
obj._coord_names.update(names)
return obj
def reset_coords(
self,
names: Dims = None,
drop: bool = False,
) -> Self:
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str, Iterable of Hashable or None, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, default: False
If True, remove coordinates instead of converting them into
variables.
Examples
--------
>>> dataset = xr.Dataset(
... {
... "temperature": (
... ["time", "lat", "lon"],
... [[[25, 26], [27, 28]], [[29, 30], [31, 32]]],
... ),
... "precipitation": (
... ["time", "lat", "lon"],
... [[[0.5, 0.8], [0.2, 0.4]], [[0.3, 0.6], [0.7, 0.9]]],
... ),
... },
... coords={
... "time": pd.date_range(start="2023-01-01", periods=2),
... "lat": [40, 41],
... "lon": [-80, -79],
... "altitude": 1000,
... },
... )
# Dataset before resetting coordinates
>>> dataset
<xarray.Dataset> Size: 184B
Dimensions: (time: 2, lat: 2, lon: 2)
Coordinates:
* time (time) datetime64[ns] 16B 2023-01-01 2023-01-02
* lat (lat) int64 16B 40 41
* lon (lon) int64 16B -80 -79
altitude int64 8B 1000
Data variables:
temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32
precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9
# Reset the 'altitude' coordinate
>>> dataset_reset = dataset.reset_coords("altitude")
# Dataset after resetting coordinates
>>> dataset_reset
<xarray.Dataset> Size: 184B
Dimensions: (time: 2, lat: 2, lon: 2)
Coordinates:
* time (time) datetime64[ns] 16B 2023-01-01 2023-01-02
* lat (lat) int64 16B 40 41
* lon (lon) int64 16B -80 -79
Data variables:
temperature (time, lat, lon) int64 64B 25 26 27 28 29 30 31 32
precipitation (time, lat, lon) float64 64B 0.5 0.8 0.2 0.4 0.3 0.6 0.7 0.9
altitude int64 8B 1000
Returns
-------
Dataset
See Also
--------
Dataset.set_coords
"""
if names is None:
names = self._coord_names - set(self._indexes)
else:
if isinstance(names, str) or not isinstance(names, Iterable):
names = [names]
else:
names = list(names)
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self._indexes)
if bad_coords:
raise ValueError(
f"cannot remove index coordinates with reset_coords: {bad_coords}"
)
obj = self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
    def dump_to_store(self, store: AbstractDataStore, **kwargs) -> None:
        """Store dataset contents to a backends.*DataStore object.

        Parameters
        ----------
        store : AbstractDataStore
            Target store that receives this dataset's contents.
        **kwargs
            Forwarded to ``xarray.backends.writers.dump_to_store``.
        """
        from xarray.backends.writers import dump_to_store

        # TODO: rename and/or cleanup this method to make it more consistent
        # with to_netcdf()
        dump_to_store(self, store, **kwargs)
# path=None writes to bytes
@overload
def to_netcdf(
self,
path: None = None,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Any, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
compute: bool = True,
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> memoryview: ...
# compute=False returns dask.Delayed
@overload
def to_netcdf(
self,
path: str | PathLike,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Any, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
*,
compute: Literal[False],
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> Delayed: ...
# default return None
@overload
def to_netcdf(
self,
path: str | PathLike | io.IOBase,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Any, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
compute: Literal[True] = True,
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> None: ...
# if compute cannot be evaluated at type check time
# we may get back either Delayed or None
@overload
def to_netcdf(
self,
path: str | PathLike,
mode: NetcdfWriteModes = "w",
format: T_NetcdfTypes | None = None,
group: str | None = None,
engine: T_NetcdfEngine | None = None,
encoding: Mapping[Any, Mapping[str, Any]] | None = None,
unlimited_dims: Iterable[Hashable] | None = None,
compute: bool = True,
invalid_netcdf: bool = False,
auto_complex: bool | None = None,
) -> Delayed | None: ...
    def to_netcdf(
        self,
        path: str | PathLike | io.IOBase | None = None,
        mode: NetcdfWriteModes = "w",
        format: T_NetcdfTypes | None = None,
        group: str | None = None,
        engine: T_NetcdfEngine | None = None,
        encoding: Mapping[Any, Mapping[str, Any]] | None = None,
        unlimited_dims: Iterable[Hashable] | None = None,
        compute: bool = True,
        invalid_netcdf: bool = False,
        auto_complex: bool | None = None,
    ) -> memoryview | Delayed | None:
        """Write dataset contents to a netCDF file.

        Parameters
        ----------
        path : str, path-like, file-like or None, optional
            Path to which to save this datatree, or a file-like object to write
            it to (which must support read and write and be seekable) or None
            (default) to return in-memory bytes as a memoryview.
        mode : {"w", "a"}, default: "w"
            Write ('w') or append ('a') mode. If mode='w', any existing file at
            this location will be overwritten. If mode='a', existing variables
            will be overwritten.
        format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
                  "NETCDF3_CLASSIC"}, optional
            File format for the resulting netCDF file:

            * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
              features.
            * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
              netCDF 3 compatible API features.
            * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
              which fully supports 2+ GB files, but is only compatible with
              clients linked against netCDF version 3.6.0 or later.
            * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
              handle 2+ GB files very well.

            All formats are supported by the netCDF4-python library.
            scipy.io.netcdf only supports the last two formats.

            The default format is NETCDF4 if you are saving a file to disk and
            have the netCDF4-python library available. Otherwise, xarray falls
            back to using scipy to write netCDF files and defaults to the
            NETCDF3_64BIT format (scipy does not support netCDF4).
        group : str, optional
            Path to the netCDF4 group in the given file to open (only works for
            format='NETCDF4'). The group(s) will be created if necessary.
        engine : {"netcdf4", "h5netcdf", "scipy"}, optional
            Engine to use when writing netCDF files. If not provided, the
            default engine is chosen based on available dependencies, by default
            preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via
            ``netcdf_engine_order`` in ``xarray.set_options()``).
        encoding : dict, optional
            Nested dictionary with variable names as keys and dictionaries of
            variable specific encodings as values, e.g.,
            ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,
            "zlib": True}, ...}``.
            If ``encoding`` is specified the original encoding of the variables of
            the dataset is ignored.

            The `h5netcdf` engine supports both the NetCDF4-style compression
            encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py
            ones ``{"compression": "gzip", "compression_opts": 9}``.
            This allows using any compression plugin installed in the HDF5
            library, e.g. LZF.
        unlimited_dims : iterable of hashable, optional
            Dimension(s) that should be serialized as unlimited dimensions.
            By default, no dimensions are treated as unlimited dimensions.
            Note that unlimited_dims may also be set via
            ``dataset.encoding["unlimited_dims"]``.
        compute: bool, default: True
            If true compute immediately, otherwise return a
            ``dask.delayed.Delayed`` object that can be computed later.
        invalid_netcdf: bool, default: False
            Only valid along with ``engine="h5netcdf"``. If True, allow writing
            hdf5 files which are invalid netcdf as described in
            https://github.com/h5netcdf/h5netcdf.
        auto_complex : bool, optional
            NOTE(review): forwarded unchanged to the backend writer; presumably
            controls automatic handling of complex dtypes — confirm against
            ``xarray.backends.writers.to_netcdf``.

        Returns
        -------
            * ``memoryview`` if path is None
            * ``dask.delayed.Delayed`` if compute is False
            * ``None`` otherwise

        See Also
        --------
        DataArray.to_netcdf
        """
        if encoding is None:
            encoding = {}
        from xarray.backends.writers import to_netcdf

        # All heavy lifting is delegated to the backend writer; this method
        # only normalizes the encoding argument and fixes multifile=False.
        return to_netcdf(  # type: ignore[return-value]  # mypy cannot resolve the overloads:(
            self,
            path,
            mode=mode,
            format=format,
            group=group,
            engine=engine,
            encoding=encoding,
            unlimited_dims=unlimited_dims,
            compute=compute,
            multifile=False,
            invalid_netcdf=invalid_netcdf,
            auto_complex=auto_complex,
        )
# compute=True (default) returns ZarrStore
@overload
def to_zarr(
self,
store: ZarrStoreLike | None = None,
chunk_store: MutableMapping | str | PathLike | None = None,
mode: ZarrWriteModes | None = None,
synchronizer=None,
group: str | None = None,
encoding: Mapping | None = None,
*,
compute: Literal[True] = True,
consolidated: bool | None = None,
append_dim: Hashable | None = None,
region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
safe_chunks: bool = True,
align_chunks: bool = False,
storage_options: dict[str, str] | None = None,
zarr_version: int | None = None,
zarr_format: int | None = None,
write_empty_chunks: bool | None = None,
chunkmanager_store_kwargs: dict[str, Any] | None = None,
) -> ZarrStore: ...
# compute=False returns dask.Delayed
@overload
def to_zarr(
self,
store: ZarrStoreLike | None = None,
chunk_store: MutableMapping | str | PathLike | None = None,
mode: ZarrWriteModes | None = None,
synchronizer=None,
group: str | None = None,
encoding: Mapping | None = None,
*,
compute: Literal[False],
consolidated: bool | None = None,
append_dim: Hashable | None = None,
region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
safe_chunks: bool = True,
align_chunks: bool = False,
storage_options: dict[str, str] | None = None,
zarr_version: int | None = None,
zarr_format: int | None = None,
write_empty_chunks: bool | None = None,
chunkmanager_store_kwargs: dict[str, Any] | None = None,
) -> Delayed: ...
    def to_zarr(
        self,
        store: ZarrStoreLike | None = None,
        chunk_store: MutableMapping | str | PathLike | None = None,
        mode: ZarrWriteModes | None = None,
        synchronizer=None,
        group: str | None = None,
        encoding: Mapping | None = None,
        *,
        compute: bool = True,
        consolidated: bool | None = None,
        append_dim: Hashable | None = None,
        region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
        safe_chunks: bool = True,
        align_chunks: bool = False,
        storage_options: dict[str, str] | None = None,
        zarr_version: int | None = None,
        zarr_format: int | None = None,
        write_empty_chunks: bool | None = None,
        chunkmanager_store_kwargs: dict[str, Any] | None = None,
    ) -> ZarrStore | Delayed:
        """Write dataset contents to a zarr group.
        Zarr chunks are determined in the following way:
        - From the ``chunks`` attribute in each variable's ``encoding``
          (can be set via `Dataset.chunk`).
        - If the variable is a Dask array, from the dask chunks
        - If neither Dask chunks nor encoding chunks are present, chunks will
          be determined automatically by Zarr
        - If both Dask chunks and encoding chunks are present, encoding chunks
          will be used, provided that there is a many-to-one relationship between
          encoding chunks and dask chunks (i.e. Dask chunks are bigger than and
          evenly divide encoding chunks); otherwise raise a ``ValueError``.
          This restriction ensures that no synchronization / locks are required
          when writing. To disable this restriction, use ``safe_chunks=False``.
        Parameters
        ----------
        store : zarr.storage.StoreLike, optional
            Store or path to directory in local or remote file system.
        chunk_store : MutableMapping, str or path-like, optional
            Store or path to directory in local or remote file system only for Zarr
            array chunks. Requires zarr-python v2.4.0 or later.
        mode : {"w", "w-", "a", "a-", "r+", None}, optional
            Persistence mode: "w" means create (overwrite if exists);
            "w-" means create (fail if exists);
            "a" means override all existing variables including dimension coordinates (create if does not exist);
            "a-" means only append those variables that have ``append_dim``.
            "r+" means modify existing array *values* only (raise an error if
            any metadata or shapes would change).
            The default mode is "a" if ``append_dim`` is set. Otherwise, it is
            "r+" if ``region`` is set and ``w-`` otherwise.
        synchronizer : object, optional
            Zarr array synchronizer.
        group : str, optional
            Group path. (a.k.a. `path` in zarr terminology.)
        encoding : dict, optional
            Nested dictionary with variable names as keys and dictionaries of
            variable specific encodings as values, e.g.,
            ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}``
        compute : bool, default: True
            If True write array data immediately, otherwise return a
            ``dask.delayed.Delayed`` object that can be computed to write
            array data later. Metadata is always updated eagerly.
        consolidated : bool, optional
            If True, apply :func:`zarr.convenience.consolidate_metadata`
            after writing metadata and read existing stores with consolidated
            metadata; if False, do not. The default (`consolidated=None`) means
            write consolidated metadata and attempt to read consolidated
            metadata for existing stores (falling back to non-consolidated).
            When the experimental ``zarr_version=3``, ``consolidated`` must be
            either be ``None`` or ``False``.
        append_dim : hashable, optional
            If set, the dimension along which the data will be appended. All
            other dimensions on overridden variables must remain the same size.
        region : dict or "auto", optional
            Optional mapping from dimension names to either a) ``"auto"``, or b) integer
            slices, indicating the region of existing zarr array(s) in which to write
            this dataset's data.
            If ``"auto"`` is provided the existing store will be opened and the region
            inferred by matching indexes. ``"auto"`` can be used as a single string,
            which will automatically infer the region for all dimensions, or as
            dictionary values for specific dimensions mixed together with explicit
            slices for other dimensions.
            Alternatively integer slices can be provided; for example, ``{'x': slice(0,
            1000), 'y': slice(10000, 11000)}`` would indicate that values should be
            written to the region ``0:1000`` along ``x`` and ``10000:11000`` along
            ``y``.
            Two restrictions apply to the use of ``region``:
            - If ``region`` is set, _all_ variables in a dataset must have at
              least one dimension in common with the region. Other variables
              should be written in a separate single call to ``to_zarr()``.
            - Dimensions cannot be included in both ``region`` and
              ``append_dim`` at the same time. To create empty arrays to fill
              in with ``region``, use a separate call to ``to_zarr()`` with
              ``compute=False``. See "Modifying existing Zarr stores" in
              the reference documentation for full details.
            Users are expected to ensure that the specified region aligns with
            Zarr chunk boundaries, and that dask chunks are also aligned.
            Xarray makes limited checks that these multiple chunk boundaries line up.
            It is possible to write incomplete chunks and corrupt the data with this
            option if you are not careful.
        safe_chunks : bool, default: True
            If True, only allow writes to when there is a many-to-one relationship
            between Zarr chunks (specified in encoding) and Dask chunks.
            Set False to override this restriction; however, data may become corrupted
            if Zarr arrays are written in parallel. This option may be useful in combination
            with ``compute=False`` to initialize a Zarr from an existing
            Dataset with arbitrary chunk structure.
            In addition to the many-to-one relationship validation, it also detects partial
            chunks writes when using the region parameter,
            these partial chunks are considered unsafe in the mode "r+" but safe in
            the mode "a".
            Note: Even with these validations it can still be unsafe to write
            two or more chunked arrays in the same location in parallel if they are
            not writing in independent regions, for those cases it is better to use
            a synchronizer.
        align_chunks: bool, default False
            If True, rechunks the Dask array to align with Zarr chunks before writing.
            This ensures each Dask chunk maps to one or more contiguous Zarr chunks,
            which avoids race conditions.
            Internally, the process sets safe_chunks=False and tries to preserve
            the original Dask chunking as much as possible.
            Note: While this alignment avoids write conflicts stemming from chunk
            boundary misalignment, it does not protect against race conditions
            if multiple uncoordinated processes write to the same
            Zarr array concurrently.
        storage_options : dict, optional
            Any additional parameters for the storage backend (ignored for local
            paths).
        zarr_version : int or None, optional
            .. deprecated:: 2024.9.1
            Use ``zarr_format`` instead.
        zarr_format : int or None, optional
            The desired zarr format to target (currently 2 or 3). The default
            of None will attempt to determine the zarr version from ``store`` when
            possible, otherwise defaulting to the default version used by
            the zarr-python library installed.
        write_empty_chunks : bool or None, optional
            If True, all chunks will be stored regardless of their
            contents. If False, each chunk is compared to the array's fill value
            prior to storing. If a chunk is uniformly equal to the fill value, then
            that chunk is not be stored, and the store entry for that chunk's key
            is deleted. This setting enables sparser storage, as only chunks with
            non-fill-value data are stored, at the expense of overhead associated
            with checking the data of each chunk. If None (default) fall back to
            specification(s) in ``encoding`` or Zarr defaults. A ``ValueError``
            will be raised if the value of this (if not None) differs with
            ``encoding``.
        chunkmanager_store_kwargs : dict, optional
            Additional keyword arguments passed on to the `ChunkManager.store` method used to store
            chunked arrays. For example for a dask array additional kwargs will be passed eventually to
            :py:func:`dask.array.store()`. Experimental API that should not be relied upon.
        Returns
        -------
            * ``dask.delayed.Delayed`` if compute is False
            * ZarrStore otherwise
        References
        ----------
        https://zarr.readthedocs.io/
        Notes
        -----
        Zarr chunking behavior:
            If chunks are found in the encoding argument or attribute
            corresponding to any DataArray, those chunks are used.
            If a DataArray is a dask array, it is written with those chunks.
            If not other chunks are found, Zarr uses its own heuristics to
            choose automatic chunk sizes.
        encoding:
            The encoding attribute (if exists) of the DataArray(s) will be
            used. Override any existing encodings by providing the ``encoding`` kwarg.
        ``fill_value`` handling:
            There exists a subtlety in interpreting zarr's ``fill_value`` property.
            For Zarr v2 format arrays, ``fill_value`` is *always* interpreted as an
            invalid value similar to the ``_FillValue`` attribute in CF/netCDF.
            For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute
            will be used to mask the data if requested using ``mask_and_scale=True``.
            To customize the fill value Zarr uses as a default for unwritten
            chunks on disk, set ``_FillValue`` in encoding for Zarr v2 or
            ``fill_value`` for Zarr v3.
            See this `Github issue <https://github.com/pydata/xarray/issues/5475>`_
            for more.
        See Also
        --------
        :ref:`io.zarr`
            The I/O user guide, with more details and examples.
        """
        # Imported lazily inside the method — presumably to avoid a circular
        # import with the backends package and to defer optional zarr
        # dependencies; confirm before hoisting to module level.
        from xarray.backends.writers import to_zarr

        # All argument handling (mode defaults, region/append validation,
        # chunk safety checks) is delegated to the backend writer.
        return to_zarr(  # type: ignore[call-overload,misc]
            self,
            store=store,
            chunk_store=chunk_store,
            storage_options=storage_options,
            mode=mode,
            synchronizer=synchronizer,
            group=group,
            encoding=encoding,
            compute=compute,
            consolidated=consolidated,
            append_dim=append_dim,
            region=region,
            safe_chunks=safe_chunks,
            align_chunks=align_chunks,
            zarr_version=zarr_version,
            zarr_format=zarr_format,
            write_empty_chunks=write_empty_chunks,
            chunkmanager_store_kwargs=chunkmanager_store_kwargs,
        )
def __repr__(self) -> str:
return formatting.dataset_repr(self)
def _repr_html_(self) -> str:
if OPTIONS["display_style"] == "text":
return f"<pre>{escape(repr(self))}</pre>"
return formatting_html.dataset_repr(self)
def info(self, buf: IO | None = None) -> None:
"""
Concise summary of a Dataset variables and attributes.
Parameters
----------
buf : file-like, default: sys.stdout
writable buffer
See Also
--------
pandas.DataFrame.assign
ncdump : netCDF's ncdump
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = [
"xarray.Dataset {",
"dimensions:",
]
for name, size in self.sizes.items():
lines.append(f"\t{name} = {size} ;")
lines.append("\nvariables:")
for name, da in self.variables.items():
dims = ", ".join(map(str, da.dims))
lines.append(f"\t{da.dtype} {name}({dims}) ;")
for k, v in da.attrs.items():
lines.append(f"\t\t{name}:{k} = {v} ;")
lines.append("\n// global attributes:")
for k, v in self.attrs.items():
lines.append(f"\t:{k} = {v} ;")
lines.append("}")
buf.write("\n".join(lines))
@property
def chunks(self) -> Mapping[Hashable, tuple[int, ...]]:
"""
Mapping from dimension names to block lengths for this dataset's data.
If this dataset does not contain chunked arrays, the mapping will be empty.
Cannot be modified directly, but can be modified by calling .chunk().
Same as Dataset.chunksizes, but maintained for backwards compatibility.
See Also
--------
Dataset.chunk
Dataset.chunksizes
xarray.unify_chunks
"""
return get_chunksizes(self.variables.values())
@property
def chunksizes(self) -> Mapping[Hashable, tuple[int, ...]]:
"""
Mapping from dimension names to block lengths for this dataset's data.
If this dataset does not contain chunked arrays, the mapping will be empty.
Cannot be modified directly, but can be modified by calling .chunk().
Same as Dataset.chunks.
See Also
--------
Dataset.chunk
Dataset.chunks
xarray.unify_chunks
"""
return get_chunksizes(self.variables.values())
    def chunk(
        self,
        chunks: T_ChunksFreq = {},  # noqa: B006  # {} even though it's technically unsafe, is being used intentionally here (#4667)
        name_prefix: str = "xarray-",
        token: str | None = None,
        lock: bool = False,
        inline_array: bool = False,
        chunked_array_type: str | ChunkManagerEntrypoint | None = None,
        from_array_kwargs=None,
        **chunks_kwargs: T_ChunkDimFreq,
    ) -> Self:
        """Coerce all arrays in this dataset into dask arrays with the given
        chunks.
        Non-dask arrays in this dataset will be converted to dask arrays. Dask
        arrays will be rechunked to the given chunk sizes.
        If neither chunks is not provided for one or more dimensions, chunk
        sizes along that dimension will not be updated; non-dask arrays will be
        converted into dask arrays with a single block.
        Along datetime-like dimensions, a :py:class:`Resampler` object
        (e.g. :py:class:`groupers.TimeResampler` or :py:class:`groupers.SeasonResampler`)
        is also accepted.
        Parameters
        ----------
        chunks : int, tuple of int, "auto" or mapping of hashable to int or a Resampler, optional
            Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, or
            ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": TimeResampler(freq="YE")}`` or
            ``{"time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])}``.
        name_prefix : str, default: "xarray-"
            Prefix for the name of any new dask arrays.
        token : str, optional
            Token uniquely identifying this dataset.
        lock : bool, default: False
            Passed on to :py:func:`dask.array.from_array`, if the array is not
            already as dask array.
        inline_array: bool, default: False
            Passed on to :py:func:`dask.array.from_array`, if the array is not
            already as dask array.
        chunked_array_type: str, optional
            Which chunked array type to coerce this datasets' arrays to.
            Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system.
            Experimental API that should not be relied upon.
        from_array_kwargs: dict, optional
            Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
            chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
            For example, with dask as the default chunked array type, this method would pass additional kwargs
            to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
        **chunks_kwargs : {dim: chunks, ...}, optional
            The keyword arguments form of ``chunks``.
            One of chunks or chunks_kwargs must be provided
        Returns
        -------
        chunked : xarray.Dataset
        See Also
        --------
        Dataset.chunks
        Dataset.chunksizes
        xarray.unify_chunks
        dask.array.from_array
        """
        # Local import — presumably to avoid a circular import between
        # xarray.groupers and the core module; confirm before hoisting.
        from xarray.groupers import Resampler

        # Deprecated spelling: chunks=None used to be accepted; normalize it to
        # {} (i.e. "chunk everything into single blocks") with a warning.
        if chunks is None and not chunks_kwargs:
            warnings.warn(
                "None value for 'chunks' is deprecated. "
                "It will raise an error in the future. Use instead '{}'",
                category=DeprecationWarning,
                stacklevel=2,
            )
            chunks = {}
        chunks_mapping: Mapping[Any, Any]
        if not isinstance(chunks, Mapping) and chunks is not None:
            # Deprecated spelling: positional tuples/lists are broadcast to all
            # dimensions; the supported form is a {dim: chunks} mapping.
            if isinstance(chunks, tuple | list):
                utils.emit_user_level_warning(
                    "Supplying chunks as dimension-order tuples is deprecated. "
                    "It will raise an error in the future. Instead use a dict with dimensions as keys.",
                    category=DeprecationWarning,
                )
            # Scalar (int / "auto") applies to every dimension.
            chunks_mapping = dict.fromkeys(self.dims, chunks)
        else:
            chunks_mapping = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk")
        # Reject chunk keys that name dimensions this dataset does not have.
        bad_dims = chunks_mapping.keys() - self.sizes.keys()
        if bad_dims:
            raise ValueError(
                f"chunks keys {tuple(bad_dims)} not found in data dimensions {tuple(self.sizes.keys())}"
            )

        def _resolve_resampler(name: Hashable, resampler: Resampler) -> tuple[int, ...]:
            """Turn a Resampler chunk spec for the 1-D variable ``name`` into an explicit chunks tuple."""
            variable = self._variables.get(name, None)
            if variable is None:
                raise ValueError(
                    f"Cannot chunk by resampler {resampler!r} for virtual variable {name!r}."
                )
            if variable.ndim != 1:
                raise ValueError(
                    f"chunks={resampler!r} only supported for 1D variables. "
                    f"Received variable {name!r} with {variable.ndim} dimensions instead."
                )
            newchunks = resampler.compute_chunks(variable, dim=name)
            # Sanity check: the resampler-derived chunks must tile the whole axis.
            if sum(newchunks) != variable.shape[0]:
                raise ValueError(
                    f"Logic bug in rechunking variable {name!r} using {resampler!r}. "
                    "New chunks tuple does not match size of data. Please open an issue."
                )
            return newchunks

        # Replace any Resampler specs with concrete integer chunk tuples.
        chunks_mapping_ints: Mapping[Any, T_ChunkDim] = {
            name: (
                _resolve_resampler(name, chunks)
                if isinstance(chunks, Resampler)
                else chunks
            )
            for name, chunks in chunks_mapping.items()
        }
        chunkmanager = guess_chunkmanager(chunked_array_type)
        if from_array_kwargs is None:
            from_array_kwargs = {}
        # Chunk every variable; _maybe_chunk leaves variables without matching
        # dims untouched (see its implementation for details).
        variables = {
            k: _maybe_chunk(
                k,
                v,
                chunks_mapping_ints,
                token,
                lock,
                name_prefix,
                inline_array=inline_array,
                chunked_array_type=chunkmanager,
                from_array_kwargs=from_array_kwargs.copy(),
            )
            for k, v in self.variables.items()
        }
        return self._replace(variables)
    def _validate_indexers(
        self, indexers: Mapping[Any, Any], missing_dims: ErrorOptionsWithWarn = "raise"
    ) -> Iterator[tuple[Hashable, int | slice | np.ndarray | Variable]]:
        """Here we make sure
        + indexer has a valid keys
        + indexer is in a valid data type
        + string indexers are cast to the appropriate date type if the
          associated index is a DatetimeIndex or CFTimeIndex

        Yields (dimension name, normalized indexer) pairs.
        """
        from xarray.core.dataarray import DataArray

        # Keys naming missing dimensions are handled per ``missing_dims``
        # policy (raise / warn / ignore) before normalization.
        indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)

        # all indexers should be int, slice, np.ndarrays, or Variable
        for k, v in indexers.items():
            # NOTE: bool is a subclass of int; excluding it here sends scalar
            # booleans down the array-conversion path below instead of treating
            # them as integer positions.
            if isinstance(v, int | slice | Variable) and not isinstance(v, bool):
                yield k, v
            elif isinstance(v, DataArray):
                yield k, v.variable
            elif isinstance(v, tuple):
                yield k, as_variable(v)
            elif isinstance(v, Dataset):
                raise TypeError("cannot use a Dataset as an indexer")
            elif isinstance(v, Sequence) and len(v) == 0:
                # Empty sequences become typed empty integer arrays so that
                # downstream indexing sees integer positions.
                yield k, np.empty((0,), dtype="int64")
            else:
                if not is_duck_array(v):
                    v = np.asarray(v)

                # String/unicode labels against a datetime-like index are
                # coerced to the index's date type so label comparison works.
                if v.dtype.kind in "US":
                    index = self._indexes[k].to_pandas_index()
                    if isinstance(index, pd.DatetimeIndex):
                        v = duck_array_ops.astype(v, dtype="datetime64[ns]")
                    elif isinstance(index, CFTimeIndex):
                        v = _parse_array_of_cftime_strings(v, index.date_type)

                if v.ndim > 1:
                    raise IndexError(
                        "Unlabeled multi-dimensional array cannot be "
                        f"used for indexing: {k}"
                    )
                yield k, v
def _validate_interp_indexers(
self, indexers: Mapping[Any, Any]
) -> Iterator[tuple[Hashable, Variable]]:
"""Variant of _validate_indexers to be used for interpolation"""
for k, v in self._validate_indexers(indexers):
if isinstance(v, Variable):
yield k, v
elif is_scalar(v):
yield k, Variable((), v, attrs=self.coords[k].attrs)
elif isinstance(v, np.ndarray):
yield k, Variable(dims=(k,), data=v, attrs=self.coords[k].attrs)
else:
raise TypeError(type(v))
    def _get_indexers_coords_and_indexes(self, indexers):
        """Extract coordinates and indexes from indexers.
        Only coordinate with a name different from any of self.variables will
        be attached.

        Returns a ``(coords, indexes)`` pair of dicts restricted to names not
        already present in ``self._variables``.
        """
        from xarray.core.dataarray import DataArray

        coords_list = []
        for k, v in indexers.items():
            if isinstance(v, DataArray):
                if v.dtype.kind == "b":
                    if v.ndim != 1:  # we only support 1-d boolean array
                        raise ValueError(
                            f"{v.ndim:d}d-boolean array is used for indexing along "
                            f"dimension {k!r}, but only 1d boolean arrays are "
                            "supported."
                        )
                    # Make sure in case of boolean DataArray, its
                    # coordinate also should be indexed.
                    v_coords = v[v.values.nonzero()[0]].coords
                else:
                    v_coords = v.coords
                coords_list.append(v_coords)

        # we don't need to call align() explicitly or check indexes for
        # alignment, because merge_variables already checks for exact alignment
        # between dimension coordinates
        coords, indexes = merge_coordinates_without_align(coords_list)
        assert_coordinate_consistent(self, coords)

        # silently drop the conflicted variables.
        attached_coords = {k: v for k, v in coords.items() if k not in self._variables}
        attached_indexes = {
            k: v for k, v in indexes.items() if k not in self._variables
        }
        return attached_coords, attached_indexes
    def isel(
        self,
        indexers: Mapping[Any, Any] | None = None,
        drop: bool = False,
        missing_dims: ErrorOptionsWithWarn = "raise",
        **indexers_kwargs: Any,
    ) -> Self:
        """Returns a new dataset with each array indexed along the specified
        dimension(s).
        This method selects values from each array using its `__getitem__`
        method, except this method does not require knowing the order of
        each array's dimensions.
        Parameters
        ----------
        indexers : dict, optional
            A dict with keys matching dimensions and values given
            by integers, slice objects or arrays.
            indexer can be a integer, slice, array-like or DataArray.
            If DataArrays are passed as indexers, xarray-style indexing will be
            carried out. See :ref:`indexing` for the details.
            One of indexers or indexers_kwargs must be provided.
        drop : bool, default: False
            If ``drop=True``, drop coordinates variables indexed by integers
            instead of making them scalar.
        missing_dims : {"raise", "warn", "ignore"}, default: "raise"
            What to do if dimensions that should be selected from are not present in the
            Dataset:
            - "raise": raise an exception
            - "warn": raise a warning, and ignore the missing dimensions
            - "ignore": ignore the missing dimensions
        **indexers_kwargs : {dim: indexer, ...}, optional
            The keyword arguments form of ``indexers``.
            One of indexers or indexers_kwargs must be provided.
        Returns
        -------
        obj : Dataset
            A new Dataset with the same contents as this dataset, except each
            array and dimension is indexed by the appropriate indexers.
            If indexer DataArrays have coordinates that do not conflict with
            this object, then these coordinates will be attached.
            In general, each array's data will be a view of the array's data
            in this dataset, unless vectorized indexing was triggered by using
            an array indexer, in which case the data will be a copy.
        Examples
        --------
        >>> dataset = xr.Dataset(
        ...     {
        ...         "math_scores": (
        ...             ["student", "test"],
        ...             [[90, 85, 92], [78, 80, 85], [95, 92, 98]],
        ...         ),
        ...         "english_scores": (
        ...             ["student", "test"],
        ...             [[88, 90, 92], [75, 82, 79], [93, 96, 91]],
        ...         ),
        ...     },
        ...     coords={
        ...         "student": ["Alice", "Bob", "Charlie"],
        ...         "test": ["Test 1", "Test 2", "Test 3"],
        ...     },
        ... )
        # A specific element from the dataset is selected
        >>> dataset.isel(student=1, test=0)
        <xarray.Dataset> Size: 68B
        Dimensions:         ()
        Coordinates:
            student         <U7 28B 'Bob'
            test            <U6 24B 'Test 1'
        Data variables:
            math_scores     int64 8B 78
            english_scores  int64 8B 75
        # Indexing with a slice using isel
        >>> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2))
        >>> slice_of_data
        <xarray.Dataset> Size: 168B
        Dimensions:         (student: 2, test: 2)
        Coordinates:
          * student         (student) <U7 56B 'Alice' 'Bob'
          * test            (test) <U6 48B 'Test 1' 'Test 2'
        Data variables:
            math_scores     (student, test) int64 32B 90 85 78 80
            english_scores  (student, test) int64 32B 88 90 75 82
        >>> index_array = xr.DataArray([0, 2], dims="student")
        >>> indexed_data = dataset.isel(student=index_array)
        >>> indexed_data
        <xarray.Dataset> Size: 224B
        Dimensions:         (student: 2, test: 3)
        Coordinates:
          * student         (student) <U7 56B 'Alice' 'Charlie'
          * test            (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
        Data variables:
            math_scores     (student, test) int64 48B 90 85 92 95 92 98
            english_scores  (student, test) int64 48B 88 90 92 93 96 91
        See Also
        --------
        :func:`Dataset.sel <Dataset.sel>`
        :func:`DataArray.isel <DataArray.isel>`
        :doc:`xarray-tutorial:intermediate/indexing/indexing`
            Tutorial material on indexing with Xarray objects
        :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
            Tutorial material on basics of indexing
        """
        indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
        # Vectorized ("fancy") indexers take the general path; everything else
        # uses the fast path below.
        if any(is_fancy_indexer(idx) for idx in indexers.values()):
            return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims)
        # Much faster algorithm for when all indexers are ints, slices, one-dimensional
        # lists, or zero or one-dimensional np.ndarray's
        indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)

        variables = {}
        dims: dict[Hashable, int] = {}
        coord_names = self._coord_names.copy()

        indexes, index_variables = isel_indexes(self.xindexes, indexers)

        for name, var in self._variables.items():
            # preserve variable order
            if name in index_variables:
                # index-backed variables were already sliced by isel_indexes
                var = index_variables[name]
            else:
                var_indexers = {k: v for k, v in indexers.items() if k in var.dims}
                if var_indexers:
                    var = var.isel(var_indexers)
                    # with drop=True, coords reduced to scalars are removed
                    # rather than kept as 0-d coordinates
                    if drop and var.ndim == 0 and name in coord_names:
                        coord_names.remove(name)
                        continue
            variables[name] = var
            dims.update(zip(var.dims, var.shape, strict=True))

        # _construct_direct skips validation — all invariants were preserved above
        return self._construct_direct(
            variables=variables,
            coord_names=coord_names,
            dims=dims,
            attrs=self._attrs,
            indexes=indexes,
            encoding=self._encoding,
            close=self._close,
        )
    def _isel_fancy(
        self,
        indexers: Mapping[Any, Any],
        *,
        drop: bool,
        missing_dims: ErrorOptionsWithWarn = "raise",
    ) -> Self:
        """General (vectorized) indexing path for :py:meth:`isel`, used when
        any indexer is "fancy" (e.g. multi-dimensional or DataArray-valued).
        Also attaches non-conflicting coordinates carried by the indexers."""
        valid_indexers = dict(self._validate_indexers(indexers, missing_dims))

        variables: dict[Hashable, Variable] = {}
        indexes, index_variables = isel_indexes(self.xindexes, valid_indexers)

        for name, var in self.variables.items():
            if name in index_variables:
                # index-backed variables were already sliced by isel_indexes
                new_var = index_variables[name]
            else:
                var_indexers = {
                    k: v for k, v in valid_indexers.items() if k in var.dims
                }
                if var_indexers:
                    new_var = var.isel(indexers=var_indexers)
                    # drop scalar coordinates
                    # https://github.com/pydata/xarray/issues/6554
                    if name in self.coords and drop and new_var.ndim == 0:
                        continue
                else:
                    new_var = var.copy(deep=False)
                    if name not in indexes:
                        new_var = new_var.to_base_variable()
            variables[name] = new_var

        coord_names = self._coord_names & variables.keys()
        selected = self._replace_with_new_dims(variables, coord_names, indexes)

        # Extract coordinates from indexers
        coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers)
        variables.update(coord_vars)
        indexes.update(new_indexes)
        coord_names = self._coord_names & variables.keys() | coord_vars.keys()
        return self._replace_with_new_dims(variables, coord_names, indexes=indexes)
    def sel(
        self,
        indexers: Mapping[Any, Any] | None = None,
        method: str | None = None,
        tolerance: int | float | Iterable[int | float] | None = None,
        drop: bool = False,
        **indexers_kwargs: Any,
    ) -> Self:
        """Returns a new dataset with each array indexed by tick labels
        along the specified dimension(s).
        In contrast to `Dataset.isel`, indexers for this method should use
        labels instead of integers.
        Under the hood, this method is powered by using pandas's powerful Index
        objects. This makes label based indexing essentially just as fast as
        using integer indexing.
        It also means this method uses pandas's (well documented) logic for
        indexing. This means you can use string shortcuts for datetime indexes
        (e.g., '2000-01' to select all values in January 2000). It also means
        that slices are treated as inclusive of both the start and stop values,
        unlike normal Python indexing.
        Parameters
        ----------
        indexers : dict, optional
            A dict with keys matching dimensions and values given
            by scalars, slices or arrays of tick labels. For dimensions with
            multi-index, the indexer may also be a dict-like object with keys
            matching index level names.
            If DataArrays are passed as indexers, xarray-style indexing will be
            carried out. See :ref:`indexing` for the details.
            One of indexers or indexers_kwargs must be provided.
        method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
            Method to use for inexact matches:
            * None (default): only exact matches
            * pad / ffill: propagate last valid index value forward
            * backfill / bfill: propagate next valid index value backward
            * nearest: use nearest valid index value
        tolerance : optional
            Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
        drop : bool, optional
            If ``drop=True``, drop coordinates variables in `indexers` instead
            of making them scalar.
        **indexers_kwargs : {dim: indexer, ...}, optional
            The keyword arguments form of ``indexers``.
            One of indexers or indexers_kwargs must be provided.
        Returns
        -------
        obj : Dataset
            A new Dataset with the same contents as this dataset, except each
            variable and dimension is indexed by the appropriate indexers.
            If indexer DataArrays have coordinates that do not conflict with
            this object, then these coordinates will be attached.
            In general, each array's data will be a view of the array's data
            in this dataset, unless vectorized indexing was triggered by using
            an array indexer, in which case the data will be a copy.
        See Also
        --------
        :func:`Dataset.isel <Dataset.isel>`
        :func:`DataArray.sel <DataArray.sel>`
        :doc:`xarray-tutorial:intermediate/indexing/indexing`
            Tutorial material on indexing with Xarray objects
        :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
            Tutorial material on basics of indexing
        """
        indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "sel")
        # Translate label-based indexers into positional ones via the indexes.
        query_results = map_index_queries(
            self, indexers=indexers, method=method, tolerance=tolerance
        )

        if drop:
            # Remove coordinates that label-selection reduced to scalars,
            # instead of attaching them as 0-d coordinate variables.
            no_scalar_variables = {}
            for k, v in query_results.variables.items():
                if v.dims:
                    no_scalar_variables[k] = v
                elif k in self._coord_names:
                    query_results.drop_coords.append(k)
            query_results.variables = no_scalar_variables

        # Positional indexing does the heavy lifting; then the new indexes /
        # index variables from the query overwrite the result's.
        result = self.isel(indexers=query_results.dim_indexers, drop=drop)
        return result._overwrite_indexes(*query_results.as_tuple()[1:])
    def _shuffle(self, dim, *, indices: GroupIndices, chunks: T_Chunks) -> Self:
        # Shuffling is only different from `isel` for chunked arrays.
        # Extract them out, and treat them specially. The rest, we route through isel.
        # This makes it easy to ensure correct handling of indexes.
        is_chunked = {
            name: var
            for name, var in self._variables.items()
            if is_chunked_array(var._data)
        }
        subset = self[[name for name in self._variables if name not in is_chunked]]

        # Expand any slice group-indices into explicit integer lists so the
        # per-group positions can be concatenated and reordered.
        # NOTE(review): the annotation says list[list[int]], but non-slice
        # entries of ``indices`` are passed through unchanged — presumably
        # already integer sequences/arrays; confirm against GroupIndices.
        no_slices: list[list[int]] = [
            (
                list(range(*idx.indices(self.sizes[dim])))
                if isinstance(idx, slice)
                else idx
            )
            for idx in indices
        ]
        # Drop empty groups — they contribute no positions.
        no_slices = [idx for idx in no_slices if idx]

        # Non-chunked variables are shuffled with a single fancy isel
        # (no-op when ``dim`` is absent from the subset).
        shuffled = (
            subset
            if dim not in subset.dims
            else subset.isel({dim: np.concatenate(no_slices)})
        )
        # Chunked variables get the dedicated shuffle, which can control the
        # resulting chunk layout.
        for name, var in is_chunked.items():
            shuffled[name] = var._shuffle(
                indices=no_slices,
                dim=dim,
                chunks=chunks,
            )
        return shuffled
    def head(
        self,
        indexers: Mapping[Any, int] | int | None = None,
        **indexers_kwargs: Any,
    ) -> Self:
        """Returns a new dataset with the first `n` values of each array
        for the specified dimension(s).
        Parameters
        ----------
        indexers : dict or int, default: 5
            A dict with keys matching dimensions and integer values `n`
            or a single integer `n` applied over all dimensions.
            One of indexers or indexers_kwargs must be provided.
        **indexers_kwargs : {dim: n, ...}, optional
            The keyword arguments form of ``indexers``.
            One of indexers or indexers_kwargs must be provided.
        Examples
        --------
        >>> dates = pd.date_range(start="2023-01-01", periods=5)
        >>> pageviews = [1200, 1500, 900, 1800, 2000]
        >>> visitors = [800, 1000, 600, 1200, 1500]
        >>> dataset = xr.Dataset(
        ...     {
        ...         "pageviews": (("date"), pageviews),
        ...         "visitors": (("date"), visitors),
        ...     },
        ...     coords={"date": dates},
        ... )
        >>> busiest_days = dataset.sortby("pageviews", ascending=False)
        >>> busiest_days.head()
        <xarray.Dataset> Size: 120B
        Dimensions:    (date: 5)
        Coordinates:
          * date       (date) datetime64[ns] 40B 2023-01-05 2023-01-04 ... 2023-01-03
        Data variables:
            pageviews  (date) int64 40B 2000 1800 1500 1200 900
            visitors   (date) int64 40B 1500 1200 1000 800 600
        # Retrieve the 3 most busiest days in terms of pageviews
        >>> busiest_days.head(3)
        <xarray.Dataset> Size: 72B
        Dimensions:    (date: 3)
        Coordinates:
          * date       (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02
        Data variables:
            pageviews  (date) int64 24B 2000 1800 1500
            visitors   (date) int64 24B 1500 1200 1000
        # Using a dictionary to specify the number of elements for specific dimensions
        >>> busiest_days.head({"date": 3})
        <xarray.Dataset> Size: 72B
        Dimensions:    (date: 3)
        Coordinates:
          * date       (date) datetime64[ns] 24B 2023-01-05 2023-01-04 2023-01-02
        Data variables:
            pageviews  (date) int64 24B 2000 1800 1500
            visitors   (date) int64 24B 1500 1200 1000
        See Also
        --------
        Dataset.tail
        Dataset.thin
        DataArray.head
        """
        if not indexers_kwargs:
            # Default n=5, matching pandas' head(); a bare int is broadcast
            # over all dimensions.
            if indexers is None:
                indexers = 5
            if not isinstance(indexers, int) and not is_dict_like(indexers):
                raise TypeError("indexers must be either dict-like or a single integer")
            if isinstance(indexers, int):
                indexers = dict.fromkeys(self.dims, indexers)
        indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "head")
        # Validate each n: must be a non-negative int. NOTE(review): v == 0 is
        # accepted (yields an empty selection) even though the message says
        # "positive"; also bools pass isinstance(v, int) since bool is an int
        # subclass — kept as-is to preserve existing error behavior.
        for k, v in indexers.items():
            if not isinstance(v, int):
                raise TypeError(
                    "expected integer type indexer for "
                    f"dimension {k!r}, found {type(v)!r}"
                )
            elif v < 0:
                raise ValueError(
                    "expected positive integer as indexer "
                    f"for dimension {k!r}, found {v}"
                )
        # slice(n) == first n elements along each requested dimension
        indexers_slices = {k: slice(val) for k, val in indexers.items()}
        return self.isel(indexers_slices)
def tail(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with the last `n` values of each array
for the specified dimension(s).
Parameters
----------
indexers : dict or int, default: 5
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> activity_names = ["Walking", "Running", "Cycling", "Swimming", "Yoga"]
>>> durations = [30, 45, 60, 45, 60] # in minutes
>>> energies = [150, 300, 250, 400, 100] # in calories
>>> dataset = xr.Dataset(
... {
... "duration": (["activity"], durations),
... "energy_expenditure": (["activity"], energies),
... },
... coords={"activity": activity_names},
... )
>>> sorted_dataset = dataset.sortby("energy_expenditure", ascending=False)
>>> sorted_dataset
<xarray.Dataset> Size: 240B
Dimensions: (activity: 5)
Coordinates:
* activity (activity) <U8 160B 'Swimming' 'Running' ... 'Yoga'
Data variables:
duration (activity) int64 40B 45 45 60 30 60
energy_expenditure (activity) int64 40B 400 300 250 150 100
# Activities with the least energy expenditures using tail()
>>> sorted_dataset.tail(3)
<xarray.Dataset> Size: 144B
Dimensions: (activity: 3)
Coordinates:
* activity (activity) <U8 96B 'Cycling' 'Walking' 'Yoga'
Data variables:
duration (activity) int64 24B 60 30 60
energy_expenditure (activity) int64 24B 250 150 100
>>> sorted_dataset.tail({"activity": 3})
<xarray.Dataset> Size: 144B
Dimensions: (activity: 3)
Coordinates:
* activity (activity) <U8 96B 'Cycling' 'Walking' 'Yoga'
Data variables:
duration (activity) int64 24B 60 30 60
energy_expenditure (activity) int64 24B 250 150 100
See Also
--------
Dataset.head
Dataset.thin
DataArray.tail
"""
if not indexers_kwargs:
if indexers is None:
indexers = 5
if not isinstance(indexers, int) and not is_dict_like(indexers):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = dict.fromkeys(self.dims, indexers)
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "tail")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
f"dimension {k!r}, found {type(v)!r}"
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
f"for dimension {k!r}, found {v}"
)
indexers_slices = {
k: slice(-val, None) if val != 0 else slice(val)
for k, val in indexers.items()
}
return self.isel(indexers_slices)
def thin(
self,
indexers: Mapping[Any, int] | int | None = None,
**indexers_kwargs: Any,
) -> Self:
"""Returns a new dataset with each array indexed along every `n`-th
value for the specified dimension(s)
Parameters
----------
indexers : dict or int
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Examples
--------
>>> x_arr = np.arange(0, 26)
>>> x_arr
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25])
>>> x = xr.DataArray(
... np.reshape(x_arr, (2, 13)),
... dims=("x", "y"),
... coords={"x": [0, 1], "y": np.arange(0, 13)},
... )
>>> x_ds = xr.Dataset({"foo": x})
>>> x_ds
<xarray.Dataset> Size: 328B
Dimensions: (x: 2, y: 13)
Coordinates:
* x (x) int64 16B 0 1
* y (y) int64 104B 0 1 2 3 4 5 6 7 8 9 10 11 12
Data variables:
foo (x, y) int64 208B 0 1 2 3 4 5 6 7 8 ... 17 18 19 20 21 22 23 24 25
>>> x_ds.thin(3)
<xarray.Dataset> Size: 88B
Dimensions: (x: 1, y: 5)
Coordinates:
* x (x) int64 8B 0
* y (y) int64 40B 0 3 6 9 12
Data variables:
foo (x, y) int64 40B 0 3 6 9 12
>>> x.thin({"x": 2, "y": 5})
<xarray.DataArray (x: 1, y: 3)> Size: 24B
array([[ 0, 5, 10]])
Coordinates:
* x (x) int64 8B 0
* y (y) int64 24B 0 5 10
See Also
--------
Dataset.head
Dataset.tail
DataArray.thin
"""
if (
not indexers_kwargs
and not isinstance(indexers, int)
and not is_dict_like(indexers)
):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = dict.fromkeys(self.dims, indexers)
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "thin")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
f"dimension {k!r}, found {type(v)!r}"
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
f"for dimension {k!r}, found {v}"
)
elif v == 0:
raise ValueError("step cannot be zero")
indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}
return self.isel(indexers_slices)
def broadcast_like(
self,
other: T_DataArrayOrSet,
exclude: Iterable[Hashable] | None = None,
) -> Self:
"""Broadcast this DataArray against another Dataset or DataArray.
This is equivalent to xr.broadcast(other, self)[1]
Parameters
----------
other : Dataset or DataArray
Object against which to broadcast this array.
exclude : iterable of hashable, optional
Dimensions that must not be broadcasted
"""
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
args = align(other, self, join="outer", copy=False, exclude=exclude)
dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)
return _broadcast_helper(args[1], exclude, dims_map, common_coords)
    def _reindex_callback(
        self,
        aligner: alignment.Aligner,
        dim_pos_indexers: dict[Hashable, Any],
        variables: dict[Hashable, Variable],
        indexes: dict[Hashable, Index],
        fill_value: Any,
        exclude_dims: frozenset[Hashable],
        exclude_vars: frozenset[Hashable],
    ) -> Self:
        """Callback called from ``Aligner`` to create a new reindexed Dataset.

        ``variables``/``indexes`` hold the already-aligned index coordinates;
        ``dim_pos_indexers`` maps each dimension to the positional indexer to
        apply to the remaining variables (empty when no reindexing is needed).
        ``exclude_dims``/``exclude_vars`` are passed through untouched.
        """
        new_variables = variables.copy()
        new_indexes = indexes.copy()
        # re-assign variable metadata: aligned variables are freshly built, so
        # copy attrs/encoding over from this dataset's variable of the same name
        for name, new_var in new_variables.items():
            var = self._variables.get(name)
            if var is not None:
                new_var.attrs = var.attrs
                new_var.encoding = var.encoding
        # pass through indexes from excluded dimensions
        # no extra check needed for multi-coordinate indexes, potential conflicts
        # should already have been detected when aligning the indexes
        for name, idx in self._indexes.items():
            var = self._variables[name]
            if set(var.dims) <= exclude_dims:
                new_indexes[name] = idx
                new_variables[name] = var
        if not dim_pos_indexers:
            # fast path for no reindexing necessary
            if set(new_indexes) - set(self._indexes):
                # this only adds new indexes and their coordinate variables
                reindexed = self._overwrite_indexes(new_indexes, new_variables)
            else:
                reindexed = self.copy(deep=aligner.copy)
        else:
            # reindex every variable not already produced by the aligner and
            # not explicitly excluded
            to_reindex = {
                k: v
                for k, v in self.variables.items()
                if k not in variables and k not in exclude_vars
            }
            reindexed_vars = alignment.reindex_variables(
                to_reindex,
                dim_pos_indexers,
                copy=aligner.copy,
                fill_value=fill_value,
                sparse=aligner.sparse,
            )
            new_variables.update(reindexed_vars)
            new_coord_names = self._coord_names | set(new_indexes)
            reindexed = self._replace_with_new_dims(
                new_variables, new_coord_names, indexes=new_indexes
            )
        # dataset-level encoding is preserved across reindexing
        reindexed.encoding = self.encoding
        return reindexed
def reindex_like(
self,
other: T_Xarray,
method: ReindexMethodOptions = None,
tolerance: float | Iterable[float] | str | None = None,
copy: bool = True,
fill_value: Any = xrdtypes.NA,
) -> Self:
"""
Conform this object onto the indexes of another object, for indexes which the
objects share. Missing values are filled with ``fill_value``. The default fill
value is NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mismatched index values will be filled in with
NaN, and any mismatched dimension names will simply be ignored.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill", None}, optional
Method to use for filling index values from other not found in this
dataset:
- None (default): don't fill gaps
- "pad" / "ffill": propagate last valid index value forward
- "backfill" / "bfill": propagate next valid index value backward
- "nearest": use nearest valid index value
tolerance : float | Iterable[float] | str | None, default: None
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like must be the same size as the index and its dtype
must exactly match the index’s type.
copy : bool, default: True
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like maps
variable names to fill values.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
DataArray.reindex_like
align
"""
return alignment.reindex_like(
self,
other=other,
method=method,
tolerance=tolerance,
copy=copy,
fill_value=fill_value,
)
def reindex(
self,
indexers: Mapping[Any, Any] | None = None,
method: ReindexMethodOptions = None,
tolerance: float | Iterable[float] | str | None = None,
copy: bool = True,
fill_value: Any = xrdtypes.NA,
**indexers_kwargs: Any,
) -> Self:
"""Conform this object onto a new set of indexes, filling in
missing values with ``fill_value``. The default fill value is NaN.
Parameters
----------
indexers : dict, optional
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mismatched coordinate
values will be filled in with NaN, and any mismatched dimension
names will simply be ignored.
One of indexers or indexers_kwargs must be provided.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill", None}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
- None (default): don't fill gaps
- "pad" / "ffill": propagate last valid index value forward
- "backfill" / "bfill": propagate next valid index value backward
- "nearest": use nearest valid index value
tolerance : float | Iterable[float] | str | None, default: None
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like must be the same size as the index and its dtype
must exactly match the index’s type.
copy : bool, default: True
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like,
maps variable names (including coordinates) to fill values.
sparse : bool, default: False
use sparse-array.
**indexers_kwargs : {dim: indexer, ...}, optional
Keyword arguments in the same form as ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reindex_like
align
pandas.Index.get_indexer
Examples
--------
Create a dataset with some fictional data.
>>> x = xr.Dataset(
... {
... "temperature": ("station", 20 * np.random.rand(4)),
... "pressure": ("station", 500 * np.random.rand(4)),
... },
... coords={"station": ["boston", "nyc", "seattle", "denver"]},
... )
>>> x
<xarray.Dataset> Size: 176B
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 112B 'boston' 'nyc' 'seattle' 'denver'
Data variables:
temperature (station) float64 32B 10.98 14.3 12.06 10.9
pressure (station) float64 32B 211.8 322.9 218.8 445.9
>>> x.indexes
Indexes:
station Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')
Create a new index and reindex the dataset. By default values in the new index that
do not have corresponding records in the dataset are assigned `NaN`.
>>> new_index = ["boston", "austin", "seattle", "lincoln"]
>>> x.reindex({"station": new_index})
<xarray.Dataset> Size: 176B
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 112B 'boston' 'austin' 'seattle' 'lincoln'
Data variables:
temperature (station) float64 32B 10.98 nan 12.06 nan
pressure (station) float64 32B 211.8 nan 218.8 nan
We can fill in the missing values by passing a value to the keyword `fill_value`.
>>> x.reindex({"station": new_index}, fill_value=0)
<xarray.Dataset> Size: 176B
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 112B 'boston' 'austin' 'seattle' 'lincoln'
Data variables:
temperature (station) float64 32B 10.98 0.0 12.06 0.0
pressure (station) float64 32B 211.8 0.0 218.8 0.0
We can also use different fill values for each variable.
>>> x.reindex(
... {"station": new_index}, fill_value={"temperature": 0, "pressure": 100}
... )
<xarray.Dataset> Size: 176B
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 112B 'boston' 'austin' 'seattle' 'lincoln'
Data variables:
temperature (station) float64 32B 10.98 0.0 12.06 0.0
pressure (station) float64 32B 211.8 100.0 218.8 100.0
Because the index is not monotonically increasing or decreasing, we cannot use arguments
to the keyword method to fill the `NaN` values.
>>> x.reindex({"station": new_index}, method="nearest")
Traceback (most recent call last):
...
raise ValueError('index must be monotonic increasing or decreasing')
ValueError: index must be monotonic increasing or decreasing
To further illustrate the filling functionality in reindex, we will create a
dataset with a monotonically increasing index (for example, a sequence of dates).
>>> x2 = xr.Dataset(
... {
... "temperature": (
... "time",
... [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12],
... ),
... "pressure": ("time", 500 * np.random.rand(6)),
... },
... coords={"time": pd.date_range("01/01/2019", periods=6, freq="D")},
... )
>>> x2
<xarray.Dataset> Size: 144B
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 48B 2019-01-01 2019-01-02 ... 2019-01-06
Data variables:
temperature (time) float64 48B 15.57 12.77 nan 0.3081 16.59 15.12
pressure (time) float64 48B 481.8 191.7 395.9 264.4 284.0 462.8
Suppose we decide to expand the dataset to cover a wider date range.
>>> time_index2 = pd.date_range("12/29/2018", periods=10, freq="D")
>>> x2.reindex({"time": time_index2})
<xarray.Dataset> Size: 240B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07
Data variables:
temperature (time) float64 80B nan nan nan 15.57 ... 0.3081 16.59 15.12 nan
pressure (time) float64 80B nan nan nan 481.8 ... 264.4 284.0 462.8 nan
The index entries that did not have a value in the original data frame (for example, `2018-12-29`)
are by default filled with NaN. If desired, we can fill in the missing values using one of several options.
For example, to back-propagate the last valid value to fill the `NaN` values,
pass `bfill` as an argument to the `method` keyword.
>>> x3 = x2.reindex({"time": time_index2}, method="bfill")
>>> x3
<xarray.Dataset> Size: 240B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2018-12-29 2018-12-30 ... 2019-01-07
Data variables:
temperature (time) float64 80B 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan
pressure (time) float64 80B 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan
Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)
will not be filled by any of the value propagation schemes.
>>> x2.where(x2.temperature.isnull(), drop=True)
<xarray.Dataset> Size: 24B
Dimensions: (time: 1)
Coordinates:
* time (time) datetime64[ns] 8B 2019-01-03
Data variables:
temperature (time) float64 8B nan
pressure (time) float64 8B 395.9
>>> x3.where(x3.temperature.isnull(), drop=True)
<xarray.Dataset> Size: 48B
Dimensions: (time: 2)
Coordinates:
* time (time) datetime64[ns] 16B 2019-01-03 2019-01-07
Data variables:
temperature (time) float64 16B nan nan
pressure (time) float64 16B 395.9 nan
This is because filling while reindexing does not look at dataset values, but only compares
the original and desired indexes. If you do want to fill in the `NaN` values present in the
original dataset, use the :py:meth:`~Dataset.fillna()` method.
"""
indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex")
return alignment.reindex(
self,
indexers=indexers,
method=method,
tolerance=tolerance,
copy=copy,
fill_value=fill_value,
)
def _reindex(
self,
indexers: Mapping[Any, Any] | None = None,
method: str | None = None,
tolerance: int | float | Iterable[int | float] | None = None,
copy: bool = True,
fill_value: Any = xrdtypes.NA,
sparse: bool = False,
**indexers_kwargs: Any,
) -> Self:
"""
Same as reindex but supports sparse option.
"""
indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex")
return alignment.reindex(
self,
indexers=indexers,
method=method,
tolerance=tolerance,
copy=copy,
fill_value=fill_value,
sparse=sparse,
)
    def interp(
        self,
        coords: Mapping[Any, Any] | None = None,
        method: InterpOptions = "linear",
        assume_sorted: bool = False,
        kwargs: Mapping[str, Any] | None = None,
        method_non_numeric: str = "nearest",
        **coords_kwargs: Any,
    ) -> Self:
        """
        Interpolate a Dataset onto new coordinates.

        Performs univariate or multivariate interpolation of a Dataset onto
        new coordinates, utilizing either NumPy or SciPy interpolation
        routines. Out-of-range values are filled with NaN, unless specified
        otherwise via `kwargs` to the numpy/scipy interpolant.

        Parameters
        ----------
        coords : dict, optional
            Mapping from dimension names to the new coordinates.
            New coordinate can be a scalar, array-like or DataArray.
            If DataArrays are passed as new coordinates, their dimensions are
            used for the broadcasting. Missing values are skipped.
        method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \
            "quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" }
            Interpolation method to use.
        assume_sorted : bool, default: False
            If False, values of coordinates that are interpolated over can be
            in any order and they are sorted first. If True, interpolated
            coordinates are assumed to be an array of monotonically increasing
            values.
        kwargs : dict, optional
            Additional keyword arguments passed to the interpolator. Valid
            options and their behavior depend on which interpolant is used.
        method_non_numeric : {"nearest", "pad", "ffill", "backfill", "bfill"}, optional
            Method for non-numeric types. Passed on to :py:meth:`Dataset.reindex`.
            ``"nearest"`` is used by default.
        **coords_kwargs : {dim: coordinate, ...}, optional
            The keyword arguments form of ``coords``.
            One of coords or coords_kwargs must be provided.

        Returns
        -------
        interpolated : Dataset
            New dataset on the new coordinates.

        Notes
        -----
        SciPy is required for certain interpolation methods. When
        interpolating along multiple dimensions with methods ``linear`` and
        ``nearest``, the interpolation is decomposed into independent
        interpolations along one dimension at a time where possible.

        See Also
        --------
        :mod:`scipy.interpolate`
        :doc:`xarray-tutorial:fundamentals/02.2_manipulating_dimensions`
            Tutorial material on manipulating data resolution using :py:func:`~xarray.Dataset.interp`
        """
        from xarray.core import missing
        if kwargs is None:
            kwargs = {}
        coords = either_dict_or_kwargs(coords, coords_kwargs, "interp")
        indexers = dict(self._validate_interp_indexers(coords))
        # Interpolation requires sorted source coordinates unless the caller
        # vouches for it.
        obj = self if assume_sorted else self.sortby(list(coords))
        def maybe_variable(obj, k):
            # workaround to get variable for dimension without coordinate.
            try:
                return obj._variables[k]
            except KeyError:
                return as_variable((k, range(obj.sizes[k])))
        def _validate_interp_indexer(x, new_x):
            # In the case of datetimes, the restrictions placed on indexers
            # used with interp are stronger than those which are placed on
            # isel, so we need an additional check after _validate_indexers.
            if _contains_datetime_like_objects(
                x
            ) and not _contains_datetime_like_objects(new_x):
                raise TypeError(
                    "When interpolating over a datetime-like "
                    "coordinate, the coordinates to "
                    "interpolate to must be either datetime "
                    "strings or datetimes. "
                    f"Instead got\n{new_x}"
                )
            return x, new_x
        # Map each interpolated dim to its (source, destination) variable pair.
        validated_indexers = {
            k: _validate_interp_indexer(maybe_variable(obj, k), v)
            for k, v in indexers.items()
        }
        # optimization: subset to coordinate range of the target index
        if method in ["linear", "nearest"]:
            for k, v in validated_indexers.items():
                obj, newidx = missing._localize(obj, {k: v})
                validated_indexers[k] = newidx[k]
        has_chunked_array = bool(
            any(is_chunked_array(v._data) for v in obj._variables.values())
        )
        if has_chunked_array:
            # optimization: create dask coordinate arrays once per Dataset
            # rather than once per Variable when dask.array.unify_chunks is called later
            # GH4739
            dask_indexers = {
                k: (index.to_base_variable().chunk(), dest.to_base_variable().chunk())
                for k, (index, dest) in validated_indexers.items()
            }
        variables: dict[Hashable, Variable] = {}
        reindex_vars: list[Hashable] = []
        for name, var in obj._variables.items():
            if name in indexers:
                continue
            use_indexers = (
                dask_indexers if is_duck_dask_array(var._data) else validated_indexers
            )
            dtype_kind = var.dtype.kind
            if dtype_kind in "uifc":
                # For normal number types do the interpolation:
                var_indexers = {k: v for k, v in use_indexers.items() if k in var.dims}
                variables[name] = missing.interp(var, var_indexers, method, **kwargs)
            elif dtype_kind in "ObU" and (use_indexers.keys() & var.dims):
                if all(var.sizes[d] == 1 for d in (use_indexers.keys() & var.dims)):
                    # Broadcastable, can be handled quickly without reindex:
                    to_broadcast = (var.squeeze(),) + tuple(
                        dest for _, dest in use_indexers.values()
                    )
                    variables[name] = broadcast_variables(*to_broadcast)[0].copy(
                        deep=True
                    )
                else:
                    # For types that we do not understand do stepwise
                    # interpolation to avoid modifying the elements.
                    # reindex the variable instead because it supports
                    # booleans and objects and retains the dtype but inside
                    # this loop there might be some duplicate code that slows it
                    # down, therefore collect these signals and run it later:
                    reindex_vars.append(name)
            elif all(d not in indexers for d in var.dims):
                # For anything else we can only keep variables if they
                # are not dependent on any coords that are being
                # interpolated along:
                variables[name] = var
        # Batched reindex of the collected non-numeric variables; only 1-D
        # destinations whose dim matches their own name can be used as indexers.
        if reindex_vars and (
            reindex_indexers := {
                k: v for k, (_, v) in validated_indexers.items() if v.dims == (k,)
            }
        ):
            reindexed = alignment.reindex(
                obj[reindex_vars],
                indexers=reindex_indexers,
                method=method_non_numeric,
                exclude_vars=variables.keys(),
            )
            indexes = dict(reindexed._indexes)
            variables.update(reindexed.variables)
        else:
            # Get the indexes that are not being interpolated along
            indexes = {k: v for k, v in obj._indexes.items() if k not in indexers}
        # Get the coords that also exist in the variables:
        coord_names = obj._coord_names & variables.keys()
        selected = self._replace_with_new_dims(
            variables.copy(), coord_names, indexes=indexes
        )
        # Attach indexer as coordinate
        for k, v in indexers.items():
            assert isinstance(v, Variable)
            if v.dims == (k,):
                index = PandasIndex(v, k, coord_dtype=v.dtype)
                index_vars = index.create_variables({k: v})
                indexes[k] = index
                variables.update(index_vars)
            else:
                variables[k] = v
        # Extract coordinates from indexers
        coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords)
        variables.update(coord_vars)
        indexes.update(new_indexes)
        coord_names = obj._coord_names & variables.keys() | coord_vars.keys()
        return self._replace_with_new_dims(variables, coord_names, indexes=indexes)
def interp_like(
self,
other: T_Xarray,
method: InterpOptions = "linear",
assume_sorted: bool = False,
kwargs: Mapping[str, Any] | None = None,
method_non_numeric: str = "nearest",
) -> Self:
"""Interpolate this object onto the coordinates of another object.
Performs univariate or multivariate interpolation of a Dataset onto new coordinates,
utilizing either NumPy or SciPy interpolation routines.
Out-of-range values are filled with NaN, unless specified otherwise via `kwargs` to the numpy/scipy interpolant.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to an 1d array-like, which provides coordinates upon
which to index the variables in this dataset. Missing values are skipped.
method : { "linear", "nearest", "zero", "slinear", "quadratic", "cubic", \
"quintic", "polynomial", "pchip", "barycentric", "krogh", "akima", "makima" }
Interpolation method to use (see descriptions above).
assume_sorted : bool, default: False
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs : dict, optional
Additional keyword arguments passed to the interpolator. Valid
options and their behavior depend which interpolant is use
method_non_numeric : {"nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method for non-numeric types. Passed on to :py:meth:`Dataset.reindex`.
``"nearest"`` is used by default.
Returns
-------
interpolated : Dataset
Another dataset by interpolating this dataset's data along the
coordinates of the other object.
Notes
-----
- scipy is required.
- If the dataset has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.
- When interpolating along multiple dimensions with methods `linear` and `nearest`,
the process attempts to decompose the interpolation into independent interpolations
along one dimension at a time.
- The specific interpolation method and dimensionality determine which
interpolant is used:
1. **Interpolation along one dimension of 1D data (`method='linear'`)**
- Uses :py:func:`numpy.interp`, unless `fill_value='extrapolate'` is provided via `kwargs`.
2. **Interpolation along one dimension of N-dimensional data (N ≥ 1)**
- Methods {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "quintic", "polynomial"}
use :py:func:`scipy.interpolate.interp1d`, unless conditions permit the use of :py:func:`numpy.interp`
(as in the case of `method='linear'` for 1D data).
- If `method='polynomial'`, the `order` keyword argument must also be provided.
3. **Special interpolants for interpolation along one dimension of N-dimensional data (N ≥ 1)**
- Depending on the `method`, the following interpolants from :py:class:`scipy.interpolate` are used:
- `"pchip"`: :py:class:`scipy.interpolate.PchipInterpolator`
- `"barycentric"`: :py:class:`scipy.interpolate.BarycentricInterpolator`
- `"krogh"`: :py:class:`scipy.interpolate.KroghInterpolator`
- `"akima"` or `"makima"`: :py:class:`scipy.interpolate.Akima1dInterpolator`
(`makima` is handled by passing the `makima` flag).
4. **Interpolation along multiple dimensions of multi-dimensional data**
- Uses :py:func:`scipy.interpolate.interpn` for methods {"linear", "nearest", "slinear",
"cubic", "quintic", "pchip"}.
See Also
--------
:func:`Dataset.interp`
:func:`Dataset.reindex_like`
:mod:`scipy.interpolate`
"""
if kwargs is None:
kwargs = {}
# pick only dimension coordinates with a single index
coords: dict[Hashable, Variable] = {}
other_indexes = other.xindexes
for dim in self.dims:
other_dim_coords = other_indexes.get_all_coords(dim, errors="ignore")
if len(other_dim_coords) == 1:
coords[dim] = other_dim_coords[dim]
numeric_coords: dict[Hashable, Variable] = {}
object_coords: dict[Hashable, Variable] = {}
for k, v in coords.items():
if v.dtype.kind in "uifcMm":
numeric_coords[k] = v
else:
object_coords[k] = v
ds = self
if object_coords:
# We do not support interpolation along object coordinate.
# reindex instead.
ds = self.reindex(object_coords)
return ds.interp(
coords=numeric_coords,
method=method,
assume_sorted=assume_sorted,
kwargs=kwargs,
method_non_numeric=method_non_numeric,
)
# Helper methods for rename()
def _rename_vars(
self, name_dict, dims_dict
) -> tuple[dict[Hashable, Variable], set[Hashable]]:
variables = {}
coord_names = set()
for k, v in self.variables.items():
var = v.copy(deep=False)
var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
name = name_dict.get(k, k)
if name in variables:
raise ValueError(f"the new name {name!r} conflicts")
variables[name] = var
if k in self._coord_names:
coord_names.add(name)
return variables, coord_names
def _rename_dims(self, name_dict: Mapping[Any, Hashable]) -> dict[Hashable, int]:
return {name_dict.get(k, k): v for k, v in self.sizes.items()}
    def _rename_indexes(
        self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable]
    ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]:
        """Rename the dataset's indexes and their coordinate variables.

        Returns a mapping from (renamed) coordinate names to the renamed
        indexes, and the new index coordinate variables keyed by their new
        names.
        """
        if not self._indexes:
            return {}, {}
        indexes = {}
        variables = {}
        # One index may back several coordinates; rename each index once per
        # group and point every renamed coordinate at the new index object.
        for index, coord_names in self.xindexes.group_by_index():
            new_index = index.rename(name_dict, dims_dict)
            new_coord_names = [name_dict.get(k, k) for k in coord_names]
            indexes.update(dict.fromkeys(new_coord_names, new_index))
            # rebuild the index's coordinate variables under their new names,
            # passing the old variables through so attributes are preserved
            new_index_vars = new_index.create_variables(
                {
                    new: self._variables[old]
                    for old, new in zip(coord_names, new_coord_names, strict=True)
                }
            )
            variables.update(new_index_vars)
        return indexes, variables
def _rename_all(
self, name_dict: Mapping[Any, Hashable], dims_dict: Mapping[Any, Hashable]
) -> tuple[
dict[Hashable, Variable],
set[Hashable],
dict[Hashable, int],
dict[Hashable, Index],
]:
variables, coord_names = self._rename_vars(name_dict, dims_dict)
dims = self._rename_dims(dims_dict)
indexes, index_vars = self._rename_indexes(name_dict, dims_dict)
variables = {k: index_vars.get(k, v) for k, v in variables.items()}
return variables, coord_names, dims, indexes
    def _rename(
        self,
        name_dict: Mapping[Any, Hashable] | None = None,
        **names: Hashable,
    ) -> Self:
        """Shared implementation of :py:meth:`Dataset.rename`.

        Also used internally by DataArray so that the warning (if any)
        is raised at the right stack level.

        Raises
        ------
        ValueError
            If a key of ``name_dict`` is neither a variable nor a dimension
            of this dataset.
        """
        name_dict = either_dict_or_kwargs(name_dict, names, "rename")
        for k in name_dict.keys():
            if k not in self and k not in self.dims:
                raise ValueError(
                    f"cannot rename {k!r} because it is not a "
                    "variable or dimension in this dataset"
                )
            # Detect renames that historically produced an indexed dimension
            # coordinate (a dim renamed onto a matching 1-d coordinate, or
            # vice versa) and warn that no index is created anymore.
            create_dim_coord = False
            new_k = name_dict[k]
            if k == new_k:
                continue  # Same name, nothing to do
            if k in self.dims and new_k in self._coord_names:
                coord_dims = self._variables[name_dict[k]].dims
                if coord_dims == (k,):
                    create_dim_coord = True
            elif k in self._coord_names and new_k in self.dims:
                coord_dims = self._variables[k].dims
                if coord_dims == (new_k,):
                    create_dim_coord = True
            if create_dim_coord:
                warnings.warn(
                    f"rename {k!r} to {name_dict[k]!r} does not create an index "
                    "anymore. Try using swap_dims instead or use set_index "
                    "after rename to create an indexed coordinate.",
                    UserWarning,
                    stacklevel=3,
                )
        # a single mapping renames variables and dimensions alike
        variables, coord_names, dims, indexes = self._rename_all(
            name_dict=name_dict, dims_dict=name_dict
        )
        return self._replace(variables, coord_names, dims=dims, indexes=indexes)
def rename(
self,
name_dict: Mapping[Any, Hashable] | None = None,
**names: Hashable,
) -> Self:
"""Returns a new object with renamed variables, coordinates and dimensions.
Parameters
----------
name_dict : dict-like, optional
Dictionary whose keys are current variable, coordinate or dimension names and
whose values are the desired names.
**names : optional
Keyword form of ``name_dict``.
One of name_dict or names must be provided.
Returns
-------
renamed : Dataset
Dataset with renamed variables, coordinates and dimensions.
See Also
--------
Dataset.swap_dims
Dataset.rename_vars
Dataset.rename_dims
DataArray.rename
"""
return self._rename(name_dict=name_dict, **names)
    def rename_dims(
        self,
        dims_dict: Mapping[Any, Hashable] | None = None,
        **dims: Hashable,
    ) -> Self:
        """Returns a new object with renamed dimensions only.

        Parameters
        ----------
        dims_dict : dict-like, optional
            Dictionary whose keys are current dimension names and
            whose values are the desired names. The desired names must
            not be the name of an existing dimension or Variable in the Dataset.
        **dims : optional
            Keyword form of ``dims_dict``.
            One of dims_dict or dims must be provided.

        Returns
        -------
        renamed : Dataset
            Dataset with renamed dimensions.

        See Also
        --------
        Dataset.swap_dims
        Dataset.rename
        Dataset.rename_vars
        DataArray.rename
        """
        dims_dict = either_dict_or_kwargs(dims_dict, dims, "rename_dims")
        for k, v in dims_dict.items():
            if k not in self.dims:
                raise ValueError(
                    f"cannot rename {k!r} because it is not found "
                    f"in the dimensions of this dataset {tuple(self.dims)}"
                )
            # Renaming onto an existing dimension or variable would be
            # ambiguous; point users at swap_dims instead.
            if v in self.dims or v in self:
                raise ValueError(
                    f"Cannot rename {k} to {v} because {v} already exists. "
                    "Try using swap_dims instead."
                )
        # empty name_dict: only dimensions change, variable names are kept
        variables, coord_names, sizes, indexes = self._rename_all(
            name_dict={}, dims_dict=dims_dict
        )
        return self._replace(variables, coord_names, dims=sizes, indexes=indexes)
    def rename_vars(
        self,
        name_dict: Mapping[Any, Hashable] | None = None,
        **names: Hashable,
    ) -> Self:
        """Returns a new object with renamed variables including coordinates

        Parameters
        ----------
        name_dict : dict-like, optional
            Dictionary whose keys are current variable or coordinate names and
            whose values are the desired names.
        **names : optional
            Keyword form of ``name_dict``.
            One of name_dict or names must be provided.

        Returns
        -------
        renamed : Dataset
            Dataset with renamed variables including coordinates

        See Also
        --------
        Dataset.swap_dims
        Dataset.rename
        Dataset.rename_dims
        DataArray.rename
        """
        name_dict = either_dict_or_kwargs(name_dict, names, "rename_vars")
        for k in name_dict:
            if k not in self:
                raise ValueError(
                    f"cannot rename {k!r} because it is not a "
                    "variable or coordinate in this dataset"
                )
        # empty dims_dict: only variable names change, dimensions are kept
        variables, coord_names, dims, indexes = self._rename_all(
            name_dict=name_dict, dims_dict={}
        )
        return self._replace(variables, coord_names, dims=dims, indexes=indexes)
    def swap_dims(
        self, dims_dict: Mapping[Any, Hashable] | None = None, **dims_kwargs
    ) -> Self:
        """Returns a new object with swapped dimensions.

        Parameters
        ----------
        dims_dict : dict-like
            Dictionary whose keys are current dimension names and whose values
            are new names.
        **dims_kwargs : {existing_dim: new_dim, ...}, optional
            The keyword arguments form of ``dims_dict``.
            One of dims_dict or dims_kwargs must be provided.

        Returns
        -------
        swapped : Dataset
            Dataset with swapped dimensions.

        Examples
        --------
        >>> ds = xr.Dataset(
        ...     data_vars={"a": ("x", [5, 7]), "b": ("x", [0.1, 2.4])},
        ...     coords={"x": ["a", "b"], "y": ("x", [0, 1])},
        ... )
        >>> ds
        <xarray.Dataset> Size: 56B
        Dimensions:  (x: 2)
        Coordinates:
          * x        (x) <U1 8B 'a' 'b'
            y        (x) int64 16B 0 1
        Data variables:
            a        (x) int64 16B 5 7
            b        (x) float64 16B 0.1 2.4

        >>> ds.swap_dims({"x": "y"})
        <xarray.Dataset> Size: 56B
        Dimensions:  (y: 2)
        Coordinates:
          * y        (y) int64 16B 0 1
            x        (y) <U1 8B 'a' 'b'
        Data variables:
            a        (y) int64 16B 5 7
            b        (y) float64 16B 0.1 2.4

        >>> ds.swap_dims({"x": "z"})
        <xarray.Dataset> Size: 56B
        Dimensions:  (z: 2)
        Coordinates:
            x        (z) <U1 8B 'a' 'b'
            y        (z) int64 16B 0 1
        Dimensions without coordinates: z
        Data variables:
            a        (z) int64 16B 5 7
            b        (z) float64 16B 0.1 2.4

        See Also
        --------
        Dataset.rename
        DataArray.swap_dims
        """
        # TODO: deprecate this method in favor of a (less confusing)
        # rename_dims() method that only renames dimensions.
        dims_dict = either_dict_or_kwargs(dims_dict, dims_kwargs, "swap_dims")
        for current_name, new_name in dims_dict.items():
            if current_name not in self.dims:
                raise ValueError(
                    f"cannot swap from dimension {current_name!r} because it is "
                    f"not one of the dimensions of this dataset "
                    f"{tuple(self.dims)}"
                )
            if new_name in self.variables and self.variables[new_name].dims != (
                current_name,
            ):
                raise ValueError(
                    f"replacement dimension {new_name!r} is not a 1D "
                    f"variable along the old dimension {current_name!r}"
                )
        result_dims = {dims_dict.get(dim, dim) for dim in self.dims}
        coord_names = self._coord_names.copy()
        # variables a dimension is swapped onto become coordinates
        coord_names.update({dim for dim in dims_dict.values() if dim in self.variables})
        variables: dict[Hashable, Variable] = {}
        indexes: dict[Hashable, Index] = {}
        for current_name, current_variable in self.variables.items():
            dims = tuple(dims_dict.get(dim, dim) for dim in current_variable.dims)
            var: Variable
            if current_name in result_dims:
                var = current_variable.to_index_variable()
                var.dims = dims
                if current_name in self._indexes:
                    # already indexed: reuse the existing index object
                    indexes[current_name] = self._indexes[current_name]
                    variables[current_name] = var
                else:
                    # new dimension coordinate: build a default index for it
                    index, index_vars = create_default_index_implicit(var)
                    indexes.update(dict.fromkeys(index_vars, index))
                    variables.update(index_vars)
                    coord_names.update(index_vars)
            else:
                # no longer a dimension coordinate: demote to a base variable
                var = current_variable.to_base_variable()
                var.dims = dims
                variables[current_name] = var
        return self._replace_with_new_dims(variables, coord_names, indexes=indexes)
    def expand_dims(
        self,
        dim: Hashable | Sequence[Hashable] | Mapping[Any, Any] | None = None,
        axis: int | Sequence[int] | None = None,
        create_index_for_new_dim: bool = True,
        **dim_kwargs: Any,
    ) -> Self:
        """Return a new object with an additional axis (or axes) inserted at
        the corresponding position in the array shape. The new object is a
        view into the underlying array, not a copy.

        If dim is already a scalar coordinate, it will be promoted to a 1D
        coordinate consisting of a single value.

        The automatic creation of indexes to back new 1D coordinate variables
        controlled by the create_index_for_new_dim kwarg.

        Parameters
        ----------
        dim : hashable, sequence of hashable, mapping, or None
            Dimensions to include on the new variable. If provided as hashable
            or sequence of hashable, then dimensions are inserted with length
            1. If provided as a mapping, then the keys are the new dimensions
            and the values are either integers (giving the length of the new
            dimensions) or array-like (giving the coordinates of the new
            dimensions).
        axis : int, sequence of int, or None, default: None
            Axis position(s) where new axis is to be inserted (position(s) on
            the result array). If a sequence of integers is passed,
            multiple axes are inserted. In this case, dim arguments should be
            same length list. If axis=None is passed, all the axes will be
            inserted to the start of the result array.
        create_index_for_new_dim : bool, default: True
            Whether to create new ``PandasIndex`` objects when the object being expanded contains scalar variables with names in ``dim``.
        **dim_kwargs : int or sequence or ndarray
            The keywords are arbitrary dimensions being inserted and the values
            are either the lengths of the new dims (if int is given), or their
            coordinates. Note, this is an alternative to passing a dict to the
            dim kwarg and will only be used if dim is None.

        Returns
        -------
        expanded : Dataset
            This object, but with additional dimension(s).

        Examples
        --------
        >>> dataset = xr.Dataset({"temperature": ([], 25.0)})
        >>> dataset
        <xarray.Dataset> Size: 8B
        Dimensions:      ()
        Data variables:
            temperature  float64 8B 25.0

        # Expand the dataset with a new dimension called "time"

        >>> dataset.expand_dims(dim="time")
        <xarray.Dataset> Size: 8B
        Dimensions:      (time: 1)
        Dimensions without coordinates: time
        Data variables:
            temperature  (time) float64 8B 25.0

        # 1D data

        >>> temperature_1d = xr.DataArray([25.0, 26.5, 24.8], dims="x")
        >>> dataset_1d = xr.Dataset({"temperature": temperature_1d})
        >>> dataset_1d
        <xarray.Dataset> Size: 24B
        Dimensions:      (x: 3)
        Dimensions without coordinates: x
        Data variables:
            temperature  (x) float64 24B 25.0 26.5 24.8

        # Expand the dataset with a new dimension called "time" using axis argument

        >>> dataset_1d.expand_dims(dim="time", axis=0)
        <xarray.Dataset> Size: 24B
        Dimensions:      (time: 1, x: 3)
        Dimensions without coordinates: time, x
        Data variables:
            temperature  (time, x) float64 24B 25.0 26.5 24.8

        # 2D data

        >>> temperature_2d = xr.DataArray(np.random.rand(3, 4), dims=("y", "x"))
        >>> dataset_2d = xr.Dataset({"temperature": temperature_2d})
        >>> dataset_2d
        <xarray.Dataset> Size: 96B
        Dimensions:      (y: 3, x: 4)
        Dimensions without coordinates: y, x
        Data variables:
            temperature  (y, x) float64 96B 0.5488 0.7152 0.6028 ... 0.7917 0.5289

        # Expand the dataset with a new dimension called "time" using axis argument

        >>> dataset_2d.expand_dims(dim="time", axis=2)
        <xarray.Dataset> Size: 96B
        Dimensions:      (y: 3, x: 4, time: 1)
        Dimensions without coordinates: y, x, time
        Data variables:
            temperature  (y, x, time) float64 96B 0.5488 0.7152 0.6028 ... 0.7917 0.5289

        # Expand a scalar variable along a new dimension of the same name with and without creating a new index

        >>> ds = xr.Dataset(coords={"x": 0})
        >>> ds
        <xarray.Dataset> Size: 8B
        Dimensions:  ()
        Coordinates:
            x        int64 8B 0
        Data variables:
            *empty*

        >>> ds.expand_dims("x")
        <xarray.Dataset> Size: 8B
        Dimensions:  (x: 1)
        Coordinates:
          * x        (x) int64 8B 0
        Data variables:
            *empty*

        >>> ds.expand_dims("x").indexes
        Indexes:
            x        Index([0], dtype='int64', name='x')

        >>> ds.expand_dims("x", create_index_for_new_dim=False).indexes
        Indexes:
            *empty*

        See Also
        --------
        DataArray.expand_dims
        """
        # Normalize `dim` to a mutable {name: size-or-coords} mapping.
        if dim is None:
            pass
        elif isinstance(dim, Mapping):
            # We're later going to modify dim in place; don't tamper with
            # the input
            dim = dict(dim)
        elif isinstance(dim, int):
            raise TypeError(
                "dim should be hashable or sequence of hashables or mapping"
            )
        elif isinstance(dim, str) or not isinstance(dim, Sequence):
            dim = {dim: 1}
        elif isinstance(dim, Sequence):
            if len(dim) != len(set(dim)):
                raise ValueError("dims should not contain duplicate values.")
            dim = dict.fromkeys(dim, 1)
        dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims")
        assert isinstance(dim, MutableMapping)
        if axis is None:
            # default: prepend all new axes at the start of the result array
            axis = list(range(len(dim)))
        elif not isinstance(axis, Sequence):
            axis = [axis]
        if len(dim) != len(axis):
            raise ValueError("lengths of dim and axis should be identical.")
        for d in dim:
            if d in self.dims:
                raise ValueError(f"Dimension {d} already exists.")
            if d in self._variables and not utils.is_scalar(self._variables[d]):
                raise ValueError(f"{d} already exists as coordinate or variable name.")
        variables: dict[Hashable, Variable] = {}
        indexes: dict[Hashable, Index] = dict(self._indexes)
        coord_names = self._coord_names.copy()
        # If dim is a dict, then ensure that the values are either integers
        # or iterables.
        for k, v in dim.items():
            if hasattr(v, "__iter__"):
                # If the value for the new dimension is an iterable, then
                # save the coordinates to the variables dict, and set the
                # value within the dim dict to the length of the iterable
                # for later use.
                if create_index_for_new_dim:
                    index = PandasIndex(v, k)
                    indexes[k] = index
                    name_and_new_1d_var = index.create_variables()
                else:
                    name_and_new_1d_var = {k: Variable(data=v, dims=k)}
                variables.update(name_and_new_1d_var)
                coord_names.add(k)
                dim[k] = variables[k].size
            elif isinstance(v, int):
                pass  # Do nothing if the dimensions value is just an int
            else:
                raise TypeError(
                    f"The value of new dimension {k} must be an iterable or an int"
                )
        for k, v in self._variables.items():
            if k not in dim:
                if k in coord_names:  # Do not change coordinates
                    variables[k] = v
                else:
                    result_ndim = len(v.dims) + len(axis)
                    for a in axis:
                        if a < -result_ndim or result_ndim - 1 < a:
                            raise IndexError(
                                f"Axis {a} of variable {k} is out of bounds of the "
                                f"expanded dimension size {result_ndim}"
                            )
                    axis_pos = [a if a >= 0 else result_ndim + a for a in axis]
                    if len(axis_pos) != len(set(axis_pos)):
                        raise ValueError("axis should not contain duplicate values")
                    # We need to sort them to make sure `axis` equals to the
                    # axis positions of the result array.
                    zip_axis_dim = sorted(zip(axis_pos, dim.items(), strict=True))
                    all_dims = list(zip(v.dims, v.shape, strict=True))
                    for d, c in zip_axis_dim:
                        all_dims.insert(d, c)
                    variables[k] = v.set_dims(dict(all_dims))
            elif k not in variables:
                if k in coord_names and create_index_for_new_dim:
                    # If dims includes a label of a non-dimension coordinate,
                    # it will be promoted to a 1D coordinate with a single value.
                    index, index_vars = create_default_index_implicit(v.set_dims(k))
                    indexes[k] = index
                    variables.update(index_vars)
                else:
                    if create_index_for_new_dim:
                        warnings.warn(
                            f"No index created for dimension {k} because variable {k} is not a coordinate. "
                            f"To create an index for {k}, please first call `.set_coords('{k}')` on this object.",
                            UserWarning,
                            stacklevel=2,
                        )
                    # create 1D variable without creating a new index
                    new_1d_var = v.set_dims(k)
                    variables.update({k: new_1d_var})
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, indexes=indexes
        )
    def set_index(
        self,
        indexes: Mapping[Any, Hashable | Sequence[Hashable]] | None = None,
        append: bool = False,
        **indexes_kwargs: Hashable | Sequence[Hashable],
    ) -> Self:
        """Set Dataset (multi-)indexes using one or more existing coordinates
        or variables.

        This legacy method is limited to pandas (multi-)indexes and
        1-dimensional "dimension" coordinates. See
        :py:meth:`~Dataset.set_xindex` for setting a pandas or a custom
        Xarray-compatible index from one or more arbitrary coordinates.

        Parameters
        ----------
        indexes : {dim: index, ...}
            Mapping from names matching dimensions and values given
            by (lists of) the names of existing coordinates or variables to set
            as new (multi-)index.
        append : bool, default: False
            If True, append the supplied index(es) to the existing index(es).
            Otherwise replace the existing index(es) (default).
        **indexes_kwargs : optional
            The keyword arguments form of ``indexes``.
            One of indexes or indexes_kwargs must be provided.

        Returns
        -------
        obj : Dataset
            Another dataset, with this dataset's data but replaced coordinates.

        Examples
        --------
        >>> arr = xr.DataArray(
        ...     data=np.ones((2, 3)),
        ...     dims=["x", "y"],
        ...     coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])},
        ... )
        >>> ds = xr.Dataset({"v": arr})
        >>> ds
        <xarray.Dataset> Size: 104B
        Dimensions:  (x: 2, y: 3)
        Coordinates:
          * x        (x) int64 16B 0 1
          * y        (y) int64 24B 0 1 2
            a        (x) int64 16B 3 4
        Data variables:
            v        (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0
        >>> ds.set_index(x="a")
        <xarray.Dataset> Size: 88B
        Dimensions:  (x: 2, y: 3)
        Coordinates:
          * x        (x) int64 16B 3 4
          * y        (y) int64 24B 0 1 2
        Data variables:
            v        (x, y) float64 48B 1.0 1.0 1.0 1.0 1.0 1.0

        See Also
        --------
        Dataset.reset_index
        Dataset.set_xindex
        Dataset.swap_dims
        """
        dim_coords = either_dict_or_kwargs(indexes, indexes_kwargs, "set_index")
        new_indexes: dict[Hashable, Index] = {}
        new_variables: dict[Hashable, Variable] = {}
        drop_indexes: set[Hashable] = set()
        drop_variables: set[Hashable] = set()
        replace_dims: dict[Hashable, Hashable] = {}
        all_var_names: set[Hashable] = set()
        for dim, _var_names in dim_coords.items():
            # accept a single name or a sequence of names per dimension
            if isinstance(_var_names, str) or not isinstance(_var_names, Sequence):
                var_names = [_var_names]
            else:
                var_names = list(_var_names)
            invalid_vars = set(var_names) - set(self._variables)
            if invalid_vars:
                raise ValueError(
                    ", ".join([str(v) for v in invalid_vars])
                    + " variable(s) do not exist"
                )
            all_var_names.update(var_names)
            drop_variables.update(var_names)
            # drop any pre-existing index involved and its corresponding coordinates
            index_coord_names = self.xindexes.get_all_coords(dim, errors="ignore")
            all_index_coord_names = set(index_coord_names)
            for k in var_names:
                all_index_coord_names.update(
                    self.xindexes.get_all_coords(k, errors="ignore")
                )
            drop_indexes.update(all_index_coord_names)
            drop_variables.update(all_index_coord_names)
            if len(var_names) == 1 and (not append or dim not in self._indexes):
                # single coordinate: build a plain PandasIndex
                var_name = var_names[0]
                var = self._variables[var_name]
                # an error with a better message will be raised for scalar variables
                # when creating the PandasIndex
                if var.ndim > 0 and var.dims != (dim,):
                    raise ValueError(
                        f"dimension mismatch: try setting an index for dimension {dim!r} with "
                        f"variable {var_name!r} that has dimensions {var.dims}"
                    )
                idx = PandasIndex.from_variables({dim: var}, options={})
                idx_vars = idx.create_variables({var_name: var})
                # trick to preserve coordinate order in this case
                if dim in self._coord_names:
                    drop_variables.remove(dim)
            else:
                # several coordinates (or appending): build/extend a multi-index
                if append:
                    current_variables = {
                        k: self._variables[k] for k in index_coord_names
                    }
                else:
                    current_variables = {}
                idx, idx_vars = PandasMultiIndex.from_variables_maybe_expand(
                    dim,
                    current_variables,
                    {k: self._variables[k] for k in var_names},
                )
                for n in idx.index.names:
                    replace_dims[n] = dim
            new_indexes.update(dict.fromkeys(idx_vars, idx))
            new_variables.update(idx_vars)
        # re-add deindexed coordinates (convert to base variables)
        for k in drop_variables:
            if (
                k not in new_variables
                and k not in all_var_names
                and k in self._coord_names
            ):
                new_variables[k] = self._variables[k].to_base_variable()
        indexes_: dict[Any, Index] = {
            k: v for k, v in self._indexes.items() if k not in drop_indexes
        }
        indexes_.update(new_indexes)
        variables = {
            k: v for k, v in self._variables.items() if k not in drop_variables
        }
        variables.update(new_variables)
        # update dimensions if necessary, GH: 3512
        for k, v in variables.items():
            if any(d in replace_dims for d in v.dims):
                new_dims = [replace_dims.get(d, d) for d in v.dims]
                variables[k] = v._replace(dims=new_dims)
        coord_names = self._coord_names - drop_variables | set(new_variables)
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, indexes=indexes_
        )
    def reset_index(
        self,
        dims_or_levels: Hashable | Sequence[Hashable],
        *,
        drop: bool = False,
    ) -> Self:
        """Reset the specified index(es) or multi-index level(s).

        This legacy method is specific to pandas (multi-)indexes and
        1-dimensional "dimension" coordinates. See the more generic
        :py:meth:`~Dataset.drop_indexes` and :py:meth:`~Dataset.set_xindex`
        method to respectively drop and set pandas or custom indexes for
        arbitrary coordinates.

        Parameters
        ----------
        dims_or_levels : Hashable or Sequence of Hashable
            Name(s) of the dimension(s) and/or multi-index level(s) that will
            be reset.
        drop : bool, default: False
            If True, remove the specified indexes and/or multi-index levels
            instead of extracting them as new coordinates (default: False).

        Returns
        -------
        obj : Dataset
            Another dataset, with this dataset's data but replaced coordinates.

        See Also
        --------
        Dataset.set_index
        Dataset.set_xindex
        Dataset.drop_indexes
        """
        if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):
            dims_or_levels = [dims_or_levels]
        invalid_coords = set(dims_or_levels) - set(self._indexes)
        if invalid_coords:
            raise ValueError(
                f"{tuple(invalid_coords)} are not coordinates with an index"
            )
        drop_indexes: set[Hashable] = set()
        drop_variables: set[Hashable] = set()
        # track index objects already handled so a shared (multi-)index
        # referenced via several names is processed only once
        seen: set[Index] = set()
        new_indexes: dict[Hashable, Index] = {}
        new_variables: dict[Hashable, Variable] = {}
        def drop_or_convert(var_names):
            # either drop the coordinates entirely or keep them as
            # plain (non-indexed) base variables, depending on `drop`
            if drop:
                drop_variables.update(var_names)
            else:
                base_vars = {
                    k: self._variables[k].to_base_variable() for k in var_names
                }
                new_variables.update(base_vars)
        for name in dims_or_levels:
            index = self._indexes[name]
            if index in seen:
                continue
            seen.add(index)
            idx_var_names = set(self.xindexes.get_all_coords(name))
            drop_indexes.update(idx_var_names)
            if isinstance(index, PandasMultiIndex):
                # special case for pd.MultiIndex
                level_names = index.index.names
                keep_level_vars = {
                    k: self._variables[k]
                    for k in level_names
                    if k not in dims_or_levels
                }
                if index.dim not in dims_or_levels and keep_level_vars:
                    # do not drop the multi-index completely
                    # instead replace it by a new (multi-)index with dropped level(s)
                    idx = index.keep_levels(keep_level_vars)
                    idx_vars = idx.create_variables(keep_level_vars)
                    new_indexes.update(dict.fromkeys(idx_vars, idx))
                    new_variables.update(idx_vars)
                    if not isinstance(idx, PandasMultiIndex):
                        # multi-index reduced to single index
                        # backward compatibility: unique level coordinate renamed to dimension
                        drop_variables.update(keep_level_vars)
                    drop_or_convert(
                        [k for k in level_names if k not in keep_level_vars]
                    )
                else:
                    # always drop the multi-index dimension variable
                    drop_variables.add(index.dim)
                    drop_or_convert(level_names)
            else:
                drop_or_convert(idx_var_names)
        indexes = {k: v for k, v in self._indexes.items() if k not in drop_indexes}
        indexes.update(new_indexes)
        variables = {
            k: v for k, v in self._variables.items() if k not in drop_variables
        }
        variables.update(new_variables)
        coord_names = self._coord_names - drop_variables
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, indexes=indexes
        )
    def set_xindex(
        self,
        coord_names: str | Sequence[Hashable],
        index_cls: type[Index] | None = None,
        **options,
    ) -> Self:
        """Set a new, Xarray-compatible index from one or more existing
        coordinate(s).

        Parameters
        ----------
        coord_names : str or list
            Name(s) of the coordinate(s) used to build the index.
            If several names are given, their order matters.
        index_cls : subclass of :class:`~xarray.indexes.Index`, optional
            The type of index to create. By default, try setting
            a ``PandasIndex`` if ``len(coord_names) == 1``,
            otherwise a ``PandasMultiIndex``.
        **options
            Options passed to the index constructor.

        Returns
        -------
        obj : Dataset
            Another dataset, with this dataset's data and with a new index.

        Raises
        ------
        TypeError
            If ``index_cls`` is not a subclass of ``xarray.Index``.
        ValueError
            If a given name is not a coordinate, or already has an index,
            or if the index created unexpected extra variables.
        """
        # the Sequence check is required for mypy
        if is_scalar(coord_names) or not isinstance(coord_names, Sequence):
            coord_names = [coord_names]
        if index_cls is None:
            # default index type depends on the number of coordinates
            if len(coord_names) == 1:
                index_cls = PandasIndex
            else:
                index_cls = PandasMultiIndex
        elif not issubclass(index_cls, Index):
            raise TypeError(f"{index_cls} is not a subclass of xarray.Index")
        invalid_coords = set(coord_names) - self._coord_names
        if invalid_coords:
            msg = ["invalid coordinate(s)"]
            no_vars = invalid_coords - set(self._variables)
            data_vars = invalid_coords - no_vars
            if no_vars:
                msg.append(f"those variables don't exist: {no_vars}")
            if data_vars:
                msg.append(
                    f"those variables are data variables: {data_vars}, use `set_coords` first"
                )
            raise ValueError("\n".join(msg))
        # we could be more clever here (e.g., drop-in index replacement if index
        # coordinates do not conflict), but let's not allow this for now
        indexed_coords = set(coord_names) & set(self._indexes)
        if indexed_coords:
            raise ValueError(
                f"those coordinates already have an index: {indexed_coords}"
            )
        coord_vars = {name: self._variables[name] for name in coord_names}
        index = index_cls.from_variables(coord_vars, options=options)
        new_coord_vars = index.create_variables(coord_vars)
        # special case for setting a pandas multi-index from level coordinates
        # TODO: remove it once we depreciate pandas multi-index dimension (tuple
        # elements) coordinate
        if isinstance(index, PandasMultiIndex):
            coord_names = [index.dim] + list(coord_names)
        # Check for extra variables that don't match the coordinate names
        extra_vars = set(new_coord_vars) - set(coord_names)
        if extra_vars:
            extra_vars_str = ", ".join(f"'{name}'" for name in extra_vars)
            coord_names_str = ", ".join(f"'{name}'" for name in coord_names)
            raise ValueError(
                f"The index created extra variables {extra_vars_str} that are not "
                f"in the list of coordinates {coord_names_str}. "
                f"Use a factory method pattern instead:\n"
                f"  index = {index_cls.__name__}.from_variables(ds, {list(coord_names)!r})\n"
                f"  coords = xr.Coordinates.from_xindex(index)\n"
                f"  ds = ds.assign_coords(coords)"
            )
        variables: dict[Hashable, Variable]
        indexes: dict[Hashable, Index]
        if len(coord_names) == 1:
            variables = self._variables.copy()
            indexes = self._indexes.copy()
            name = list(coord_names).pop()
            if name in new_coord_vars:
                variables[name] = new_coord_vars[name]
            indexes[name] = index
        else:
            # reorder variables and indexes so that coordinates having the same
            # index are next to each other
            variables = {}
            for name, var in self._variables.items():
                if name not in coord_names:
                    variables[name] = var
            indexes = {}
            for name, idx in self._indexes.items():
                if name not in coord_names:
                    indexes[name] = idx
            for name in coord_names:
                try:
                    variables[name] = new_coord_vars[name]
                except KeyError:
                    variables[name] = self._variables[name]
                indexes[name] = index
        return self._replace(
            variables=variables,
            coord_names=self._coord_names | set(coord_names),
            indexes=indexes,
        )
    def reorder_levels(
        self,
        dim_order: Mapping[Any, Sequence[int | Hashable]] | None = None,
        **dim_order_kwargs: Sequence[int | Hashable],
    ) -> Self:
        """Rearrange index levels using input order.

        Parameters
        ----------
        dim_order : dict-like of Hashable to Sequence of int or Hashable, optional
            Mapping from names matching dimensions and values given
            by lists representing new level orders. Every given dimension
            must have a multi-index.
        **dim_order_kwargs : Sequence of int or Hashable, optional
            The keyword arguments form of ``dim_order``.
            One of dim_order or dim_order_kwargs must be provided.

        Returns
        -------
        obj : Dataset
            Another dataset, with this dataset's data but replaced
            coordinates.

        Raises
        ------
        ValueError
            If a given dimension coordinate does not have a MultiIndex.
        """
        dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, "reorder_levels")
        variables = self._variables.copy()
        indexes = dict(self._indexes)
        new_indexes: dict[Hashable, Index] = {}
        new_variables: dict[Hashable, IndexVariable] = {}
        for dim, order in dim_order.items():
            index = self._indexes[dim]
            if not isinstance(index, PandasMultiIndex):
                raise ValueError(f"coordinate {dim} has no MultiIndex")
            # build a reordered multi-index and its coordinate variables
            level_vars = {k: self._variables[k] for k in order}
            idx = index.reorder_levels(level_vars)
            idx_vars = idx.create_variables(level_vars)
            new_indexes.update(dict.fromkeys(idx_vars, idx))
            new_variables.update(idx_vars)
        # replace the old (multi-)index entries and variables with the new ones
        indexes = {k: v for k, v in self._indexes.items() if k not in new_indexes}
        indexes.update(new_indexes)
        variables = {k: v for k, v in self._variables.items() if k not in new_variables}
        variables.update(new_variables)
        return self._replace(variables, indexes=indexes)
    def _get_stack_index(
        self,
        dim,
        multi=False,
        create_index=False,
    ) -> tuple[Index | None, dict[Hashable, Variable]]:
        """Used by stack and unstack to get one pandas (multi-)index among
        the indexed coordinates along dimension `dim`.

        If exactly one index is found, return it with its corresponding
        coordinate variables(s), otherwise return None and an empty dict.

        If `create_index=True`, create a new index if none is found or raise
        an error if multiple indexes are found.
        """
        stack_index: Index | None = None
        stack_coords: dict[Hashable, Variable] = {}
        # scan all 1-d indexed coordinates along `dim` for a usable index
        for name, index in self._indexes.items():
            var = self._variables[name]
            if (
                var.ndim == 1
                and var.dims[0] == dim
                and (
                    # stack: must be a single coordinate index
                    (not multi and not self.xindexes.is_multi(name))
                    # unstack: must be an index that implements .unstack
                    or (multi and type(index).unstack is not Index.unstack)
                )
            ):
                if stack_index is not None and index is not stack_index:
                    # more than one index found, stop
                    if create_index:
                        raise ValueError(
                            f"cannot stack dimension {dim!r} with `create_index=True` "
                            "and with more than one index found along that dimension"
                        )
                    return None, {}
                stack_index = index
                stack_coords[name] = var
        if create_index and stack_index is None:
            if dim in self._variables:
                var = self._variables[dim]
            else:
                # no real coordinate variable: fall back to a virtual one
                _, _, var = _get_virtual_variable(self._variables, dim, self.sizes)
            # dummy index (only `stack_coords` will be used to construct the multi-index)
            stack_index = PandasIndex([0], dim)
            stack_coords = {dim: var}
        return stack_index, stack_coords
    def _stack_once(
        self,
        dims: Sequence[Hashable | EllipsisType],
        new_dim: Hashable,
        index_cls: type[Index],
        create_index: bool | None = True,
    ) -> Self:
        """Stack the given dimensions into a single new dimension `new_dim`.

        Helper for :py:meth:`Dataset.stack`; handles one new dimension at a
        time. Optionally builds a multi-index (of type ``index_cls``) over
        the stacked coordinates.
        """
        if dims == ...:
            raise ValueError("Please use [...] for dims, rather than just ...")
        if ... in dims:
            # expand ellipsis to all dimensions not explicitly listed
            dims = list(infix_dims(dims, self.dims))
        new_variables: dict[Hashable, Variable] = {}
        stacked_var_names: list[Hashable] = []
        drop_indexes: list[Hashable] = []
        for name, var in self.variables.items():
            if any(d in var.dims for d in dims):
                # broadcast the variable over any stacked dims it is missing,
                # then collapse those dims into `new_dim`
                add_dims = [d for d in dims if d not in var.dims]
                vdims = list(var.dims) + add_dims
                shape = [self.sizes[d] for d in vdims]
                exp_var = var.set_dims(vdims, shape)
                stacked_var = exp_var.stack(**{new_dim: dims})
                new_variables[name] = stacked_var
                stacked_var_names.append(name)
            else:
                new_variables[name] = var.copy(deep=False)
        # drop indexes of stacked coordinates (if any)
        for name in stacked_var_names:
            drop_indexes += list(self.xindexes.get_all_coords(name, errors="ignore"))
        new_indexes = {}
        new_coord_names = set(self._coord_names)
        if create_index or create_index is None:
            product_vars: dict[Any, Variable] = {}
            for dim in dims:
                idx, idx_vars = self._get_stack_index(dim, create_index=create_index)
                if idx is not None:
                    product_vars.update(idx_vars)
            # only build the stacked index when every dim contributed one
            if len(product_vars) == len(dims):
                idx = index_cls.stack(product_vars, new_dim)
                new_indexes[new_dim] = idx
                new_indexes.update(dict.fromkeys(product_vars, idx))
                idx_vars = idx.create_variables(product_vars)
                # keep consistent multi-index coordinate order
                for k in idx_vars:
                    new_variables.pop(k, None)
                new_variables.update(idx_vars)
                new_coord_names.update(idx_vars)
        indexes = {k: v for k, v in self._indexes.items() if k not in drop_indexes}
        indexes.update(new_indexes)
        return self._replace_with_new_dims(
            new_variables, coord_names=new_coord_names, indexes=indexes
        )
@partial(deprecate_dims, old_name="dimensions")
def stack(
self,
dim: Mapping[Any, Sequence[Hashable | EllipsisType]] | None = None,
create_index: bool | None = True,
index_cls: type[Index] = PandasMultiIndex,
**dim_kwargs: Sequence[Hashable | EllipsisType],
) -> Self:
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and by default the corresponding
coordinate variables will be combined into a MultiIndex.
Parameters
----------
dim : mapping of hashable to sequence of hashable
Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new
dimensions, and the existing dimensions that they replace. An
ellipsis (`...`) will be replaced by all unlisted dimensions.
Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over
all dimensions.
create_index : bool or None, default: True
- True: create a multi-index for each of the stacked dimensions.
- False: don't create any index.
- None. create a multi-index only if exactly one single (1-d) coordinate
index is found for every dimension to stack.
index_cls: Index-class, default: PandasMultiIndex
Can be used to pass a custom multi-index type (must be an Xarray index that
implements `.stack()`). By default, a pandas multi-index wrapper is used.
**dim_kwargs
The keyword arguments form of ``dim``.
One of dim or dim_kwargs must be provided.
Returns
-------
stacked : Dataset
Dataset with stacked data.
See Also
--------
Dataset.unstack
"""
dim = either_dict_or_kwargs(dim, dim_kwargs, "stack")
result = self
for new_dim, dims in dim.items():
result = result._stack_once(dims, new_dim, index_cls, create_index)
return result
    def to_stacked_array(
        self,
        new_dim: Hashable,
        sample_dims: Collection[Hashable],
        variable_dim: Hashable = "variable",
        name: Hashable | None = None,
    ) -> DataArray:
        """Combine variables of differing dimensionality into a DataArray
        without broadcasting.

        This method is similar to Dataset.to_dataarray but does not broadcast the
        variables.

        Parameters
        ----------
        new_dim : hashable
            Name of the new stacked coordinate
        sample_dims : Collection of hashables
            List of dimensions that **will not** be stacked. Each array in the
            dataset must share these dimensions. For machine learning
            applications, these define the dimensions over which samples are
            drawn.
        variable_dim : hashable, default: "variable"
            Name of the level in the stacked coordinate which corresponds to
            the variables.
        name : hashable, optional
            Name of the new data array.

        Returns
        -------
        stacked : DataArray
            DataArray with the specified dimensions and data variables
            stacked together. The stacked coordinate is named ``new_dim``
            and represented by a MultiIndex object with a level containing the
            data variable names. The name of this level is controlled using
            the ``variable_dim`` argument.

        See Also
        --------
        Dataset.to_dataarray
        Dataset.stack
        DataArray.to_unstacked_dataset

        Examples
        --------
        >>> data = xr.Dataset(
        ...     data_vars={
        ...         "a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]),
        ...         "b": ("x", [6, 7]),
        ...     },
        ...     coords={"y": ["u", "v", "w"]},
        ... )
        >>> data
        <xarray.Dataset> Size: 76B
        Dimensions:  (x: 2, y: 3)
        Coordinates:
          * y        (y) <U1 12B 'u' 'v' 'w'
        Dimensions without coordinates: x
        Data variables:
            a        (x, y) int64 48B 0 1 2 3 4 5
            b        (x) int64 16B 6 7
        >>> data.to_stacked_array("z", sample_dims=["x"])
        <xarray.DataArray 'a' (x: 2, z: 4)> Size: 64B
        array([[0, 1, 2, 6],
               [3, 4, 5, 7]])
        Coordinates:
          * z         (z) object 32B MultiIndex
          * variable  (z) <U1 16B 'a' 'a' 'a' 'b'
          * y         (z) object 32B 'u' 'v' 'w' nan
        Dimensions without coordinates: x
        """
        from xarray.structure.concat import concat

        # add stacking dims by order of appearance
        stacking_dims_list: list[Hashable] = []
        for da in self.data_vars.values():
            for dim in da.dims:
                if dim not in sample_dims and dim not in stacking_dims_list:
                    stacking_dims_list.append(dim)
        stacking_dims = tuple(stacking_dims_list)
        # every data variable must carry every sample dim
        for key, da in self.data_vars.items():
            missing_sample_dims = set(sample_dims) - set(da.dims)
            if missing_sample_dims:
                raise ValueError(
                    "Variables in the dataset must contain all ``sample_dims`` "
                    f"({sample_dims!r}) but '{key}' misses {sorted(map(str, missing_sample_dims))}"
                )
        def stack_dataarray(da):
            # add missing dims/ coords and the name of the variable
            missing_stack_coords = {variable_dim: da.name}
            for dim in set(stacking_dims) - set(da.dims):
                missing_stack_coords[dim] = None
            missing_stack_dims = list(missing_stack_coords)
            return (
                da.assign_coords(**missing_stack_coords)
                .expand_dims(missing_stack_dims)
                .stack({new_dim: (variable_dim,) + stacking_dims})
            )
        # concatenate the arrays
        stackable_vars = [stack_dataarray(da) for da in self.data_vars.values()]
        data_array = concat(
            stackable_vars,
            dim=new_dim,
            data_vars="all",
            coords="different",
            compat="equals",
            join="outer",
        )
        if name is not None:
            data_array.name = name
        return data_array
    def _unstack_once(
        self,
        dim: Hashable,
        index_and_vars: tuple[Index, dict[Hashable, Variable]],
        fill_value,
        sparse: bool = False,
    ) -> Self:
        """Unstack ``dim`` via the fast per-variable path.

        Chosen by :py:meth:`Dataset.unstack` when all non-index variables are
        plain numpy arrays (which support assignment by index); otherwise a
        full reindex is used instead.
        """
        index, index_vars = index_and_vars
        variables: dict[Hashable, Variable] = {}
        indexes = {k: v for k, v in self._indexes.items() if k != dim}
        new_indexes, clean_index = index.unstack()
        indexes.update(new_indexes)
        for idx in new_indexes.values():
            variables.update(idx.create_variables(index_vars))
        for name, var in self.variables.items():
            if name not in index_vars:
                if dim in var.dims:
                    # fill value may be given per-variable via a mapping
                    if isinstance(fill_value, Mapping):
                        fill_value_ = fill_value.get(name, xrdtypes.NA)
                    else:
                        fill_value_ = fill_value
                    variables[name] = var._unstack_once(
                        index=clean_index,
                        dim=dim,
                        fill_value=fill_value_,
                        sparse=sparse,
                    )
                else:
                    variables[name] = var
        coord_names = set(self._coord_names) - {dim} | set(new_indexes)
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, indexes=indexes
        )
    def _unstack_full_reindex(
        self,
        dim: Hashable,
        index_and_vars: tuple[Index, dict[Hashable, Variable]],
        fill_value,
        sparse: bool,
    ) -> Self:
        """Unstack ``dim`` by reindexing onto the full product index.

        Slower but more general than :py:meth:`Dataset._unstack_once`; used
        when variables do not support assignment by index (dask, sparse, or
        other non-numpy duck arrays).
        """
        index, index_vars = index_and_vars
        variables: dict[Hashable, Variable] = {}
        indexes = {k: v for k, v in self._indexes.items() if k != dim}
        new_indexes, clean_index = index.unstack()
        indexes.update(new_indexes)
        new_index_variables = {}
        for idx in new_indexes.values():
            new_index_variables.update(idx.create_variables(index_vars))
        new_dim_sizes = {k: v.size for k, v in new_index_variables.items()}
        variables.update(new_index_variables)
        # take a shortcut in case the MultiIndex was not modified.
        full_idx = pd.MultiIndex.from_product(
            clean_index.levels, names=clean_index.names
        )
        if clean_index.equals(full_idx):
            obj = self
        else:
            # TODO: we may depreciate implicit re-indexing with a pandas.MultiIndex
            xr_full_idx = PandasMultiIndex(full_idx, dim)
            indexers = Indexes(
                dict.fromkeys(index_vars, xr_full_idx),
                xr_full_idx.create_variables(index_vars),
            )
            obj = self._reindex(
                indexers, copy=False, fill_value=fill_value, sparse=sparse
            )
        for name, var in obj.variables.items():
            if name not in index_vars:
                if dim in var.dims:
                    variables[name] = var.unstack({dim: new_dim_sizes})
                else:
                    variables[name] = var
        coord_names = set(self._coord_names) - {dim} | set(new_dim_sizes)
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, indexes=indexes
        )
    def unstack(
        self,
        dim: Dims = None,
        *,
        fill_value: Any = xrdtypes.NA,
        sparse: bool = False,
    ) -> Self:
        """
        Unstack existing dimensions corresponding to MultiIndexes into
        multiple new dimensions.

        New dimensions will be added at the end.

        Parameters
        ----------
        dim : str, Iterable of Hashable or None, optional
            Dimension(s) over which to unstack. By default unstacks all
            MultiIndexes.
        fill_value : scalar or dict-like, default: nan
            value to be filled. If a dict-like, maps variable names to
            fill values. If not provided or if the dict-like does not
            contain all variables, the dtype's NA value will be used.
        sparse : bool, default: False
            use sparse-array if True

        Returns
        -------
        unstacked : Dataset
            Dataset with unstacked data.

        See Also
        --------
        Dataset.stack
        """
        if dim is None:
            dims = list(self.dims)
        else:
            # normalize dim to a list (str is Iterable but means a single dim)
            if isinstance(dim, str) or not isinstance(dim, Iterable):
                dims = [dim]
            else:
                dims = list(dim)
            missing_dims = set(dims) - set(self.dims)
            if missing_dims:
                raise ValueError(
                    f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}"
                )
        # each specified dimension must have exactly one multi-index
        stacked_indexes: dict[Any, tuple[Index, dict[Hashable, Variable]]] = {}
        for d in dims:
            idx, idx_vars = self._get_stack_index(d, multi=True)
            if idx is not None:
                stacked_indexes[d] = idx, idx_vars
        if dim is None:
            # default: silently restrict to the dims that have a multi-index
            dims = list(stacked_indexes)
        else:
            # explicit dims without a single multi-index are an error
            non_multi_dims = set(dims) - set(stacked_indexes)
            if non_multi_dims:
                raise ValueError(
                    "cannot unstack dimensions that do not "
                    f"have exactly one multi-index: {tuple(non_multi_dims)}"
                )
        result = self.copy(deep=False)
        # we want to avoid allocating an object-dtype ndarray for a MultiIndex,
        # so we can't just access self.variables[v].data for every variable.
        # We only check the non-index variables.
        # https://github.com/pydata/xarray/issues/5902
        nonindexes = [
            self.variables[k] for k in set(self.variables) - set(self._indexes)
        ]
        # Notes for each of these cases:
        # 1. Dask arrays don't support assignment by index, which the fast unstack
        #    function requires.
        #    https://github.com/pydata/xarray/pull/4746#issuecomment-753282125
        # 2. Sparse doesn't currently support (though we could special-case it)
        #    https://github.com/pydata/sparse/issues/422
        # 3. pint requires checking if it's a NumPy array until
        #    https://github.com/pydata/xarray/pull/4751 is resolved,
        #    Once that is resolved, explicitly exclude pint arrays.
        #    pint doesn't implement `np.full_like` in a way that's
        #    currently compatible.
        sparse_array_type = array_type("sparse")
        needs_full_reindex = any(
            is_duck_dask_array(v.data)
            or isinstance(v.data, sparse_array_type)
            or not isinstance(v.data, np.ndarray)
            for v in nonindexes
        )
        for d in dims:
            if needs_full_reindex:
                result = result._unstack_full_reindex(
                    d, stacked_indexes[d], fill_value, sparse
                )
            else:
                result = result._unstack_once(d, stacked_indexes[d], fill_value, sparse)
        return result
def update(self, other: CoercibleMapping) -> None:
"""Update this dataset's variables with those from another dataset.
Just like :py:meth:`dict.update` this is a in-place operation.
For a non-inplace version, see :py:meth:`Dataset.merge`.
Parameters
----------
other : Dataset or mapping
Variables with which to update this dataset. One of:
- Dataset
- mapping {var name: DataArray}
- mapping {var name: Variable}
- mapping {var name: (dimension name, array-like)}
- mapping {var name: (tuple of dimension names, array-like)}
Raises
------
ValueError
If any dimensions would have inconsistent sizes in the updated
dataset.
See Also
--------
Dataset.assign
Dataset.merge
"""
merge_result = dataset_update_method(self, other)
self._replace(inplace=True, **merge_result._asdict())
    def merge(
        self,
        other: CoercibleMapping | DataArray,
        overwrite_vars: Hashable | Iterable[Hashable] = frozenset(),
        compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT,
        join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
        fill_value: Any = xrdtypes.NA,
        combine_attrs: CombineAttrsOptions = "override",
    ) -> Self:
        """Merge the arrays of two datasets into a single dataset.

        This method generally does not allow for overriding data, with the
        exception of attributes, which are ignored on the second dataset.
        Variables with the same name are checked for conflicts via the equals
        or identical methods.

        Parameters
        ----------
        other : Dataset or mapping
            Dataset or variables to merge with this dataset.
        overwrite_vars : hashable or iterable of hashable, optional
            If provided, update variables of these name(s) without checking for
            conflicts in this dataset.
        compat : {"identical", "equals", "broadcast_equals", \
                  "no_conflicts", "override", "minimal"}, default: "no_conflicts"
            String indicating how to compare variables of the same name for
            potential conflicts:

            - 'identical': all values, dimensions and attributes must be the
              same.
            - 'equals': all values and dimensions must be the same.
            - 'broadcast_equals': all values must be equal when variables are
              broadcast against each other to ensure common dimensions.
            - 'no_conflicts': only values which are not null in both datasets
              must be equal. The returned dataset then contains the combination
              of all non-null values.
            - 'override': skip comparing and pick variable from first dataset
            - 'minimal': drop conflicting coordinates
        join : {"outer", "inner", "left", "right", "exact", "override"}, \
               default: "outer"
            Method for joining ``self`` and ``other`` along shared dimensions:

            - 'outer': use the union of the indexes
            - 'inner': use the intersection of the indexes
            - 'left': use indexes from ``self``
            - 'right': use indexes from ``other``
            - 'exact': error instead of aligning non-equal indexes
            - 'override': use indexes from ``self`` that are the same size
              as those of ``other`` in that dimension
        fill_value : scalar or dict-like, optional
            Value to use for newly missing values. If a dict-like, maps
            variable names (including coordinates) to fill values.
        combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                         "override"} or callable, default: "override"
            A callable or a string indicating how to combine attrs of the objects being
            merged:

            - "drop": empty attrs on returned Dataset.
            - "identical": all attrs must be the same on every object.
            - "no_conflicts": attrs from all objects are combined, any that have
              the same name must also have the same value.
            - "drop_conflicts": attrs from all objects are combined, any that have
              the same name but different values are dropped.
            - "override": skip comparing and copy attrs from the first dataset to
              the result.

            If a callable, it must expect a sequence of ``attrs`` dicts and a context object
            as its only parameters.

        Returns
        -------
        merged : Dataset
            Merged dataset.

        Raises
        ------
        MergeError
            If any variables conflict (see ``compat``).

        See Also
        --------
        Dataset.update
        """
        from xarray.core.dataarray import DataArray

        # a DataArray merges like its single-variable dataset equivalent
        other = other.to_dataset() if isinstance(other, DataArray) else other
        merge_result = dataset_merge_method(
            self,
            other,
            overwrite_vars=overwrite_vars,
            compat=compat,
            join=join,
            fill_value=fill_value,
            combine_attrs=combine_attrs,
        )
        return self._replace(**merge_result._asdict())
def _assert_all_in_dataset(
self, names: Iterable[Hashable], virtual_okay: bool = False
) -> None:
bad_names = set(names) - set(self._variables)
if virtual_okay:
bad_names -= self.virtual_variables
if bad_names:
ordered_bad_names = [name for name in names if name in bad_names]
raise ValueError(
f"These variables cannot be found in this dataset: {ordered_bad_names}"
)
    def drop_vars(
        self,
        names: str | Iterable[Hashable] | Callable[[Self], str | Iterable[Hashable]],
        *,
        errors: ErrorOptions = "raise",
    ) -> Self:
        """Drop variables from this dataset.

        Parameters
        ----------
        names : Hashable or iterable of Hashable or Callable
            Name(s) of variables to drop. If a Callable, this object is passed as its
            only argument and its result is used.
        errors : {"raise", "ignore"}, default: "raise"
            If 'raise', raises a ValueError error if any of the variable
            passed are not in the dataset. If 'ignore', any given names that are in the
            dataset are dropped and no error is raised.

        Examples
        --------
        >>> dataset = xr.Dataset(
        ...     {
        ...         "temperature": (
        ...             ["time", "latitude", "longitude"],
        ...             [[[25.5, 26.3], [27.1, 28.0]]],
        ...         ),
        ...         "humidity": (
        ...             ["time", "latitude", "longitude"],
        ...             [[[65.0, 63.8], [58.2, 59.6]]],
        ...         ),
        ...         "wind_speed": (
        ...             ["time", "latitude", "longitude"],
        ...             [[[10.2, 8.5], [12.1, 9.8]]],
        ...         ),
        ...     },
        ...     coords={
        ...         "time": pd.date_range("2023-07-01", periods=1),
        ...         "latitude": [40.0, 40.2],
        ...         "longitude": [-75.0, -74.8],
        ...     },
        ... )
        >>> dataset
        <xarray.Dataset> Size: 136B
        Dimensions:      (time: 1, latitude: 2, longitude: 2)
        Coordinates:
          * time         (time) datetime64[ns] 8B 2023-07-01
          * latitude     (latitude) float64 16B 40.0 40.2
          * longitude    (longitude) float64 16B -75.0 -74.8
        Data variables:
            temperature  (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0
            humidity     (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6
            wind_speed   (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8

        Drop the 'humidity' variable

        >>> dataset.drop_vars(["humidity"])
        <xarray.Dataset> Size: 104B
        Dimensions:      (time: 1, latitude: 2, longitude: 2)
        Coordinates:
          * time         (time) datetime64[ns] 8B 2023-07-01
          * latitude     (latitude) float64 16B 40.0 40.2
          * longitude    (longitude) float64 16B -75.0 -74.8
        Data variables:
            temperature  (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0
            wind_speed   (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8

        Drop the 'humidity', 'temperature' variables

        >>> dataset.drop_vars(["humidity", "temperature"])
        <xarray.Dataset> Size: 72B
        Dimensions:     (time: 1, latitude: 2, longitude: 2)
        Coordinates:
          * time        (time) datetime64[ns] 8B 2023-07-01
          * latitude    (latitude) float64 16B 40.0 40.2
          * longitude   (longitude) float64 16B -75.0 -74.8
        Data variables:
            wind_speed  (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8

        Drop all indexes

        >>> dataset.drop_vars(lambda x: x.indexes)
        <xarray.Dataset> Size: 96B
        Dimensions:      (time: 1, latitude: 2, longitude: 2)
        Dimensions without coordinates: time, latitude, longitude
        Data variables:
            temperature  (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0
            humidity     (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6
            wind_speed   (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8

        Attempt to drop non-existent variable with errors="ignore"

        >>> dataset.drop_vars(["pressure"], errors="ignore")
        <xarray.Dataset> Size: 136B
        Dimensions:      (time: 1, latitude: 2, longitude: 2)
        Coordinates:
          * time         (time) datetime64[ns] 8B 2023-07-01
          * latitude     (latitude) float64 16B 40.0 40.2
          * longitude    (longitude) float64 16B -75.0 -74.8
        Data variables:
            temperature  (time, latitude, longitude) float64 32B 25.5 26.3 27.1 28.0
            humidity     (time, latitude, longitude) float64 32B 65.0 63.8 58.2 59.6
            wind_speed   (time, latitude, longitude) float64 32B 10.2 8.5 12.1 9.8

        Attempt to drop non-existent variable with errors="raise"

        >>> dataset.drop_vars(["pressure"], errors="raise")
        Traceback (most recent call last):
        ValueError: These variables cannot be found in this dataset: ['pressure']

        Raises
        ------
        ValueError
            Raised if you attempt to drop a variable which is not present, and the kwarg ``errors='raise'``.

        Returns
        -------
        dropped : Dataset

        See Also
        --------
        DataArray.drop_vars
        """
        if callable(names):
            names = names(self)
        # the Iterable check is required for mypy
        if is_scalar(names) or not isinstance(names, Iterable):
            names_set = {names}
        else:
            names_set = set(names)
        if errors == "raise":
            self._assert_all_in_dataset(names_set)
        # GH6505
        # dropping a single level of a MultiIndex is deprecated: collect the
        # sibling level/dim coordinates the caller should also drop
        other_names = set()
        for var in names_set:
            maybe_midx = self._indexes.get(var, None)
            if isinstance(maybe_midx, PandasMultiIndex):
                idx_coord_names = set(list(maybe_midx.index.names) + [maybe_midx.dim])
                idx_other_names = idx_coord_names - set(names_set)
                other_names.update(idx_other_names)
        if other_names:
            names_set |= set(other_names)
            emit_user_level_warning(
                f"Deleting a single level of a MultiIndex is deprecated. Previously, this deleted all levels of a MultiIndex. "
                f"Please also drop the following variables: {other_names!r} to avoid an error in the future.",
                DeprecationWarning,
            )
        assert_no_index_corrupted(self.xindexes, names_set)
        variables = {k: v for k, v in self._variables.items() if k not in names_set}
        coord_names = {k for k in self._coord_names if k in variables}
        indexes = {k: v for k, v in self._indexes.items() if k not in names_set}
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, indexes=indexes
        )
def drop_indexes(
self,
coord_names: Hashable | Iterable[Hashable],
*,
errors: ErrorOptions = "raise",
) -> Self:
"""Drop the indexes assigned to the given coordinates.
Parameters
----------
coord_names : hashable or iterable of hashable
Name(s) of the coordinate(s) for which to drop the index.
errors : {"raise", "ignore"}, default: "raise"
If 'raise', raises a ValueError error if any of the coordinates
passed have no index or are not in the dataset.
If 'ignore', no error is raised.
Returns
-------
dropped : Dataset
A new dataset with dropped indexes.
"""
# the Iterable check is required for mypy
if is_scalar(coord_names) or not isinstance(coord_names, Iterable):
coord_names = {coord_names}
else:
coord_names = set(coord_names)
if errors == "raise":
invalid_coords = coord_names - self._coord_names
if invalid_coords:
raise ValueError(
f"The coordinates {tuple(invalid_coords)} are not found in the "
f"dataset coordinates {tuple(self.coords.keys())}"
)
unindexed_coords = set(coord_names) - set(self._indexes)
if unindexed_coords:
raise ValueError(
f"those coordinates do not have an index: {unindexed_coords}"
)
assert_no_index_corrupted(self.xindexes, coord_names, action="remove index(es)")
variables = {}
for name, var in self._variables.items():
if name in coord_names:
variables[name] = var.to_base_variable()
else:
variables[name] = var
indexes = {k: v for k, v in self._indexes.items() if k not in coord_names}
return self._replace(variables=variables, indexes=indexes)
    def drop(
        self,
        labels=None,
        dim=None,
        *,
        errors: ErrorOptions = "raise",
        **labels_kwargs,
    ) -> Self:
        """Backward compatible method based on `drop_vars` and `drop_sel`

        Using either `drop_vars` or `drop_sel` is encouraged

        See Also
        --------
        Dataset.drop_vars
        Dataset.drop_sel
        """
        if errors not in ["raise", "ignore"]:
            raise ValueError('errors must be either "raise" or "ignore"')
        # dict-like but not a plain dict (e.g. Dataset/Coordinates): treat as
        # variable names -> deprecated drop_vars path
        if is_dict_like(labels) and not isinstance(labels, dict):
            emit_user_level_warning(
                "dropping coordinates using `drop` is deprecated; use drop_vars.",
                DeprecationWarning,
            )
            return self.drop_vars(labels, errors=errors)
        if labels_kwargs or isinstance(labels, dict):
            if dim is not None:
                raise ValueError("cannot specify dim and dict-like arguments.")
            labels = either_dict_or_kwargs(labels, labels_kwargs, "drop")
        # scalar/iterable labels without a dim: interpreted as variable names
        if dim is None and (is_scalar(labels) or isinstance(labels, Iterable)):
            emit_user_level_warning(
                "dropping variables using `drop` is deprecated; use drop_vars.",
                DeprecationWarning,
            )
            # for mypy
            if is_scalar(labels):
                labels = [labels]
            return self.drop_vars(labels, errors=errors)
        # explicit dim: list-like labels along that dim -> drop_sel
        if dim is not None:
            warnings.warn(
                "dropping labels using list-like labels is deprecated; using "
                "dict-like arguments with `drop_sel`, e.g. `ds.drop_sel(dim=[labels]).",
                DeprecationWarning,
                stacklevel=2,
            )
            return self.drop_sel({dim: labels}, errors=errors, **labels_kwargs)
        emit_user_level_warning(
            "dropping labels using `drop` is deprecated; use `drop_sel` instead.",
            DeprecationWarning,
        )
        return self.drop_sel(labels, errors=errors)
    def drop_sel(
        self, labels=None, *, errors: ErrorOptions = "raise", **labels_kwargs
    ) -> Self:
        """Drop index labels from this dataset.

        Parameters
        ----------
        labels : mapping of hashable to Any
            Index labels to drop
        errors : {"raise", "ignore"}, default: "raise"
            If 'raise', raises a ValueError error if
            any of the index labels passed are not
            in the dataset. If 'ignore', any given labels that are in the
            dataset are dropped and no error is raised.
        **labels_kwargs : {dim: label, ...}, optional
            The keyword arguments form of ``dim`` and ``labels``

        Returns
        -------
        dropped : Dataset

        Examples
        --------
        >>> data = np.arange(6).reshape(2, 3)
        >>> labels = ["a", "b", "c"]
        >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels})
        >>> ds
        <xarray.Dataset> Size: 60B
        Dimensions:  (x: 2, y: 3)
        Coordinates:
          * y        (y) <U1 12B 'a' 'b' 'c'
        Dimensions without coordinates: x
        Data variables:
            A        (x, y) int64 48B 0 1 2 3 4 5
        >>> ds.drop_sel(y=["a", "c"])
        <xarray.Dataset> Size: 20B
        Dimensions:  (x: 2, y: 1)
        Coordinates:
          * y        (y) <U1 4B 'b'
        Dimensions without coordinates: x
        Data variables:
            A        (x, y) int64 16B 1 4
        >>> ds.drop_sel(y="b")
        <xarray.Dataset> Size: 40B
        Dimensions:  (x: 2, y: 2)
        Coordinates:
          * y        (y) <U1 8B 'a' 'c'
        Dimensions without coordinates: x
        Data variables:
            A        (x, y) int64 32B 0 2 3 5
        """
        from xarray.core.dataarray import DataArray

        if errors not in ["raise", "ignore"]:
            raise ValueError('errors must be either "raise" or "ignore"')
        labels = either_dict_or_kwargs(labels, labels_kwargs, "drop_sel")
        ds = self
        for dim, labels_for_dim in labels.items():
            # Don't cast to set, as it would harm performance when labels
            # is a large numpy array
            if utils.is_scalar(labels_for_dim):
                labels_for_dim = [labels_for_dim]
            # Most conversion to arrays is better handled in the indexer, however
            # DataArrays are a special case where the underlying libraries don't provide
            # a good conversion.
            if isinstance(labels_for_dim, DataArray):
                labels_for_dim = np.asarray(labels_for_dim)
            try:
                index = self.get_index(dim)
            except KeyError as err:
                raise ValueError(
                    f"dimension {dim!r} does not have coordinate labels"
                ) from err
            # select everything except the dropped labels
            new_index = index.drop(labels_for_dim, errors=errors)
            ds = ds.loc[{dim: new_index}]
        return ds
    def drop_isel(self, indexers=None, **indexers_kwargs) -> Self:
        """Drop index positions from this Dataset.

        Parameters
        ----------
        indexers : mapping of hashable to Any
            Index locations to drop
        **indexers_kwargs : {dim: position, ...}, optional
            The keyword arguments form of ``dim`` and ``positions``

        Returns
        -------
        dropped : Dataset

        Raises
        ------
        IndexError

        Examples
        --------
        >>> data = np.arange(6).reshape(2, 3)
        >>> labels = ["a", "b", "c"]
        >>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels})
        >>> ds
        <xarray.Dataset> Size: 60B
        Dimensions:  (x: 2, y: 3)
        Coordinates:
          * y        (y) <U1 12B 'a' 'b' 'c'
        Dimensions without coordinates: x
        Data variables:
            A        (x, y) int64 48B 0 1 2 3 4 5
        >>> ds.drop_isel(y=[0, 2])
        <xarray.Dataset> Size: 20B
        Dimensions:  (x: 2, y: 1)
        Coordinates:
          * y        (y) <U1 4B 'b'
        Dimensions without coordinates: x
        Data variables:
            A        (x, y) int64 16B 1 4
        >>> ds.drop_isel(y=1)
        <xarray.Dataset> Size: 40B
        Dimensions:  (x: 2, y: 2)
        Coordinates:
          * y        (y) <U1 8B 'a' 'c'
        Dimensions without coordinates: x
        Data variables:
            A        (x, y) int64 32B 0 2 3 5
        """
        indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "drop_isel")
        ds = self
        dimension_index = {}
        for dim, pos_for_dim in indexers.items():
            # Don't cast to set, as it would harm performance when labels
            # is a large numpy array
            if utils.is_scalar(pos_for_dim):
                pos_for_dim = [pos_for_dim]
            pos_for_dim = np.asarray(pos_for_dim)
            index = self.get_index(dim)
            # translate positions into the labels that remain
            new_index = index.delete(pos_for_dim)
            dimension_index[dim] = new_index
        ds = ds.loc[dimension_index]
        return ds
def drop_dims(
self,
drop_dims: str | Iterable[Hashable],
*,
errors: ErrorOptions = "raise",
) -> Self:
"""Drop dimensions and associated variables from this dataset.
Parameters
----------
drop_dims : str or Iterable of Hashable
Dimension or dimensions to drop.
errors : {"raise", "ignore"}, default: "raise"
If 'raise', raises a ValueError error if any of the
dimensions passed are not in the dataset. If 'ignore', any given
dimensions that are in the dataset are dropped and no error is raised.
Returns
-------
obj : Dataset
The dataset without the given dimensions (or any variables
containing those dimensions).
"""
if errors not in ["raise", "ignore"]:
raise ValueError('errors must be either "raise" or "ignore"')
if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable):
drop_dims = {drop_dims}
else:
drop_dims = set(drop_dims)
if errors == "raise":
missing_dims = drop_dims - set(self.dims)
if missing_dims:
raise ValueError(
f"Dimensions {tuple(missing_dims)} not found in data dimensions {tuple(self.dims)}"
)
drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims}
return self.drop_vars(drop_vars)
@deprecate_dims
def transpose(
self,
*dim: Hashable,
missing_dims: ErrorOptionsWithWarn = "raise",
) -> Self:
"""Return a new Dataset object with all array dimensions transposed.
Although the order of dimensions on each array will change, the dataset
dimensions themselves will remain in fixed (sorted) order.
Parameters
----------
*dim : hashable, optional
By default, reverse the dimensions on each array. Otherwise,
reorder the dimensions to this order.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
Dataset:
- "raise": raise an exception
- "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
transposed : Dataset
Each array in the dataset (including) coordinates will be
transposed to the given order.
Notes
-----
This operation returns a view of each array's data. It is
lazy for dask-backed DataArrays but not for numpy-backed DataArrays
-- the data will be fully loaded into memory.
See Also
--------
numpy.transpose
DataArray.transpose
"""
# Raise error if list is passed as dim
if (len(dim) > 0) and (isinstance(dim[0], list)):
list_fix = [f"{x!r}" if isinstance(x, str) else f"{x}" for x in dim[0]]
raise TypeError(
f"transpose requires dim to be passed as multiple arguments. Expected `{', '.join(list_fix)}`. Received `{dim[0]}` instead"
)
# Use infix_dims to check once for missing dimensions
if len(dim) != 0:
_ = list(infix_dims(dim, self.dims, missing_dims))
ds = self.copy()
for name, var in self._variables.items():
var_dims = tuple(d for d in dim if d in (var.dims + (...,)))
ds._variables[name] = var.transpose(*var_dims)
return ds
    def dropna(
        self,
        dim: Hashable,
        *,
        how: Literal["any", "all"] = "any",
        thresh: int | None = None,
        subset: Iterable[Hashable] | None = None,
    ) -> Self:
        """Returns a new dataset with dropped labels for missing values along
        the provided dimension.

        Parameters
        ----------
        dim : hashable
            Dimension along which to drop missing values. Dropping along
            multiple dimensions simultaneously is not yet supported.
        how : {"any", "all"}, default: "any"
            - any : if any NA values are present, drop that label
            - all : if all values are NA, drop that label
        thresh : int or None, optional
            If supplied, require this many non-NA values (summed over all the subset variables).
        subset : iterable of hashable or None, optional
            Which variables to check for missing values. By default, all
            variables in the dataset are checked.

        Examples
        --------
        >>> dataset = xr.Dataset(
        ...     {
        ...         "temperature": (
        ...             ["time", "location"],
        ...             [[23.4, 24.1], [np.nan, 22.1], [21.8, 24.2], [20.5, 25.3]],
        ...         )
        ...     },
        ...     coords={"time": [1, 2, 3, 4], "location": ["A", "B"]},
        ... )
        >>> dataset
        <xarray.Dataset> Size: 104B
        Dimensions:      (time: 4, location: 2)
        Coordinates:
          * time         (time) int64 32B 1 2 3 4
          * location     (location) <U1 8B 'A' 'B'
        Data variables:
            temperature  (time, location) float64 64B 23.4 24.1 nan ... 24.2 20.5 25.3

        Drop NaN values from the dataset

        >>> dataset.dropna(dim="time")
        <xarray.Dataset> Size: 80B
        Dimensions:      (time: 3, location: 2)
        Coordinates:
          * time         (time) int64 24B 1 3 4
          * location     (location) <U1 8B 'A' 'B'
        Data variables:
            temperature  (time, location) float64 48B 23.4 24.1 21.8 24.2 20.5 25.3

        Drop labels with any NaN values

        >>> dataset.dropna(dim="time", how="any")
        <xarray.Dataset> Size: 80B
        Dimensions:      (time: 3, location: 2)
        Coordinates:
          * time         (time) int64 24B 1 3 4
          * location     (location) <U1 8B 'A' 'B'
        Data variables:
            temperature  (time, location) float64 48B 23.4 24.1 21.8 24.2 20.5 25.3

        Drop labels with all NAN values

        >>> dataset.dropna(dim="time", how="all")
        <xarray.Dataset> Size: 104B
        Dimensions:      (time: 4, location: 2)
        Coordinates:
          * time         (time) int64 32B 1 2 3 4
          * location     (location) <U1 8B 'A' 'B'
        Data variables:
            temperature  (time, location) float64 64B 23.4 24.1 nan ... 24.2 20.5 25.3

        Drop labels with less than 2 non-NA values

        >>> dataset.dropna(dim="time", thresh=2)
        <xarray.Dataset> Size: 80B
        Dimensions:      (time: 3, location: 2)
        Coordinates:
          * time         (time) int64 24B 1 3 4
          * location     (location) <U1 8B 'A' 'B'
        Data variables:
            temperature  (time, location) float64 48B 23.4 24.1 21.8 24.2 20.5 25.3

        Returns
        -------
        Dataset

        See Also
        --------
        DataArray.dropna
        pandas.DataFrame.dropna
        """
        # TODO: consider supporting multiple dimensions? Or not, given that
        # there are some ugly edge cases, e.g., pandas's dropna differs
        # depending on the order of the supplied axes.
        if dim not in self.dims:
            raise ValueError(
                f"Dimension {dim!r} not found in data dimensions {tuple(self.dims)}"
            )
        if subset is None:
            subset = iter(self.data_vars)
        # count[i] = number of non-NA values at position i along dim, summed
        # over all subset variables; size = max possible count per position
        count = np.zeros(self.sizes[dim], dtype=np.int64)
        size = np.int_(0)  # for type checking
        for k in subset:
            array = self._variables[k]
            if dim in array.dims:
                dims = [d for d in array.dims if d != dim]
                count += to_numpy(array.count(dims).data)
                size += math.prod([self.sizes[d] for d in dims])
        if thresh is not None:
            mask = count >= thresh
        elif how == "any":
            # keep only positions where no value is missing
            mask = count == size
        elif how == "all":
            # keep positions with at least one non-missing value
            mask = count > 0
        elif how is not None:
            raise ValueError(f"invalid how option: {how}")
        else:
            raise TypeError("must specify how or thresh")
        return self.isel({dim: mask})
def fillna(self, value: Any) -> Self:
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray, DataArray, dict or Dataset
Used to fill all matching missing values in this dataset's data
variables. Scalars, ndarrays or DataArrays arguments are used to
fill all data with aligned coordinates (for DataArrays).
Dictionaries or datasets match data variables and then align
coordinates if necessary.
Returns
-------
Dataset
Examples
--------
>>> ds = xr.Dataset(
... {
... "A": ("x", [np.nan, 2, np.nan, 0]),
... "B": ("x", [3, 4, np.nan, 1]),
... "C": ("x", [np.nan, np.nan, np.nan, 5]),
... "D": ("x", [np.nan, 3, np.nan, 4]),
... },
... coords={"x": [0, 1, 2, 3]},
... )
>>> ds
<xarray.Dataset> Size: 160B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
Data variables:
A (x) float64 32B nan 2.0 nan 0.0
B (x) float64 32B 3.0 4.0 nan 1.0
C (x) float64 32B nan nan nan 5.0
D (x) float64 32B nan 3.0 nan 4.0
Replace all `NaN` values with 0s.
>>> ds.fillna(0)
<xarray.Dataset> Size: 160B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
Data variables:
A (x) float64 32B 0.0 2.0 0.0 0.0
B (x) float64 32B 3.0 4.0 0.0 1.0
C (x) float64 32B 0.0 0.0 0.0 5.0
D (x) float64 32B 0.0 3.0 0.0 4.0
Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively.
>>> values = {"A": 0, "B": 1, "C": 2, "D": 3}
>>> ds.fillna(value=values)
<xarray.Dataset> Size: 160B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
Data variables:
A (x) float64 32B 0.0 2.0 0.0 0.0
B (x) float64 32B 3.0 4.0 1.0 1.0
C (x) float64 32B 2.0 2.0 2.0 5.0
D (x) float64 32B 3.0 3.0 3.0 4.0
"""
if utils.is_dict_like(value):
value_keys = getattr(value, "data_vars", value).keys()
if not set(value_keys) <= set(self.data_vars.keys()):
raise ValueError(
"all variables in the argument to `fillna` "
"must be contained in the original dataset"
)
out = ops.fillna(self, value)
return out
def interpolate_na(
self,
dim: Hashable | None = None,
method: InterpOptions = "linear",
limit: int | None = None,
use_coordinate: bool | Hashable = True,
max_gap: (
int
| float
| str
| pd.Timedelta
| np.timedelta64
| datetime.timedelta
| None
) = None,
**kwargs: Any,
) -> Self:
"""Fill in NaNs by interpolating according to different methods.
Parameters
----------
dim : Hashable or None, optional
Specifies the dimension along which to interpolate.
method : {"linear", "nearest", "zero", "slinear", "quadratic", "cubic", "polynomial", \
"barycentric", "krogh", "pchip", "spline", "akima"}, default: "linear"
String indicating which method to use for interpolation:
- 'linear': linear interpolation. Additional keyword
arguments are passed to :py:func:`numpy.interp`
- 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial':
are passed to :py:func:`scipy.interpolate.interp1d`. If
``method='polynomial'``, the ``order`` keyword argument must also be
provided.
- 'barycentric', 'krogh', 'pchip', 'spline', 'akima': use their
respective :py:class:`scipy.interpolate` classes.
use_coordinate : bool or Hashable, default: True
Specifies which index to use as the x values in the interpolation
formulated as `y = f(x)`. If False, values are treated as if
equally-spaced along ``dim``. If True, the IndexVariable `dim` is
used. If ``use_coordinate`` is a string, it specifies the name of a
coordinate variable to use as the index.
limit : int, default: None
Maximum number of consecutive NaNs to fill. Must be greater than 0
or None for no limit. This filling is done regardless of the size of
the gap in the data. To only interpolate over gaps less than a given length,
see ``max_gap``.
max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta \
or None, default: None
Maximum size of gap, a continuous sequence of NaNs, that will be filled.
Use None for no limit. When interpolating along a datetime64 dimension
and ``use_coordinate=True``, ``max_gap`` can be one of the following:
- a string that is valid input for pandas.to_timedelta
- a :py:class:`numpy.timedelta64` object
- a :py:class:`pandas.Timedelta` object
- a :py:class:`datetime.timedelta` object
Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled
dimensions has not been implemented yet. Gap length is defined as the difference
between coordinate values at the first data point after a gap and the last value
before a gap. For gaps at the beginning (end), gap length is defined as the difference
between coordinate values at the first (last) valid data point and the first (last) NaN.
For example, consider::
<xarray.DataArray (x: 9)>
array([nan, nan, nan, 1., nan, nan, 4., nan, nan])
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 7 8
The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively
**kwargs : dict, optional
parameters passed verbatim to the underlying interpolation function
Returns
-------
interpolated: Dataset
Filled in Dataset.
Warning
--------
When passing fill_value as a keyword argument with method="linear", it does not use
``numpy.interp`` but it uses ``scipy.interpolate.interp1d``, which provides the fill_value parameter.
See Also
--------
numpy.interp
scipy.interpolate
Examples
--------
>>> ds = xr.Dataset(
... {
... "A": ("x", [np.nan, 2, 3, np.nan, 0]),
... "B": ("x", [3, 4, np.nan, 1, 7]),
... "C": ("x", [np.nan, np.nan, np.nan, 5, 0]),
... "D": ("x", [np.nan, 3, np.nan, -1, 4]),
... },
... coords={"x": [0, 1, 2, 3, 4]},
... )
>>> ds
<xarray.Dataset> Size: 200B
Dimensions: (x: 5)
Coordinates:
* x (x) int64 40B 0 1 2 3 4
Data variables:
A (x) float64 40B nan 2.0 3.0 nan 0.0
B (x) float64 40B 3.0 4.0 nan 1.0 7.0
C (x) float64 40B nan nan nan 5.0 0.0
D (x) float64 40B nan 3.0 nan -1.0 4.0
>>> ds.interpolate_na(dim="x", method="linear")
<xarray.Dataset> Size: 200B
Dimensions: (x: 5)
Coordinates:
* x (x) int64 40B 0 1 2 3 4
Data variables:
A (x) float64 40B nan 2.0 3.0 1.5 0.0
B (x) float64 40B 3.0 4.0 2.5 1.0 7.0
C (x) float64 40B nan nan nan 5.0 0.0
D (x) float64 40B nan 3.0 1.0 -1.0 4.0
>>> ds.interpolate_na(dim="x", method="linear", fill_value="extrapolate")
<xarray.Dataset> Size: 200B
Dimensions: (x: 5)
Coordinates:
* x (x) int64 40B 0 1 2 3 4
Data variables:
A (x) float64 40B 1.0 2.0 3.0 1.5 0.0
B (x) float64 40B 3.0 4.0 2.5 1.0 7.0
C (x) float64 40B 20.0 15.0 10.0 5.0 0.0
D (x) float64 40B 5.0 3.0 1.0 -1.0 4.0
"""
from xarray.core.missing import _apply_over_vars_with_dim, interp_na
new = _apply_over_vars_with_dim(
interp_na,
self,
dim=dim,
method=method,
limit=limit,
use_coordinate=use_coordinate,
max_gap=max_gap,
**kwargs,
)
return new
def ffill(self, dim: Hashable, limit: int | None = None) -> Self:
"""Fill NaN values by propagating values forward
*Requires bottleneck.*
Parameters
----------
dim : Hashable
Specifies the dimension along which to propagate values when filling.
limit : int or None, optional
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit. Must be None or greater than or equal
to axis length if filling along chunked axes (dimensions).
Examples
--------
>>> time = pd.date_range("2023-01-01", periods=10, freq="D")
>>> data = np.array(
... [1, np.nan, np.nan, np.nan, 5, np.nan, np.nan, 8, np.nan, 10]
... )
>>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time})
>>> dataset
<xarray.Dataset> Size: 160B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10
Data variables:
data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0
# Perform forward fill (ffill) on the dataset
>>> dataset.ffill(dim="time")
<xarray.Dataset> Size: 160B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10
Data variables:
data (time) float64 80B 1.0 1.0 1.0 1.0 5.0 5.0 5.0 8.0 8.0 10.0
# Limit the forward filling to a maximum of 2 consecutive NaN values
>>> dataset.ffill(dim="time", limit=2)
<xarray.Dataset> Size: 160B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10
Data variables:
data (time) float64 80B 1.0 1.0 1.0 nan 5.0 5.0 5.0 8.0 8.0 10.0
Returns
-------
Dataset
See Also
--------
Dataset.bfill
"""
from xarray.core.missing import _apply_over_vars_with_dim, ffill
new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
return new
def bfill(self, dim: Hashable, limit: int | None = None) -> Self:
"""Fill NaN values by propagating values backward
*Requires bottleneck.*
Parameters
----------
dim : Hashable
Specifies the dimension along which to propagate values when
filling.
limit : int or None, optional
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit. Must be None or greater than or equal
to axis length if filling along chunked axes (dimensions).
Examples
--------
>>> time = pd.date_range("2023-01-01", periods=10, freq="D")
>>> data = np.array(
... [1, np.nan, np.nan, np.nan, 5, np.nan, np.nan, 8, np.nan, 10]
... )
>>> dataset = xr.Dataset({"data": (("time",), data)}, coords={"time": time})
>>> dataset
<xarray.Dataset> Size: 160B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10
Data variables:
data (time) float64 80B 1.0 nan nan nan 5.0 nan nan 8.0 nan 10.0
# filled dataset, fills NaN values by propagating values backward
>>> dataset.bfill(dim="time")
<xarray.Dataset> Size: 160B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10
Data variables:
data (time) float64 80B 1.0 5.0 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0
# Limit the backward filling to a maximum of 2 consecutive NaN values
>>> dataset.bfill(dim="time", limit=2)
<xarray.Dataset> Size: 160B
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 80B 2023-01-01 2023-01-02 ... 2023-01-10
Data variables:
data (time) float64 80B 1.0 nan 5.0 5.0 5.0 8.0 8.0 8.0 10.0 10.0
Returns
-------
Dataset
See Also
--------
Dataset.ffill
"""
from xarray.core.missing import _apply_over_vars_with_dim, bfill
new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)
return new
def combine_first(self, other: Self) -> Self:
"""Combine two Datasets, default to data_vars of self.
The new coordinates follow the normal broadcasting and alignment rules
of ``join='outer'``. Vacant cells in the expanded coordinates are
filled with np.nan.
Parameters
----------
other : Dataset
Used to fill all matching missing values in this array.
Returns
-------
Dataset
"""
out = ops.fillna(self, other, join="outer", dataset_join="outer")
return out
    def reduce(
        self,
        func: Callable,
        dim: Dims = None,
        *,
        keep_attrs: bool | None = None,
        keepdims: bool = False,
        numeric_only: bool = False,
        **kwargs: Any,
    ) -> Self:
        """Reduce this dataset by applying `func` along some dimension(s).

        Parameters
        ----------
        func : callable
            Function which can be called in the form
            `f(x, axis=axis, **kwargs)` to return the result of reducing an
            np.ndarray over an integer valued axis.
        dim : str, Iterable of Hashable or None, optional
            Dimension(s) over which to apply `func`. By default `func` is
            applied over all dimensions.
        keep_attrs : bool or None, optional
            If True (default), the dataset's attributes (`attrs`) will be copied from
            the original object to the new one. If False, the new
            object will be returned without attributes.
        keepdims : bool, default: False
            If True, the dimensions which are reduced are left in the result
            as dimensions of size one. Coordinates that use these dimensions
            are removed.
        numeric_only : bool, default: False
            If True, only apply ``func`` to variables with a numeric dtype.
        **kwargs : Any
            Additional keyword arguments passed on to ``func``.

        Returns
        -------
        reduced : Dataset
            Dataset with this object's DataArrays replaced with new DataArrays
            of summarized data and the indicated dimension(s) removed.

        Examples
        --------
        >>> dataset = xr.Dataset(
        ...     {
        ...         "math_scores": (
        ...             ["student", "test"],
        ...             [[90, 85, 92], [78, 80, 85], [95, 92, 98]],
        ...         ),
        ...         "english_scores": (
        ...             ["student", "test"],
        ...             [[88, 90, 92], [75, 82, 79], [93, 96, 91]],
        ...         ),
        ...     },
        ...     coords={
        ...         "student": ["Alice", "Bob", "Charlie"],
        ...         "test": ["Test 1", "Test 2", "Test 3"],
        ...     },
        ... )

        # Calculate the 75th percentile of math scores for each student using np.percentile

        >>> percentile_scores = dataset.reduce(np.percentile, q=75, dim="test")
        >>> percentile_scores
        <xarray.Dataset> Size: 132B
        Dimensions:         (student: 3)
        Coordinates:
          * student         (student) <U7 84B 'Alice' 'Bob' 'Charlie'
        Data variables:
            math_scores     (student) float64 24B 91.0 82.5 96.5
            english_scores  (student) float64 24B 91.0 80.5 94.5
        """
        # xarray reductions are expressed in terms of named dimensions, so a
        # positional `axis` keyword (numpy-style) would be ambiguous here.
        if kwargs.get("axis") is not None:
            raise ValueError(
                "passing 'axis' to Dataset reduce methods is ambiguous."
                " Please use 'dim' instead."
            )

        # Normalize `dim` (None, a single hashable, or an iterable of them)
        # into a set of dimension names validated against this dataset.
        dims = parse_dims_as_set(dim, set(self._dims.keys()))

        if keep_attrs is None:
            keep_attrs = _get_keep_attrs(default=True)

        variables: dict[Hashable, Variable] = {}
        for name, var in self._variables.items():
            # The subset of the requested dims this variable actually has.
            reduce_dims = [d for d in var.dims if d in dims]
            if name in self.coords:
                # Coordinates are kept only when untouched by the reduction;
                # coordinates along reduced dims are dropped.
                if not reduce_dims:
                    variables[name] = var
            elif (
                # Some reduction functions (e.g. std, var) need to run on variables
                # that don't have the reduce dims: PR5393
                not pd.api.types.is_extension_array_dtype(var.dtype)  # noqa: TID251
                and (
                    not reduce_dims
                    or not numeric_only
                    or _is_numeric_aggregatable_dtype(var)
                )
            ):
                # prefer to aggregate over axis=None rather than
                # axis=(0, 1) if they will be equivalent, because
                # the former is often more efficient
                # keep single-element dims as list, to support Hashables
                reduce_maybe_single = (
                    None
                    if len(reduce_dims) == var.ndim and var.ndim != 1
                    else reduce_dims
                )
                variables[name] = var.reduce(
                    func,
                    dim=reduce_maybe_single,
                    keep_attrs=keep_attrs,
                    keepdims=keepdims,
                    **kwargs,
                )

        # Only retain coordinate names and indexes whose variables survived.
        coord_names = {k for k in self.coords if k in variables}
        indexes = {k: v for k, v in self._indexes.items() if k in variables}
        attrs = self.attrs if keep_attrs else None
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, attrs=attrs, indexes=indexes
        )
    def map(
        self,
        func: Callable,
        keep_attrs: bool | None = None,
        args: Iterable[Any] = (),
        **kwargs: Any,
    ) -> Self:
        """Apply a function to each data variable in this dataset

        Parameters
        ----------
        func : callable
            Function which can be called in the form `func(x, *args, **kwargs)`
            to transform each DataArray `x` in this dataset into another
            DataArray.
        keep_attrs : bool or None, optional
            If True, both the dataset's and variables' attributes (`attrs`) will be
            copied from the original objects to the new ones. If False, the new dataset
            and variables will be returned without copying the attributes.
        args : iterable, optional
            Positional arguments passed on to `func`.
        **kwargs : Any
            Keyword arguments passed on to `func`.

        Returns
        -------
        applied : Dataset
            Resulting dataset from applying ``func`` to each data variable.

        Examples
        --------
        >>> da = xr.DataArray(np.random.randn(2, 3))
        >>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])})
        >>> ds
        <xarray.Dataset> Size: 64B
        Dimensions:  (dim_0: 2, dim_1: 3, x: 2)
        Dimensions without coordinates: dim_0, dim_1, x
        Data variables:
            foo      (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773
            bar      (x) int64 16B -1 2
        >>> ds.map(np.fabs)
        <xarray.Dataset> Size: 64B
        Dimensions:  (dim_0: 2, dim_1: 3, x: 2)
        Dimensions without coordinates: dim_0, dim_1, x
        Data variables:
            foo      (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773
            bar      (x) float64 16B 1.0 2.0
        """
        from xarray.core.dataarray import DataArray

        if keep_attrs is None:
            keep_attrs = _get_keep_attrs(default=True)
        # Apply func to each data variable; maybe_wrap_array preserves the
        # DataArray wrapper when func returns a bare array of the same shape.
        variables = {
            k: maybe_wrap_array(v, func(v, *args, **kwargs))
            for k, v in self.data_vars.items()
        }
        # Convert non-DataArray values to DataArrays
        variables = {
            k: v if isinstance(v, DataArray) else DataArray(v)
            for k, v in variables.items()
        }
        # Collect the coordinates carried by the results (no re-alignment:
        # they are assumed consistent since they came from this dataset).
        coord_vars, indexes = merge_coordinates_without_align(
            [v.coords for v in variables.values()]
        )
        coords = Coordinates._construct_direct(coords=coord_vars, indexes=indexes)
        if keep_attrs:
            # Restore attrs from the originals; func's outputs may have
            # dropped or altered them.
            for k, v in variables.items():
                v._copy_attrs_from(self.data_vars[k])
            for k, v in coords.items():
                if k in self.coords:
                    v._copy_attrs_from(self.coords[k])
        else:
            # Explicitly clear attrs so nothing leaks through from func.
            for v in variables.values():
                v.attrs = {}
            for v in coords.values():
                v.attrs = {}
        attrs = self.attrs if keep_attrs else None
        return type(self)(variables, coords=coords, attrs=attrs)
def apply(
self,
func: Callable,
keep_attrs: bool | None = None,
args: Iterable[Any] = (),
**kwargs: Any,
) -> Self:
"""
Backward compatible implementation of ``map``
See Also
--------
Dataset.map
"""
warnings.warn(
"Dataset.apply may be deprecated in the future. Using Dataset.map is encouraged",
PendingDeprecationWarning,
stacklevel=2,
)
return self.map(func, keep_attrs, args, **kwargs)
def assign(
self,
variables: Mapping[Any, Any] | None = None,
**variables_kwargs: Any,
) -> Self:
"""Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
variables : mapping of hashable to Any
Mapping from variables names to the new values. If the new values
are callable, they are computed on the Dataset and assigned to new
data variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
**variables_kwargs
The keyword arguments form of ``variables``.
One of variables or variables_kwargs must be provided.
Returns
-------
ds : Dataset
A new Dataset with the new variables in addition to all the
existing variables.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign`` is
possible, but you cannot reference other variables created within the
same ``assign`` call.
The new assigned variables that replace existing coordinates in the
original dataset are still listed as coordinates in the returned
Dataset.
See Also
--------
pandas.DataFrame.assign
Examples
--------
>>> x = xr.Dataset(
... {
... "temperature_c": (
... ("lat", "lon"),
... 20 * np.random.rand(4).reshape(2, 2),
... ),
... "precipitation": (("lat", "lon"), np.random.rand(4).reshape(2, 2)),
... },
... coords={"lat": [10, 20], "lon": [150, 160]},
... )
>>> x
<xarray.Dataset> Size: 96B
Dimensions: (lat: 2, lon: 2)
Coordinates:
* lat (lat) int64 16B 10 20
* lon (lon) int64 16B 150 160
Data variables:
temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9
precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918
Where the value is a callable, evaluated on dataset:
>>> x.assign(temperature_f=lambda x: x.temperature_c * 9 / 5 + 32)
<xarray.Dataset> Size: 128B
Dimensions: (lat: 2, lon: 2)
Coordinates:
* lat (lat) int64 16B 10 20
* lon (lon) int64 16B 150 160
Data variables:
temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9
precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918
temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62
Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:
>>> x.assign(temperature_f=x["temperature_c"] * 9 / 5 + 32)
<xarray.Dataset> Size: 128B
Dimensions: (lat: 2, lon: 2)
Coordinates:
* lat (lat) int64 16B 10 20
* lon (lon) int64 16B 150 160
Data variables:
temperature_c (lat, lon) float64 32B 10.98 14.3 12.06 10.9
precipitation (lat, lon) float64 32B 0.4237 0.6459 0.4376 0.8918
temperature_f (lat, lon) float64 32B 51.76 57.75 53.7 51.62
"""
variables = either_dict_or_kwargs(variables, variables_kwargs, "assign")
data = self.copy()
# do all calculations first...
results: CoercibleMapping = data._calc_assign_results(variables)
# split data variables to add/replace vs. coordinates to replace
results_data_vars: dict[Hashable, CoercibleValue] = {}
results_coords: dict[Hashable, CoercibleValue] = {}
for k, v in results.items():
if k in data._coord_names:
results_coords[k] = v
else:
results_data_vars[k] = v
# ... and then assign
data.coords.update(results_coords)
data.update(results_data_vars)
return data
    def to_dataarray(
        self, dim: Hashable = "variable", name: Hashable | None = None
    ) -> DataArray:
        """Convert this dataset into an xarray.DataArray

        The data variables of this dataset will be broadcast against each other
        and stacked along the first axis of the new array. All coordinates of
        this dataset will remain coordinates.

        Parameters
        ----------
        dim : Hashable, default: "variable"
            Name of the new dimension.
        name : Hashable or None, optional
            Name of the new data array.

        Returns
        -------
        array : xarray.DataArray
        """
        from xarray.core.dataarray import DataArray

        # Broadcast all data variables to a common shape, then stack them
        # along a new leading axis (one slice per variable).
        data_vars = [self.variables[k] for k in self.data_vars]
        broadcast_vars = broadcast_variables(*data_vars)
        data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)

        dims = (dim,) + broadcast_vars[0].dims
        variable = Variable(dims, data, self.attrs, fastpath=True)

        # Keep existing coordinates/indexes, and add a new index along `dim`
        # whose labels are the data variable names.
        coords = {k: v.variable for k, v in self.coords.items()}
        indexes = filter_indexes_from_coords(self._indexes, set(coords))
        new_dim_index = PandasIndex(list(self.data_vars), dim)
        indexes[dim] = new_dim_index
        coords.update(new_dim_index.create_variables())

        return DataArray._construct_direct(variable, coords, name, indexes)
def to_array(
self, dim: Hashable = "variable", name: Hashable | None = None
) -> DataArray:
"""Deprecated version of to_dataarray"""
return self.to_dataarray(dim=dim, name=name)
def _normalize_dim_order(
self, dim_order: Sequence[Hashable] | None = None
) -> dict[Hashable, int]:
"""
Check the validity of the provided dimensions if any and return the mapping
between dimension name and their size.
Parameters
----------
dim_order: Sequence of Hashable or None, optional
Dimension order to validate (default to the alphabetical order if None).
Returns
-------
result : dict[Hashable, int]
Validated dimensions mapping.
"""
if dim_order is None:
dim_order = list(self.dims)
elif set(dim_order) != set(self.dims):
raise ValueError(
f"dim_order {dim_order} does not match the set of dimensions of this "
f"Dataset: {list(self.dims)}"
)
ordered_dims = {k: self.sizes[k] for k in dim_order}
return ordered_dims
def to_pandas(self) -> pd.Series | pd.DataFrame:
"""Convert this dataset into a pandas object without changing the number of dimensions.
The type of the returned object depends on the number of Dataset
dimensions:
* 0D -> `pandas.Series`
* 1D -> `pandas.DataFrame`
Only works for Datasets with 1 or fewer dimensions.
"""
if len(self.dims) == 0:
return pd.Series({k: v.item() for k, v in self.items()})
if len(self.dims) == 1:
return self.to_dataframe()
raise ValueError(
f"cannot convert Datasets with {len(self.dims)} dimensions into "
"pandas objects without changing the number of dimensions. "
"Please use Dataset.to_dataframe() instead."
)
    def _to_dataframe(self, ordered_dims: Mapping[Any, int]):
        """Build a pandas DataFrame from this dataset's non-index variables.

        ``ordered_dims`` maps dimension name -> size and fixes the order in
        which dimensions are flattened into the DataFrame's (Multi)Index.
        Extension-array columns are handled separately from plain numpy
        columns because they cannot be broadcast/reshaped.
        """
        from xarray.core.extension_array import PandasExtensionArray

        # All and only non-index arrays (whether data or coordinates) should
        # become columns in the output DataFrame. Excluding indexes rather
        # than dims handles the case of a MultiIndex along a single dimension.
        columns_in_order = [k for k in self.variables if k not in self.xindexes]
        non_extension_array_columns = [
            k
            for k in columns_in_order
            if not pd.api.types.is_extension_array_dtype(self.variables[k].data)  # noqa: TID251
        ]
        extension_array_columns = [
            k
            for k in columns_in_order
            if pd.api.types.is_extension_array_dtype(self.variables[k].data)  # noqa: TID251
        ]
        # Extension arrays whose dims differ from the requested dim order
        # cannot be broadcast; they are joined on their own index below.
        extension_array_columns_different_index = [
            k
            for k in extension_array_columns
            if set(self.variables[k].dims) != set(ordered_dims.keys())
        ]
        extension_array_columns_same_index = [
            k
            for k in extension_array_columns
            if k not in extension_array_columns_different_index
        ]
        # Broadcast each plain column to the full dim order, then flatten.
        data = [
            self._variables[k].set_dims(ordered_dims).values.reshape(-1)
            for k in non_extension_array_columns
        ]
        index = self.coords.to_index([*ordered_dims])
        broadcasted_df = pd.DataFrame(
            {
                **dict(zip(non_extension_array_columns, data, strict=True)),
                **{
                    c: self.variables[c].data
                    for c in extension_array_columns_same_index
                },
            },
            index=index,
        )
        for extension_array_column in extension_array_columns_different_index:
            extension_array = self.variables[extension_array_column].data
            # Index the column by the coordinate of its own (single) dim and
            # let pandas align it against the broadcast frame via join().
            index = self[
                self.variables[extension_array_column].dims[0]
            ].coords.to_index()
            extension_array_df = pd.DataFrame(
                {extension_array_column: extension_array},
                index=pd.Index(index.array)
                if isinstance(index, PandasExtensionArray)  # type: ignore[redundant-expr]
                else index,
            )
            extension_array_df.index.name = self.variables[extension_array_column].dims[
                0
            ]
            broadcasted_df = broadcasted_df.join(extension_array_df)
        # Restore the original column ordering after the joins.
        return broadcasted_df[columns_in_order]
def to_dataframe(self, dim_order: Sequence[Hashable] | None = None) -> pd.DataFrame:
"""Convert this dataset into a pandas.DataFrame.
Non-index variables in this dataset form the columns of the
DataFrame. The DataFrame is indexed by the Cartesian product of
this dataset's indices.
Parameters
----------
dim_order: Sequence of Hashable or None, optional
Hierarchical dimension order for the resulting dataframe. All
arrays are transposed to this order and then written out as flat
vectors in contiguous order, so the last dimension in this list
will be contiguous in the resulting DataFrame. This has a major
influence on which operations are efficient on the resulting
dataframe.
If provided, must include all dimensions of this dataset. By
default, dimensions are in the same order as in `Dataset.sizes`.
Returns
-------
result : DataFrame
Dataset as a pandas DataFrame.
"""
ordered_dims = self._normalize_dim_order(dim_order=dim_order)
return self._to_dataframe(ordered_dims=ordered_dims)
    def _set_sparse_data_from_dataframe(
        self, idx: pd.Index, arrays: list[tuple[Hashable, np.ndarray]], dims: tuple
    ) -> None:
        """Assign DataFrame columns to this dataset as sparse COO arrays.

        ``idx`` is the (possibly Multi-) index of the source DataFrame,
        ``arrays`` the (name, values) column pairs, and ``dims`` the
        dimension names the resulting variables will have. Mutates ``self``
        in place.
        """
        from sparse import COO

        if isinstance(idx, pd.MultiIndex):
            # MultiIndex codes are exactly the integer coordinates of each
            # row within the dense (levels-sized) hyper-rectangle.
            coords = np.stack([np.asarray(code) for code in idx.codes], axis=0)
            is_sorted = idx.is_monotonic_increasing
            shape = tuple(lev.size for lev in idx.levels)
        else:
            # Flat index: rows map one-to-one onto positions 0..n-1.
            coords = np.arange(idx.size).reshape(1, -1)
            is_sorted = True
            shape = (idx.size,)
        for name, values in arrays:
            # In virtually all real use cases, the sparse array will now have
            # missing values and needs a fill_value. For consistency, don't
            # special case the rare exceptions (e.g., dtype=int without a
            # MultiIndex).
            dtype, fill_value = xrdtypes.maybe_promote(values.dtype)
            values = np.asarray(values, dtype=dtype)
            data = COO(
                coords,
                values,
                shape,
                has_duplicates=False,
                sorted=is_sorted,
                fill_value=fill_value,
            )
            self[name] = (dims, data)
    def _set_numpy_data_from_dataframe(
        self, idx: pd.Index, arrays: list[tuple[Hashable, np.ndarray]], dims: tuple
    ) -> None:
        """Assign DataFrame columns to this dataset as dense numpy arrays.

        ``idx`` is the (possibly Multi-) index of the source DataFrame,
        ``arrays`` the (name, values) column pairs, and ``dims`` the
        dimension names the resulting variables will have. For a MultiIndex
        the columns are unstacked into the full Cartesian-product shape.
        Mutates ``self`` in place.
        """
        if not isinstance(idx, pd.MultiIndex):
            # Flat index: each column is already a 1-D variable.
            for name, values in arrays:
                self[name] = (dims, values)
            return

        # NB: similar, more general logic, now exists in
        # variable.unstack_once; we could consider combining them at some
        # point.
        shape = tuple(lev.size for lev in idx.levels)
        # The MultiIndex codes are the integer positions to scatter into.
        indexer = tuple(idx.codes)

        # We already verified that the MultiIndex has all unique values, so
        # there are missing values if and only if the size of output arrays is
        # larger that the index.
        missing_values = math.prod(shape) > idx.shape[0]

        for name, values in arrays:
            # NumPy indexing is much faster than using DataFrame.reindex() to
            # fill in missing values:
            # https://stackoverflow.com/a/35049899/809705
            if missing_values:
                dtype, fill_value = xrdtypes.maybe_promote(values.dtype)
                data = np.full(shape, fill_value, dtype)
            else:
                # If there are no missing values, keep the existing dtype
                # instead of promoting to support NA, e.g., keep integer
                # columns as integers.
                # TODO: consider removing this special case, which doesn't
                # exist for sparse=True.
                data = np.zeros(shape, values.dtype)
            data[indexer] = values
            self[name] = (dims, data)
    @classmethod
    def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self:
        """Convert a pandas.DataFrame into an xarray.Dataset

        Each column will be converted into an independent variable in the
        Dataset. If the dataframe's index is a MultiIndex, it will be expanded
        into a tensor product of one-dimensional indices (filling in missing
        values with NaN). If you rather preserve the MultiIndex use
        `xr.Dataset(df)`. This method will produce a Dataset very similar to
        that on which the 'to_dataframe' method was called, except with
        possibly redundant dimensions (since all dataset variables will have
        the same dimensionality).

        Parameters
        ----------
        dataframe : DataFrame
            DataFrame from which to copy data and indices.
        sparse : bool, default: False
            If true, create a sparse arrays instead of dense numpy arrays. This
            can potentially save a large amount of memory if the DataFrame has
            a MultiIndex. Requires the sparse package (sparse.pydata.org).

        Returns
        -------
        New Dataset.

        See Also
        --------
        xarray.DataArray.from_series
        pandas.DataFrame.to_xarray
        """
        # TODO: Add an option to remove dimensions along which the variables
        # are constant, to enable consistent serialization to/from a dataframe,
        # even if some variables have different dimensionality.
        if not dataframe.columns.is_unique:
            raise ValueError("cannot convert DataFrame with non-unique columns")

        # Drop unused categorical/MultiIndex levels before building dims.
        idx = remove_unused_levels_categories(dataframe.index)

        if isinstance(idx, pd.MultiIndex) and not idx.is_unique:
            raise ValueError(
                "cannot convert a DataFrame with a non-unique MultiIndex into xarray"
            )

        # Partition columns: plain numpy-convertible vs. pandas extension
        # arrays that xarray can keep as-is.
        arrays = []
        extension_arrays = []
        for k, v in dataframe.items():
            if not is_allowed_extension_array(v) or isinstance(
                v.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES
            ):
                arrays.append((k, np.asarray(v)))
            else:
                extension_arrays.append((k, v))

        indexes: dict[Hashable, Index] = {}
        index_vars: dict[Hashable, Variable] = {}

        if isinstance(idx, pd.MultiIndex):
            # One dimension per MultiIndex level; unnamed levels get
            # pandas-style "level_{n}" fallback names.
            dims = tuple(
                name if name is not None else f"level_{n}"  # type: ignore[redundant-expr,unused-ignore]
                for n, name in enumerate(idx.names)
            )
            for dim, lev in zip(dims, idx.levels, strict=True):
                xr_idx = PandasIndex(lev, dim)
                indexes[dim] = xr_idx
                index_vars.update(xr_idx.create_variables())
            # Unstacking a MultiIndex requires scattering into a dense array,
            # so extension arrays must be converted to numpy here.
            arrays += [(k, np.asarray(v)) for k, v in extension_arrays]
            extension_arrays = []
        else:
            index_name = idx.name if idx.name is not None else "index"
            dims = (index_name,)
            xr_idx = PandasIndex(idx, index_name)
            indexes[index_name] = xr_idx
            index_vars.update(xr_idx.create_variables())

        # Start from a dataset containing only the index variables, then fill
        # in the data columns via the numpy or sparse setter.
        obj = cls._construct_direct(index_vars, set(index_vars), indexes=indexes)

        if sparse:
            obj._set_sparse_data_from_dataframe(idx, arrays, dims)
        else:
            obj._set_numpy_data_from_dataframe(idx, arrays, dims)
        for name, extension_array in extension_arrays:
            obj[name] = (dims, extension_array)
        # Preserve the DataFrame's column order in the resulting dataset.
        return obj[dataframe.columns] if len(dataframe.columns) else obj
    def to_dask_dataframe(
        self, dim_order: Sequence[Hashable] | None = None, set_index: bool = False
    ) -> DaskDataFrame:
        """
        Convert this dataset into a dask.dataframe.DataFrame.

        The dimensions, coordinates and data variables in this dataset form
        the columns of the DataFrame.

        Parameters
        ----------
        dim_order : list, optional
            Hierarchical dimension order for the resulting dataframe. All
            arrays are transposed to this order and then written out as flat
            vectors in contiguous order, so the last dimension in this list
            will be contiguous in the resulting DataFrame. This has a major
            influence on which operations are efficient on the resulting dask
            dataframe.

            If provided, must include all dimensions of this dataset. By
            default, dimensions are sorted alphabetically.
        set_index : bool, default: False
            If set_index=True, the dask DataFrame is indexed by this dataset's
            coordinate. Since dask DataFrames do not support multi-indexes,
            set_index only works if the dataset only contains one dimension.

        Returns
        -------
        dask.dataframe.DataFrame
        """
        import dask.array as da
        import dask.dataframe as dd

        ordered_dims = self._normalize_dim_order(dim_order=dim_order)

        # Column order: dimensions first, then non-dimension coordinates,
        # then data variables.
        columns = list(ordered_dims)
        columns.extend(k for k in self.coords if k not in self.dims)
        columns.extend(self.data_vars)

        ds_chunks = self.chunks

        series_list = []
        df_meta = pd.DataFrame()
        for name in columns:
            try:
                var = self.variables[name]
            except KeyError:
                # dimension without a matching coordinate
                size = self.sizes[name]
                data = da.arange(size, chunks=size, dtype=np.int64)
                var = Variable((name,), data)

            # IndexVariable objects have a dummy .chunk() method
            if isinstance(var, IndexVariable):
                var = var.to_base_variable()

            # Make sure var is a dask array, otherwise the array can become too large
            # when it is broadcasted to several dimensions:
            if not is_duck_dask_array(var._data):
                var = var.chunk()

            # Broadcast then flatten the array:
            var_new_dims = var.set_dims(ordered_dims).chunk(ds_chunks)
            dask_array = var_new_dims._data.reshape(-1)

            series = dd.from_dask_array(dask_array, columns=name, meta=df_meta)
            series_list.append(series)

        df = dd.concat(series_list, axis=1)

        if set_index:
            dim_order = [*ordered_dims]

            if len(dim_order) == 1:
                (dim,) = dim_order
                df = df.set_index(dim)
            else:
                # triggers an error about multi-indexes, even if only one
                # dimension is passed
                df = df.set_index(dim_order)

        return df
def to_dict(
self, data: bool | Literal["list", "array"] = "list", encoding: bool = False
) -> dict[str, Any]:
"""
Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects
Useful for converting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarrray.open_dataset.
Parameters
----------
data : bool or {"list", "array"}, default: "list"
Whether to include the actual data in the dictionary. When set to
False, returns just the schema. If set to "array", returns data as
underlying array type. If set to "list" (or True for backwards
compatibility), returns data in lists of Python data types. Note
that for obtaining the "list" output efficiently, use
`ds.compute().to_dict(data="list")`.
encoding : bool, default: False
Whether to include the Dataset's encoding in the dictionary.
Returns
-------
d : dict
Dict with keys: "coords", "attrs", "dims", "data_vars" and optionally
"encoding".
See Also
--------
Dataset.from_dict
DataArray.to_dict
"""
d: dict = {
"coords": {},
"attrs": decode_numpy_dict_values(self.attrs),
"dims": dict(self.sizes),
"data_vars": {},
}
for k in self.coords:
d["coords"].update(
{k: self[k].variable.to_dict(data=data, encoding=encoding)}
)
for k in self.data_vars:
d["data_vars"].update(
{k: self[k].variable.to_dict(data=data, encoding=encoding)}
)
if encoding:
d["encoding"] = dict(self.encoding)
return d
@classmethod
def from_dict(cls, d: Mapping[Any, Any]) -> Self:
"""Convert a dictionary into an xarray.Dataset.
Parameters
----------
d : dict-like
Mapping with a minimum structure of
``{"var_0": {"dims": [..], "data": [..]}, \
...}``
Returns
-------
obj : Dataset
See also
--------
Dataset.to_dict
DataArray.from_dict
Examples
--------
>>> d = {
... "t": {"dims": ("t"), "data": [0, 1, 2]},
... "a": {"dims": ("t"), "data": ["a", "b", "c"]},
... "b": {"dims": ("t"), "data": [10, 20, 30]},
... }
>>> ds = xr.Dataset.from_dict(d)
>>> ds
<xarray.Dataset> Size: 60B
Dimensions: (t: 3)
Coordinates:
* t (t) int64 24B 0 1 2
Data variables:
a (t) <U1 12B 'a' 'b' 'c'
b (t) int64 24B 10 20 30
>>> d = {
... "coords": {
... "t": {"dims": "t", "data": [0, 1, 2], "attrs": {"units": "s"}}
... },
... "attrs": {"title": "air temperature"},
... "dims": "t",
... "data_vars": {
... "a": {"dims": "t", "data": [10, 20, 30]},
... "b": {"dims": "t", "data": ["a", "b", "c"]},
... },
... }
>>> ds = xr.Dataset.from_dict(d)
>>> ds
<xarray.Dataset> Size: 60B
Dimensions: (t: 3)
Coordinates:
* t (t) int64 24B 0 1 2
Data variables:
a (t) int64 24B 10 20 30
b (t) <U1 12B 'a' 'b' 'c'
Attributes:
title: air temperature
"""
variables: Iterable[tuple[Hashable, Any]]
if not {"coords", "data_vars"}.issubset(set(d)):
variables = d.items()
else:
import itertools
variables = itertools.chain(
d.get("coords", {}).items(), d.get("data_vars", {}).items()
)
try:
variable_dict = {
k: (v["dims"], v["data"], v.get("attrs"), v.get("encoding"))
for k, v in variables
}
except KeyError as e:
raise ValueError(
f"cannot convert dict without the key '{e.args[0]}'"
) from e
obj = cls(variable_dict)
# what if coords aren't dims?
coords = set(d.get("coords", {})) - set(d.get("dims", {}))
obj = obj.set_coords(coords)
obj.attrs.update(d.get("attrs", {}))
obj.encoding.update(d.get("encoding", {}))
return obj
def _unary_op(self, f, *args, **kwargs) -> Self:
variables = {}
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
for k, v in self._variables.items():
if k in self._coord_names:
variables[k] = v
else:
variables[k] = f(v, *args, **kwargs)
if keep_attrs:
variables[k]._attrs = v._attrs
attrs = self._attrs if keep_attrs else None
return self._replace_with_new_dims(variables, attrs=attrs)
def _binary_op(self, other, f, reflexive=False, join=None) -> Dataset:
from xarray.core.dataarray import DataArray
from xarray.core.datatree import DataTree
from xarray.core.groupby import GroupBy
if isinstance(other, DataTree | GroupBy):
return NotImplemented
align_type = OPTIONS["arithmetic_join"] if join is None else join
if isinstance(other, DataArray | Dataset):
self, other = align(self, other, join=align_type, copy=False)
g = f if not reflexive else lambda x, y: f(y, x)
ds = self._calculate_binary_op(g, other, join=align_type)
keep_attrs = _get_keep_attrs(default=True)
if keep_attrs:
# Combine attributes from both operands, dropping conflicts
from xarray.structure.merge import merge_attrs
self_attrs = self.attrs
other_attrs = getattr(other, "attrs", {})
ds.attrs = merge_attrs([self_attrs, other_attrs], "drop_conflicts")
return ds
def _inplace_binary_op(self, other, f) -> Self:
from xarray.core.dataarray import DataArray
from xarray.core.groupby import GroupBy
if isinstance(other, GroupBy):
raise TypeError(
"in-place operations between a Dataset and "
"a grouped object are not permitted"
)
# we don't actually modify arrays in-place with in-place Dataset
# arithmetic -- this lets us automatically align things
if isinstance(other, DataArray | Dataset):
other = other.reindex_like(self, copy=False)
g = ops.inplace_to_noninplace_op(f)
ds = self._calculate_binary_op(g, other, inplace=True)
self._replace_with_new_dims(
ds._variables,
ds._coord_names,
attrs=ds._attrs,
indexes=ds._indexes,
inplace=True,
)
return self
    def _calculate_binary_op(
        self, f, other, join="inner", inplace: bool = False
    ) -> Dataset:
        """Core of Dataset binary arithmetic: apply ``f`` variable-by-variable.

        ``other`` may be a Dataset, a dict-like, a DataArray/Variable-like
        object, or a scalar. ``join`` controls which data variables appear in
        the result when the operands' variable sets differ.
        """

        # Pairwise application over two variable mappings. Variables missing
        # from one side are combined with NaN (only for left/outer or
        # right/outer joins, respectively); "inner" silently drops them.
        def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
            if inplace and set(lhs_data_vars) != set(rhs_data_vars):
                raise ValueError(
                    "datasets must have the same data variables "
                    f"for in-place arithmetic operations: {list(lhs_data_vars)}, {list(rhs_data_vars)}"
                )
            dest_vars = {}
            for k in lhs_data_vars:
                if k in rhs_data_vars:
                    dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
                elif join in ["left", "outer"]:
                    # missing on the right: combine with NaN placeholder
                    dest_vars[k] = f(lhs_vars[k], np.nan)
            for k in rhs_data_vars:
                if k not in dest_vars and join in ["right", "outer"]:
                    # missing on the left: combine with NaN placeholder
                    dest_vars[k] = f(rhs_vars[k], np.nan)
            return dest_vars

        if utils.is_dict_like(other) and not isinstance(other, Dataset):
            # can't use our shortcut of doing the binary operation with
            # Variable objects, so apply over our data vars instead.
            new_data_vars = apply_over_both(
                self.data_vars, other, self.data_vars, other
            )
            return type(self)(new_data_vars)
        # Start from the merged coordinates, then fill in computed data vars.
        other_coords: Coordinates | None = getattr(other, "coords", None)
        ds = self.coords.merge(other_coords)
        if isinstance(other, Dataset):
            new_vars = apply_over_both(
                self.data_vars, other.data_vars, self.variables, other.variables
            )
        else:
            # Scalar or DataArray-like: broadcast the same operand to every
            # data variable.
            other_variable = getattr(other, "variable", other)
            new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars}
        ds._variables.update(new_vars)
        # recompute dims since the operation may have changed variable shapes
        ds._dims = calculate_dimensions(ds._variables)
        return ds
def _copy_attrs_from(self, other):
self.attrs = other.attrs
for v in other.variables:
if v in self.variables:
self.variables[v].attrs = other.variables[v].attrs
def diff(
self,
dim: Hashable,
n: int = 1,
*,
label: Literal["upper", "lower"] = "upper",
) -> Self:
"""Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : Hashable
Dimension over which to calculate the finite difference.
n : int, default: 1
The number of times values are differenced.
label : {"upper", "lower"}, default: "upper"
The new coordinate in dimension ``dim`` will have the
values of either the minuend's or subtrahend's coordinate
for values 'upper' and 'lower', respectively.
Returns
-------
difference : Dataset
The n-th order finite difference of this object.
Notes
-----
`n` matches numpy's behavior and is different from pandas' first argument named
`periods`.
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", [5, 5, 6, 6])})
>>> ds.diff("x")
<xarray.Dataset> Size: 24B
Dimensions: (x: 3)
Dimensions without coordinates: x
Data variables:
foo (x) int64 24B 0 1 0
>>> ds.diff("x", 2)
<xarray.Dataset> Size: 16B
Dimensions: (x: 2)
Dimensions without coordinates: x
Data variables:
foo (x) int64 16B 1 -1
See Also
--------
Dataset.differentiate
"""
if n == 0:
return self
if n < 0:
raise ValueError(f"order `n` must be non-negative but got {n}")
# prepare slices
slice_start = {dim: slice(None, -1)}
slice_end = {dim: slice(1, None)}
# prepare new coordinate
if label == "upper":
slice_new = slice_end
elif label == "lower":
slice_new = slice_start
else:
raise ValueError("The 'label' argument has to be either 'upper' or 'lower'")
indexes, index_vars = isel_indexes(self.xindexes, slice_new)
variables = {}
for name, var in self.variables.items():
if name in index_vars:
variables[name] = index_vars[name]
elif dim in var.dims:
if name in self.data_vars:
variables[name] = var.isel(slice_end) - var.isel(slice_start)
else:
variables[name] = var.isel(slice_new)
else:
variables[name] = var
difference = self._replace_with_new_dims(variables, indexes=indexes)
if n > 1:
return difference.diff(dim, n - 1)
else:
return difference
def shift(
self,
shifts: Mapping[Any, int] | None = None,
fill_value: Any = xrdtypes.NA,
**shifts_kwargs: int,
) -> Self:
"""Shift this dataset by an offset along one or more dimensions.
Only data variables are moved; coordinates stay in place. This is
consistent with the behavior of ``shift`` in pandas.
Values shifted from beyond array bounds will appear at one end of
each dimension, which are filled according to `fill_value`. For periodic
offsets instead see `roll`.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names (including coordinates) to fill values.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Dataset
Dataset with the same coordinates and attributes but shifted data
variables.
See Also
--------
roll
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", list("abcde"))})
>>> ds.shift(x=2)
<xarray.Dataset> Size: 40B
Dimensions: (x: 5)
Dimensions without coordinates: x
Data variables:
foo (x) object 40B nan nan 'a' 'b' 'c'
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
invalid = tuple(k for k in shifts if k not in self.dims)
if invalid:
raise ValueError(
f"Dimensions {invalid} not found in data dimensions {tuple(self.dims)}"
)
variables = {}
for name, var in self.variables.items():
if name in self.data_vars:
fill_value_ = (
fill_value.get(name, xrdtypes.NA)
if isinstance(fill_value, dict)
else fill_value
)
var_shifts = {k: v for k, v in shifts.items() if k in var.dims}
variables[name] = var.shift(fill_value=fill_value_, shifts=var_shifts)
else:
variables[name] = var
return self._replace(variables)
def roll(
self,
shifts: Mapping[Any, int] | None = None,
roll_coords: bool = False,
**shifts_kwargs: int,
) -> Self:
"""Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll treats the given dimensions as periodic, so will not
create any missing values to be filled.
Also unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : mapping of hashable to int, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
roll_coords : bool, default: False
Indicates whether to roll the coordinates by the offset too.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Dataset
Dataset with the same attributes but rolled data and coordinates.
See Also
--------
shift
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", list("abcde"))}, coords={"x": np.arange(5)})
>>> ds.roll(x=2)
<xarray.Dataset> Size: 60B
Dimensions: (x: 5)
Coordinates:
* x (x) int64 40B 0 1 2 3 4
Data variables:
foo (x) <U1 20B 'd' 'e' 'a' 'b' 'c'
>>> ds.roll(x=2, roll_coords=True)
<xarray.Dataset> Size: 60B
Dimensions: (x: 5)
Coordinates:
* x (x) int64 40B 3 4 0 1 2
Data variables:
foo (x) <U1 20B 'd' 'e' 'a' 'b' 'c'
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError(
f"Dimensions {invalid} not found in data dimensions {tuple(self.dims)}"
)
unrolled_vars: tuple[Hashable, ...]
if roll_coords:
indexes, index_vars = roll_indexes(self.xindexes, shifts)
unrolled_vars = ()
else:
indexes = dict(self._indexes)
index_vars = dict(self.xindexes.variables)
unrolled_vars = tuple(self.coords)
variables = {}
for k, var in self.variables.items():
if k in index_vars:
variables[k] = index_vars[k]
elif k not in unrolled_vars:
variables[k] = var.roll(
shifts={k: s for k, s in shifts.items() if k in var.dims}
)
else:
variables[k] = var
return self._replace(variables, indexes=indexes)
def sortby(
self,
variables: (
Hashable
| DataArray
| Sequence[Hashable | DataArray]
| Callable[[Self], Hashable | DataArray | list[Hashable | DataArray]]
),
ascending: bool = True,
) -> Self:
"""
Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension is
given, numpy's lexsort is performed along that dimension:
https://numpy.org/doc/stable/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables : Hashable, DataArray, sequence of Hashable or DataArray, or Callable
1D DataArray objects or name(s) of 1D variable(s) in coords whose values are
used to sort this array. If a callable, the callable is passed this object,
and the result is used as the value for cond.
ascending : bool, default: True
Whether to sort by ascending or descending order.
Returns
-------
sorted : Dataset
A new dataset where all the specified dims are sorted by dim
labels.
See Also
--------
DataArray.sortby
numpy.sort
pandas.sort_values
pandas.sort_index
Examples
--------
>>> ds = xr.Dataset(
... {
... "A": (("x", "y"), [[1, 2], [3, 4]]),
... "B": (("x", "y"), [[5, 6], [7, 8]]),
... },
... coords={"x": ["b", "a"], "y": [1, 0]},
... )
>>> ds.sortby("x")
<xarray.Dataset> Size: 88B
Dimensions: (x: 2, y: 2)
Coordinates:
* x (x) <U1 8B 'a' 'b'
* y (y) int64 16B 1 0
Data variables:
A (x, y) int64 32B 3 4 1 2
B (x, y) int64 32B 7 8 5 6
>>> ds.sortby(lambda x: -x["y"])
<xarray.Dataset> Size: 88B
Dimensions: (x: 2, y: 2)
Coordinates:
* x (x) <U1 8B 'b' 'a'
* y (y) int64 16B 1 0
Data variables:
A (x, y) int64 32B 1 2 3 4
B (x, y) int64 32B 5 6 7 8
"""
from xarray.core.dataarray import DataArray
if callable(variables):
variables = variables(self)
if not isinstance(variables, list):
variables = [variables]
arrays = [v if isinstance(v, DataArray) else self[v] for v in variables]
aligned_vars = align(self, *arrays, join="left")
aligned_self = cast("Self", aligned_vars[0])
aligned_other_vars = cast(tuple[DataArray, ...], aligned_vars[1:])
vars_by_dim = defaultdict(list)
for data_array in aligned_other_vars:
if data_array.ndim != 1:
raise ValueError("Input DataArray is not 1-D.")
(key,) = data_array.dims
vars_by_dim[key].append(data_array)
indices = {}
for key, arrays in vars_by_dim.items():
order = np.lexsort(tuple(reversed(arrays)))
indices[key] = order if ascending else order[::-1]
return aligned_self.isel(indices)
    def quantile(
        self,
        q: ArrayLike,
        dim: Dims = None,
        *,
        method: QuantileMethods = "linear",
        numeric_only: bool = False,
        keep_attrs: bool | None = None,
        skipna: bool | None = None,
        interpolation: QuantileMethods | None = None,
    ) -> Self:
        """Compute the qth quantile of the data along the specified dimension.
        Returns the qth quantiles(s) of the array elements for each variable
        in the Dataset.
        Parameters
        ----------
        q : float or array-like of float
            Quantile to compute, which must be between 0 and 1 inclusive.
        dim : str or Iterable of Hashable, optional
            Dimension(s) over which to apply quantile.
        method : str, default: "linear"
            This optional parameter specifies the interpolation method to use when the
            desired quantile lies between two data points. The options sorted by their R
            type as summarized in the H&F paper [1]_ are:
            1. "inverted_cdf"
            2. "averaged_inverted_cdf"
            3. "closest_observation"
            4. "interpolated_inverted_cdf"
            5. "hazen"
            6. "weibull"
            7. "linear" (default)
            8. "median_unbiased"
            9. "normal_unbiased"
            The first three methods are discontinuous. The following discontinuous
            variations of the default "linear" (7.) option are also available:
            * "lower"
            * "higher"
            * "midpoint"
            * "nearest"
            See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument
            was previously called "interpolation", renamed in accordance with numpy
            version 1.22.0.
        keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
            the original object to the new one. If False (default), the new
            object will be returned without attributes.
        numeric_only : bool, optional
            If True, only apply ``func`` to variables with a numeric dtype.
        skipna : bool, optional
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or skipna=True has not been
            implemented (object, datetime64 or timedelta64).
        Returns
        -------
        quantiles : Dataset
            If `q` is a single quantile, then the result is a scalar for each
            variable in data_vars. If multiple percentiles are given, first
            axis of the result corresponds to the quantile and a quantile
            dimension is added to the return Dataset. The other dimensions are
            the dimensions that remain after the reduction of the array.
        See Also
        --------
        numpy.nanquantile, numpy.quantile, pandas.Series.quantile, DataArray.quantile
        Examples
        --------
        >>> ds = xr.Dataset(
        ...     {"a": (("x", "y"), [[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]])},
        ...     coords={"x": [7, 9], "y": [1, 1.5, 2, 2.5]},
        ... )
        >>> ds.quantile(0)  # or ds.quantile(0, dim=...)
        <xarray.Dataset> Size: 16B
        Dimensions:   ()
        Coordinates:
            quantile  float64 8B 0.0
        Data variables:
            a         float64 8B 0.7
        >>> ds.quantile(0, dim="x")
        <xarray.Dataset> Size: 72B
        Dimensions:   (y: 4)
        Coordinates:
          * y         (y) float64 32B 1.0 1.5 2.0 2.5
            quantile  float64 8B 0.0
        Data variables:
            a         (y) float64 32B 0.7 4.2 2.6 1.5
        >>> ds.quantile([0, 0.5, 1])
        <xarray.Dataset> Size: 48B
        Dimensions:   (quantile: 3)
        Coordinates:
          * quantile  (quantile) float64 24B 0.0 0.5 1.0
        Data variables:
            a         (quantile) float64 24B 0.7 3.4 9.4
        >>> ds.quantile([0, 0.5, 1], dim="x")
        <xarray.Dataset> Size: 152B
        Dimensions:   (quantile: 3, y: 4)
        Coordinates:
          * quantile  (quantile) float64 24B 0.0 0.5 1.0
          * y         (y) float64 32B 1.0 1.5 2.0 2.5
        Data variables:
            a         (quantile, y) float64 96B 0.7 4.2 2.6 1.5 3.6 ... 6.5 7.3 9.4 1.9
        References
        ----------
        .. [1] R. J. Hyndman and Y. Fan,
           "Sample quantiles in statistical packages,"
           The American Statistician, 50(4), pp. 361-365, 1996
        """
        # interpolation renamed to method in version 0.21.0
        # check here and in variable to avoid repeated warnings
        if interpolation is not None:
            warnings.warn(
                "The `interpolation` argument to quantile was renamed to `method`.",
                FutureWarning,
                stacklevel=2,
            )
            if method != "linear":
                raise TypeError("Cannot pass interpolation and method keywords!")
            method = interpolation
        # Normalize `dim` to a set of dimension names; `None` and `...` mean
        # "reduce over all dimensions".
        dims: set[Hashable]
        if isinstance(dim, str):
            dims = {dim}
        elif dim is None or dim is ...:
            dims = set(self.dims)
        else:
            dims = set(dim)
        invalid_dims = set(dims) - set(self.dims)
        if invalid_dims:
            raise ValueError(
                f"Dimensions {tuple(invalid_dims)} not found in data dimensions {tuple(self.dims)}"
            )
        q = np.asarray(q, dtype=np.float64)
        variables = {}
        for name, var in self.variables.items():
            reduce_dims = [d for d in var.dims if d in dims]
            if reduce_dims or not var.dims:
                # Only data variables (not coords) are reduced, and only if
                # they pass the numeric_only filter (bool counts as numeric).
                # Variables filtered out here are dropped from the result.
                if name not in self.coords and (
                    not numeric_only
                    or np.issubdtype(var.dtype, np.number)
                    or var.dtype == np.bool_
                ):
                    variables[name] = var.quantile(
                        q,
                        dim=reduce_dims,
                        method=method,
                        keep_attrs=keep_attrs,
                        skipna=skipna,
                    )
            else:
                # untouched: no reduced dimension applies to this variable
                variables[name] = var
        # construct the new dataset
        coord_names = {k for k in self.coords if k in variables}
        indexes = {k: v for k, v in self._indexes.items() if k in variables}
        if keep_attrs is None:
            keep_attrs = _get_keep_attrs(default=True)
        attrs = self.attrs if keep_attrs else None
        new = self._replace_with_new_dims(
            variables, coord_names=coord_names, attrs=attrs, indexes=indexes
        )
        return new.assign_coords(quantile=q)
def rank(
self,
dim: Hashable,
*,
pct: bool = False,
keep_attrs: bool | None = None,
) -> Self:
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within
that set.
Ranks begin at 1, not 0. If pct is True, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : Hashable
Dimension over which to compute rank.
pct : bool, default: False
If True, compute percentage ranks, otherwise compute integer ranks.
keep_attrs : bool or None, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new
object will be returned without attributes.
Returns
-------
ranked : Dataset
Variables that do not depend on `dim` are dropped.
"""
if not OPTIONS["use_bottleneck"]:
raise RuntimeError(
"rank requires bottleneck to be enabled."
" Call `xr.set_options(use_bottleneck=True)` to enable it."
)
if dim not in self.dims:
raise ValueError(
f"Dimension {dim!r} not found in data dimensions {tuple(self.dims)}"
)
variables = {}
for name, var in self.variables.items():
if name in self.data_vars:
if dim in var.dims:
variables[name] = var.rank(dim, pct=pct)
else:
variables[name] = var
coord_names = set(self.coords)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
attrs = self.attrs if keep_attrs else None
return self._replace(variables, coord_names, attrs=attrs)
    def differentiate(
        self,
        coord: Hashable,
        edge_order: Literal[1, 2] = 1,
        datetime_unit: DatetimeUnitOptions | None = None,
    ) -> Self:
        """Differentiate with the second order accurate central
        differences.
        .. note::
            This feature is limited to simple cartesian geometry, i.e. coord
            must be one dimensional.
        Parameters
        ----------
        coord : Hashable
            The coordinate to be used to compute the gradient.
        edge_order : {1, 2}, default: 1
            N-th order accurate differences at the boundaries.
        datetime_unit : None or {"W", "D", "h", "m", "s", "ms", \
            "us", "ns", "ps", "fs", "as", None}, default: None
            Unit to compute gradient. Only valid for datetime coordinate.
        Returns
        -------
        differentiated: Dataset
        See also
        --------
        numpy.gradient: corresponding numpy function
        """
        if coord not in self.variables and coord not in self.dims:
            variables_and_dims = tuple(set(self.variables.keys()).union(self.dims))
            raise ValueError(
                f"Coordinate {coord!r} not found in variables or dimensions {variables_and_dims}."
            )
        coord_var = self[coord].variable
        if coord_var.ndim != 1:
            raise ValueError(
                f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}"
                " dimensional"
            )
        dim = coord_var.dims[0]
        if _contains_datetime_like_objects(coord_var):
            # datetime64/timedelta64 coords: default the unit to the coord's
            # own storage unit; cftime-like objects fall back to seconds.
            if coord_var.dtype.kind in "mM" and datetime_unit is None:
                datetime_unit = cast(
                    "DatetimeUnitOptions", np.datetime_data(coord_var.dtype)[0]
                )
            elif datetime_unit is None:
                datetime_unit = "s"  # Default to seconds for cftime objects
            coord_var = coord_var._to_numeric(datetime_unit=datetime_unit)
        variables = {}
        for k, v in self.variables.items():
            # Only data variables that span `dim` are differentiated;
            # everything else (coords, unrelated vars) passes through.
            if k in self.data_vars and dim in v.dims and k not in self.coords:
                if _contains_datetime_like_objects(v):
                    # datetime-valued data must also be numeric before gradient
                    v = v._to_numeric(datetime_unit=datetime_unit)
                grad = duck_array_ops.gradient(
                    v.data,
                    coord_var.data,
                    edge_order=edge_order,
                    axis=v.get_axis_num(dim),
                )
                variables[k] = Variable(v.dims, grad)
            else:
                variables[k] = v
        return self._replace(variables)
def integrate(
self,
coord: Hashable | Sequence[Hashable],
datetime_unit: DatetimeUnitOptions = None,
) -> Self:
"""Integrate along the given coordinate using the trapezoidal rule.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
Parameters
----------
coord : hashable, or sequence of hashable
Coordinate(s) used for the integration.
datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
'ps', 'fs', 'as', None}, optional
Specify the unit if datetime coordinate is used.
Returns
-------
integrated : Dataset
See also
--------
DataArray.integrate
numpy.trapz : corresponding numpy function
Examples
--------
>>> ds = xr.Dataset(
... data_vars={"a": ("x", [5, 5, 6, 6]), "b": ("x", [1, 2, 1, 0])},
... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])},
... )
>>> ds
<xarray.Dataset> Size: 128B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
y (x) int64 32B 1 7 3 5
Data variables:
a (x) int64 32B 5 5 6 6
b (x) int64 32B 1 2 1 0
>>> ds.integrate("x")
<xarray.Dataset> Size: 16B
Dimensions: ()
Data variables:
a float64 8B 16.5
b float64 8B 3.5
>>> ds.integrate("y")
<xarray.Dataset> Size: 16B
Dimensions: ()
Data variables:
a float64 8B 20.0
b float64 8B 4.0
"""
if not isinstance(coord, list | tuple):
coord = (coord,)
result = self
for c in coord:
result = result._integrate_one(c, datetime_unit=datetime_unit)
return result
    def _integrate_one(self, coord, datetime_unit=None, cumulative=False):
        """Trapezoidal integration of data variables along one coordinate.

        Parameters
        ----------
        coord : Hashable
            Name of a 1-D coordinate (or dimension) to integrate along.
        datetime_unit : str, optional
            Unit used to convert datetime-like coordinates to numbers.
        cumulative : bool, default: False
            If True, compute the cumulative integral (dimension length is
            preserved); otherwise the integrated dimension is removed.
        """
        if coord not in self.variables and coord not in self.dims:
            variables_and_dims = tuple(set(self.variables.keys()).union(self.dims))
            raise ValueError(
                f"Coordinate {coord!r} not found in variables or dimensions {variables_and_dims}."
            )
        coord_var = self[coord].variable
        if coord_var.ndim != 1:
            raise ValueError(
                f"Coordinate {coord} must be 1 dimensional but is {coord_var.ndim}"
                " dimensional"
            )
        dim = coord_var.dims[0]
        if _contains_datetime_like_objects(coord_var):
            # datetime64/timedelta64 coords default to their own storage unit;
            # cftime-like objects fall back to seconds.
            if coord_var.dtype.kind in "mM" and datetime_unit is None:
                datetime_unit, _ = np.datetime_data(coord_var.dtype)
            elif datetime_unit is None:
                datetime_unit = "s"  # Default to seconds for cftime objects
            coord_var = coord_var._replace(
                data=datetime_to_numeric(coord_var.data, datetime_unit=datetime_unit)
            )
        variables = {}
        coord_names = set()
        for k, v in self.variables.items():
            if k in self.coords:
                # Coordinates that don't span `dim` survive; with cumulative
                # integration the dimension keeps its length, so they all do.
                if dim not in v.dims or cumulative:
                    variables[k] = v
                    coord_names.add(k)
            elif k in self.data_vars and dim in v.dims:
                coord_data = to_like_array(coord_var.data, like=v.data)
                if _contains_datetime_like_objects(v):
                    # datetime-valued data must be numeric before integrating
                    v = datetime_to_numeric(v, datetime_unit=datetime_unit)
                if cumulative:
                    integ = duck_array_ops.cumulative_trapezoid(
                        v.data, coord_data, axis=v.get_axis_num(dim)
                    )
                    v_dims = v.dims
                else:
                    integ = duck_array_ops.trapz(
                        v.data, coord_data, axis=v.get_axis_num(dim)
                    )
                    # plain integration collapses the integrated dimension
                    v_dims = list(v.dims)
                    v_dims.remove(dim)
                variables[k] = Variable(v_dims, integ)
            else:
                variables[k] = v
        indexes = {k: v for k, v in self._indexes.items() if k in variables}
        return self._replace_with_new_dims(
            variables, coord_names=coord_names, indexes=indexes
        )
def cumulative_integrate(
self,
coord: Hashable | Sequence[Hashable],
datetime_unit: DatetimeUnitOptions = None,
) -> Self:
"""Integrate along the given coordinate using the trapezoidal rule.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
The first entry of the cumulative integral of each variable is always 0, in
order to keep the length of the dimension unchanged between input and
output.
Parameters
----------
coord : hashable, or sequence of hashable
Coordinate(s) used for the integration.
datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
'ps', 'fs', 'as', None}, optional
Specify the unit if datetime coordinate is used.
Returns
-------
integrated : Dataset
See also
--------
DataArray.cumulative_integrate
scipy.integrate.cumulative_trapezoid : corresponding scipy function
Examples
--------
>>> ds = xr.Dataset(
... data_vars={"a": ("x", [5, 5, 6, 6]), "b": ("x", [1, 2, 1, 0])},
... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])},
... )
>>> ds
<xarray.Dataset> Size: 128B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
y (x) int64 32B 1 7 3 5
Data variables:
a (x) int64 32B 5 5 6 6
b (x) int64 32B 1 2 1 0
>>> ds.cumulative_integrate("x")
<xarray.Dataset> Size: 128B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
y (x) int64 32B 1 7 3 5
Data variables:
a (x) float64 32B 0.0 5.0 10.5 16.5
b (x) float64 32B 0.0 1.5 3.0 3.5
>>> ds.cumulative_integrate("y")
<xarray.Dataset> Size: 128B
Dimensions: (x: 4)
Coordinates:
* x (x) int64 32B 0 1 2 3
y (x) int64 32B 1 7 3 5
Data variables:
a (x) float64 32B 0.0 30.0 8.0 20.0
b (x) float64 32B 0.0 9.0 3.0 4.0
"""
if not isinstance(coord, list | tuple):
coord = (coord,)
result = self
for c in coord:
result = result._integrate_one(
c, datetime_unit=datetime_unit, cumulative=True
)
return result
@property
def real(self) -> Self:
"""
The real part of each data variable.
See Also
--------
numpy.ndarray.real
"""
return self.map(lambda x: x.real, keep_attrs=True)
@property
def imag(self) -> Self:
"""
The imaginary part of each data variable.
See Also
--------
numpy.ndarray.imag
"""
return self.map(lambda x: x.imag, keep_attrs=True)
plot = utils.UncachedAccessor(DatasetPlotAccessor)
def filter_by_attrs(self, **kwargs) -> Self:
"""Returns a ``Dataset`` with variables that match specific conditions.
Can pass in ``key=value`` or ``key=callable``. A Dataset is returned
containing only the variables for which all the filter tests pass.
These tests are either ``key=value`` for which the attribute ``key``
has the exact value ``value`` or the callable passed into
``key=callable`` returns True. The callable will be passed a single
value, either the value of the attribute ``key`` or ``None`` if the
DataArray does not have an attribute with the name ``key``.
Parameters
----------
**kwargs
key : str
Attribute name.
value : callable or obj
If value is a callable, it should return a boolean in the form
of bool = func(attr) where attr is da.attrs[key].
Otherwise, value will be compared to the each
DataArray's attrs[key].
Returns
-------
new : Dataset
New dataset with variables filtered by attribute.
Examples
--------
>>> temp = 15 + 8 * np.random.randn(2, 2, 3)
>>> precip = 10 * np.random.rand(2, 2, 3)
>>> lon = [[-99.83, -99.32], [-99.79, -99.23]]
>>> lat = [[42.25, 42.21], [42.63, 42.59]]
>>> dims = ["x", "y", "time"]
>>> temp_attr = dict(standard_name="air_potential_temperature")
>>> precip_attr = dict(standard_name="convective_precipitation_flux")
>>> ds = xr.Dataset(
... dict(
... temperature=(dims, temp, temp_attr),
... precipitation=(dims, precip, precip_attr),
... ),
... coords=dict(
... lon=(["x", "y"], lon),
... lat=(["x", "y"], lat),
... time=pd.date_range("2014-09-06", periods=3),
... reference_time=pd.Timestamp("2014-09-05"),
... ),
... )
Get variables matching a specific standard_name:
>>> ds.filter_by_attrs(standard_name="convective_precipitation_flux")
<xarray.Dataset> Size: 192B
Dimensions: (x: 2, y: 2, time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 32B 42.25 42.21 42.63 42.59
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: x, y
Data variables:
precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805
Get all variables that have a standard_name attribute:
>>> standard_name = lambda v: v is not None
>>> ds.filter_by_attrs(standard_name=standard_name)
<xarray.Dataset> Size: 288B
Dimensions: (x: 2, y: 2, time: 3)
Coordinates:
* time (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
lon (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 32B 42.25 42.21 42.63 42.59
reference_time datetime64[ns] 8B 2014-09-05
Dimensions without coordinates: x, y
Data variables:
temperature (x, y, time) float64 96B 29.11 18.2 22.83 ... 16.15 26.63
precipitation (x, y, time) float64 96B 5.68 9.256 0.7104 ... 4.615 7.805
"""
selection = []
for var_name, variable in self.variables.items():
has_value_flag = False
for attr_name, pattern in kwargs.items():
attr_value = variable.attrs.get(attr_name)
if (callable(pattern) and pattern(attr_value)) or attr_value == pattern:
has_value_flag = True
else:
has_value_flag = False
break
if has_value_flag is True:
selection.append(var_name)
return self[selection]
def unify_chunks(self) -> Self:
"""Unify chunk size along all chunked dimensions of this Dataset.
Returns
-------
Dataset with consistent chunk sizes for all dask-array variables
See Also
--------
dask.array.core.unify_chunks
"""
return unify_chunks(self)[0]
def map_blocks(
self,
func: Callable[..., T_Xarray],
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] | None = None,
template: DataArray | Dataset | None = None,
) -> T_Xarray:
"""
Apply a function to each block of this Dataset.
.. warning::
This method is experimental and its signature may change.
Parameters
----------
func : callable
User-provided function that accepts a Dataset as its first
parameter. The function will receive a subset or 'block' of this Dataset (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(subset_dataset, *subset_args, **kwargs)``.
This function must return either a single DataArray or a single Dataset.
This function cannot add a new chunked dimension.
args : sequence
Passed to func after unpacking and subsetting any xarray objects by blocks.
xarray objects in args must be aligned with obj, otherwise an error is raised.
kwargs : Mapping or None
Passed verbatim to func after unpacking. xarray objects, if any, will not be
subset to blocks. Passing dask collections in kwargs is not allowed.
template : DataArray, Dataset or None, optional
xarray object representing the final result after compute is called. If not provided,
the function will be first run on mocked-up data, that looks like this object but
has sizes 0, to determine properties of the returned object such as dtype,
variable names, attributes, new dimensions and new indexes (if any).
``template`` must be provided if the function changes the size of existing dimensions.
When provided, ``attrs`` on variables in `template` are copied over to the result. Any
``attrs`` set by ``func`` will be ignored.
Returns
-------
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when ``func`` needs to manipulate a whole xarray object
subset to each block. Each block is loaded into memory. In the more common case where
``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
If none of the variables in this object is backed by dask arrays, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
:func:`dask.array.map_blocks <dask.array.map_blocks>`
:func:`xarray.apply_ufunc <apply_ufunc>`
:func:`xarray.DataArray.map_blocks <xarray.DataArray.map_blocks>`
:doc:`xarray-tutorial:advanced/map_blocks/map_blocks`
Advanced Tutorial on map_blocks with dask
Examples
--------
Calculate an anomaly from climatology using ``.groupby()``. Using
``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
its indices, and its methods like ``.groupby()``.
>>> def calculate_anomaly(da, groupby_type="time.month"):
... gb = da.groupby(groupby_type)
... clim = gb.mean(dim="time")
... return gb - clim
...
>>> time = xr.date_range("1990-01", "1992-01", freq="ME", use_cftime=True)
>>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
>>> np.random.seed(123)
>>> array = xr.DataArray(
... np.random.rand(len(time)),
... dims=["time"],
... coords={"time": time, "month": month},
... ).chunk()
>>> ds = xr.Dataset({"a": array})
>>> ds.map_blocks(calculate_anomaly, template=ds).compute()
<xarray.Dataset> Size: 576B
Dimensions: (time: 24)
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12
Data variables:
a (time) float64 192B 0.1289 0.1132 -0.0856 ... 0.1906 -0.05901
Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
to the function being applied in ``xr.map_blocks()``:
>>> ds.map_blocks(
... calculate_anomaly,
... kwargs={"groupby_type": "time.year"},
... template=ds,
... )
<xarray.Dataset> Size: 576B
Dimensions: (time: 24)
Coordinates:
* time (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 192B dask.array<chunksize=(24,), meta=np.ndarray>
Data variables:
a (time) float64 192B dask.array<chunksize=(24,), meta=np.ndarray>
"""
from xarray.core.parallel import map_blocks
return map_blocks(func, self, args, kwargs, template)
def polyfit(
self,
dim: Hashable,
deg: int,
skipna: bool | None = None,
rcond: float | None = None,
w: Hashable | Any = None,
full: bool = False,
cov: bool | Literal["unscaled"] = False,
) -> Self:
"""
Least squares polynomial fit.
This replicates the behaviour of `numpy.polyfit` but differs by skipping
invalid values when `skipna = True`.
Parameters
----------
dim : hashable
Coordinate along which to fit the polynomials.
deg : int
Degree of the fitting polynomial.
skipna : bool or None, optional
If True, removes all invalid values before fitting each 1D slices of the array.
Default is True if data is stored in a dask.array or if there is any
invalid values, False otherwise.
rcond : float or None, optional
Relative condition number to the fit.
w : hashable or Any, optional
Weights to apply to the y-coordinate of the sample points.
Can be an array-like object or the name of a coordinate in the dataset.
full : bool, default: False
Whether to return the residuals, matrix rank and singular values in addition
to the coefficients.
cov : bool or "unscaled", default: False
Whether to return to the covariance matrix in addition to the coefficients.
The matrix is not scaled if `cov='unscaled'`.
Returns
-------
polyfit_results : Dataset
A single dataset which contains (for each "var" in the input dataset):
[var]_polyfit_coefficients
The coefficients of the best fit for each variable in this dataset.
[var]_polyfit_residuals
The residuals of the least-square computation for each variable (only included if `full=True`)
When the matrix rank is deficient, np.nan is returned.
[dim]_matrix_rank
The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`)
The rank is computed ignoring the NaN values that might be skipped.
[dim]_singular_values
The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`)
[var]_polyfit_covariance
The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`)
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is deficient.
The warning is not raised with in-memory (not dask) data and `full=True`.
See Also
--------
numpy.polyfit
numpy.polyval
xarray.polyval
"""
from xarray.computation.fit import polyfit as polyfit_impl
return polyfit_impl(self, dim, deg, skipna, rcond, w, full, cov)
def pad(
self,
pad_width: Mapping[Any, int | tuple[int, int]] | None = None,
mode: PadModeOptions = "constant",
stat_length: (
int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None
) = None,
constant_values: T_DatasetPadConstantValues | None = None,
end_values: int | tuple[int, int] | Mapping[Any, tuple[int, int]] | None = None,
reflect_type: PadReflectOptions = None,
keep_attrs: bool | None = None,
**pad_width_kwargs: Any,
) -> Self:
"""Pad this dataset along one or more dimensions.
.. warning::
This function is experimental and its behaviour is likely to change
especially regarding padding of dimension coordinates (or IndexVariables).
When using one of the modes ("edge", "reflect", "symmetric", "wrap"),
coordinates will be padded with the same mode, otherwise coordinates
are padded using the "constant" mode with fill_value dtypes.NA.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : {"constant", "edge", "linear_ramp", "maximum", "mean", "median", \
"minimum", "reflect", "symmetric", "wrap"}, default: "constant"
How to pad the DataArray (taken from numpy docs):
- "constant": Pads with a constant value.
- "edge": Pads with the edge values of array.
- "linear_ramp": Pads with the linear ramp between end_value and the
array edge value.
- "maximum": Pads with the maximum value of all or part of the
vector along each axis.
- "mean": Pads with the mean value of all or part of the
vector along each axis.
- "median": Pads with the median value of all or part of the
vector along each axis.
- "minimum": Pads with the minimum value of all or part of the
vector along each axis.
- "reflect": Pads with the reflection of the vector mirrored on
the first and last values of the vector along each axis.
- "symmetric": Pads with the reflection of the vector mirrored
along the edge of the array.
- "wrap": Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
stat_length : int, tuple or mapping of hashable to tuple, default: None
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique
statistic lengths along each dimension.
((before, after),) yields same before and after statistic lengths
for each dimension.
(stat_length,) or int is a shortcut for before = after = statistic
length for all axes.
Default is ``None``, to use the entire axis.
constant_values : scalar, tuple, mapping of dim name to scalar or tuple, or \
mapping of var name to scalar, tuple or to mapping of dim name to scalar or tuple, default: None
Used in 'constant'. The values to set the padded values for each data variable / axis.
``{var_1: {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}, ...
var_M: (before, after)}`` unique pad constants per data variable.
``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique
pad constants along each dimension.
``((before, after),)`` yields same before and after constants for each
dimension.
``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all dimensions.
Default is ``None``, pads with ``np.nan``.
end_values : scalar, tuple or mapping of hashable to tuple, default: None
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique
end values along each dimension.
``((before, after),)`` yields same before and after end values for each
axis.
``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all axes.
Default is None.
reflect_type : {"even", "odd", None}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
keep_attrs : bool or None, optional
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False, the new object
will be returned without attributes.
**pad_width_kwargs
The keyword arguments form of ``pad_width``.
One of ``pad_width`` or ``pad_width_kwargs`` must be provided.
Returns
-------
padded : Dataset
Dataset with the padded coordinates and data.
See Also
--------
Dataset.shift, Dataset.roll, Dataset.bfill, Dataset.ffill, numpy.pad, dask.array.pad
Notes
-----
By default when ``mode="constant"`` and ``constant_values=None``, integer types will be
promoted to ``float`` and padded with ``np.nan``. To avoid type promotion
specify ``constant_values=np.nan``
Padding coordinates will drop their corresponding index (if any) and will reset default
indexes for dimension coordinates.
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", range(5))})
>>> ds.pad(x=(1, 2))
<xarray.Dataset> Size: 64B
Dimensions: (x: 8)
Dimensions without coordinates: x
Data variables:
foo (x) float64 64B nan 0.0 1.0 2.0 3.0 4.0 nan nan
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
if mode in ("edge", "reflect", "symmetric", "wrap"):
coord_pad_mode = mode
coord_pad_options = {
"stat_length": stat_length,
"constant_values": constant_values,
"end_values": end_values,
"reflect_type": reflect_type,
}
else:
coord_pad_mode = "constant"
coord_pad_options = {}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
variables = {}
# keep indexes that won't be affected by pad and drop all other indexes
xindexes = self.xindexes
pad_dims = set(pad_width)
indexes = {
k: idx
for k, idx in xindexes.items()
if not pad_dims.intersection(xindexes.get_all_dims(k))
}
for name, var in self.variables.items():
var_pad_width = {k: v for k, v in pad_width.items() if k in var.dims}
if not var_pad_width:
variables[name] = var
elif name in self.data_vars:
if utils.is_dict_like(constant_values):
if name in constant_values.keys():
filtered_constant_values = constant_values[name]
elif not set(var.dims).isdisjoint(constant_values.keys()):
filtered_constant_values = {
k: v for k, v in constant_values.items() if k in var.dims
}
else:
filtered_constant_values = 0 # TODO: https://github.com/pydata/xarray/pull/9353#discussion_r1724018352
else:
filtered_constant_values = constant_values
variables[name] = var.pad(
pad_width=var_pad_width,
mode=mode,
stat_length=stat_length,
constant_values=filtered_constant_values,
end_values=end_values,
reflect_type=reflect_type,
keep_attrs=keep_attrs,
)
else:
variables[name] = var.pad(
pad_width=var_pad_width,
mode=coord_pad_mode,
keep_attrs=keep_attrs,
**coord_pad_options, # type: ignore[arg-type]
)
# reset default index of dimension coordinates
if (name,) == var.dims:
dim_var = {name: variables[name]}
index = PandasIndex.from_variables(dim_var, options={})
index_vars = index.create_variables(dim_var)
indexes[name] = index
variables[name] = index_vars[name]
attrs = self._attrs if keep_attrs else None
return self._replace_with_new_dims(variables, indexes=indexes, attrs=attrs)
def idxmin(
self,
dim: Hashable | None = None,
*,
skipna: bool | None = None,
fill_value: Any = xrdtypes.NA,
keep_attrs: bool | None = None,
) -> Self:
"""Return the coordinate label of the minimum value along a dimension.
Returns a new `Dataset` named after the dimension with the values of
the coordinate labels along that dimension corresponding to minimum
values along that dimension.
In comparison to :py:meth:`~Dataset.argmin`, this returns the
coordinate label while :py:meth:`~Dataset.argmin` returns the index.
Parameters
----------
dim : Hashable, optional
Dimension over which to apply `idxmin`. This is optional for 1D
variables, but required for variables with 2 or more dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for ``float``, ``complex``, and ``object``
dtypes; other dtypes either do not have a sentinel missing value
(``int``) or ``skipna=True`` has not been implemented
(``datetime64`` or ``timedelta64``).
fill_value : Any, default: NaN
Value to be filled in case all of the values along a dimension are
null. By default this is NaN. The fill value and result are
automatically converted to a compatible dtype if possible.
Ignored if ``skipna`` is False.
keep_attrs : bool or None, optional
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False, the new object
will be returned without attributes.
Returns
-------
reduced : Dataset
New `Dataset` object with `idxmin` applied to its data and the
indicated dimension removed.
See Also
--------
DataArray.idxmin, Dataset.idxmax, Dataset.min, Dataset.argmin
Examples
--------
>>> array1 = xr.DataArray(
... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]}
... )
>>> array2 = xr.DataArray(
... [
... [2.0, 1.0, 2.0, 0.0, -2.0],
... [-4.0, np.nan, 2.0, np.nan, -2.0],
... [np.nan, np.nan, 1.0, np.nan, np.nan],
... ],
... dims=["y", "x"],
... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]},
... )
>>> ds = xr.Dataset({"int": array1, "float": array2})
>>> ds.min(dim="x")
<xarray.Dataset> Size: 56B
Dimensions: (y: 3)
Coordinates:
* y (y) int64 24B -1 0 1
Data variables:
int int64 8B -2
float (y) float64 24B -2.0 -4.0 1.0
>>> ds.argmin(dim="x")
<xarray.Dataset> Size: 56B
Dimensions: (y: 3)
Coordinates:
* y (y) int64 24B -1 0 1
Data variables:
int int64 8B 4
float (y) int64 24B 4 0 2
>>> ds.idxmin(dim="x")
<xarray.Dataset> Size: 52B
Dimensions: (y: 3)
Coordinates:
* y (y) int64 24B -1 0 1
Data variables:
int <U1 4B 'e'
float (y) object 24B 'e' 'a' 'c'
"""
return self.map(
methodcaller(
"idxmin",
dim=dim,
skipna=skipna,
fill_value=fill_value,
keep_attrs=keep_attrs,
)
)
def idxmax(
self,
dim: Hashable | None = None,
*,
skipna: bool | None = None,
fill_value: Any = xrdtypes.NA,
keep_attrs: bool | None = None,
) -> Self:
"""Return the coordinate label of the maximum value along a dimension.
Returns a new `Dataset` named after the dimension with the values of
the coordinate labels along that dimension corresponding to maximum
values along that dimension.
In comparison to :py:meth:`~Dataset.argmax`, this returns the
coordinate label while :py:meth:`~Dataset.argmax` returns the index.
Parameters
----------
dim : str, optional
Dimension over which to apply `idxmax`. This is optional for 1D
variables, but required for variables with 2 or more dimensions.
skipna : bool or None, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for ``float``, ``complex``, and ``object``
dtypes; other dtypes either do not have a sentinel missing value
(``int``) or ``skipna=True`` has not been implemented
(``datetime64`` or ``timedelta64``).
fill_value : Any, default: NaN
Value to be filled in case all of the values along a dimension are
null. By default this is NaN. The fill value and result are
automatically converted to a compatible dtype if possible.
Ignored if ``skipna`` is False.
keep_attrs : bool or None, optional
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False, the new object
will be returned without attributes.
Returns
-------
reduced : Dataset
New `Dataset` object with `idxmax` applied to its data and the
indicated dimension removed.
See Also
--------
DataArray.idxmax, Dataset.idxmin, Dataset.max, Dataset.argmax
Examples
--------
>>> array1 = xr.DataArray(
... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]}
... )
>>> array2 = xr.DataArray(
... [
... [2.0, 1.0, 2.0, 0.0, -2.0],
... [-4.0, np.nan, 2.0, np.nan, -2.0],
... [np.nan, np.nan, 1.0, np.nan, np.nan],
... ],
... dims=["y", "x"],
... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]},
... )
>>> ds = xr.Dataset({"int": array1, "float": array2})
>>> ds.max(dim="x")
<xarray.Dataset> Size: 56B
Dimensions: (y: 3)
Coordinates:
* y (y) int64 24B -1 0 1
Data variables:
int int64 8B 2
float (y) float64 24B 2.0 2.0 1.0
>>> ds.argmax(dim="x")
<xarray.Dataset> Size: 56B
Dimensions: (y: 3)
Coordinates:
* y (y) int64 24B -1 0 1
Data variables:
int int64 8B 1
float (y) int64 24B 0 2 2
>>> ds.idxmax(dim="x")
<xarray.Dataset> Size: 52B
Dimensions: (y: 3)
Coordinates:
* y (y) int64 24B -1 0 1
Data variables:
int <U1 4B 'b'
float (y) object 24B 'a' 'c' 'c'
"""
return self.map(
methodcaller(
"idxmax",
dim=dim,
skipna=skipna,
fill_value=fill_value,
keep_attrs=keep_attrs,
)
)
def argmin(self, dim: Hashable | None = None, **kwargs) -> Self:
"""Indices of the minima of the member variables.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : Hashable, optional
The dimension over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will be an error, since DataArray.argmin will
return a dict with indices for all dimensions, which does not make sense for
a Dataset.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Dataset
Examples
--------
>>> dataset = xr.Dataset(
... {
... "math_scores": (
... ["student", "test"],
... [[90, 85, 79], [78, 80, 85], [95, 92, 98]],
... ),
... "english_scores": (
... ["student", "test"],
... [[88, 90, 92], [75, 82, 79], [39, 96, 78]],
... ),
... },
... coords={
... "student": ["Alice", "Bob", "Charlie"],
... "test": ["Test 1", "Test 2", "Test 3"],
... },
... )
# Indices of the minimum values along the 'student' dimension are calculated
>>> argmin_indices = dataset.argmin(dim="student")
>>> min_score_in_math = dataset["student"].isel(
... student=argmin_indices["math_scores"]
... )
>>> min_score_in_math
<xarray.DataArray 'student' (test: 3)> Size: 84B
array(['Bob', 'Bob', 'Alice'], dtype='<U7')
Coordinates:
* test (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
student (test) <U7 84B 'Bob' 'Bob' 'Alice'
>>> min_score_in_english = dataset["student"].isel(
... student=argmin_indices["english_scores"]
... )
>>> min_score_in_english
<xarray.DataArray 'student' (test: 3)> Size: 84B
array(['Charlie', 'Bob', 'Charlie'], dtype='<U7')
Coordinates:
* test (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
student (test) <U7 84B 'Charlie' 'Bob' 'Charlie'
See Also
--------
Dataset.idxmin
DataArray.argmin
"""
if dim is None:
warnings.warn(
"Once the behaviour of DataArray.argmin() and Variable.argmin() without "
"dim changes to return a dict of indices of each dimension, for "
"consistency it will be an error to call Dataset.argmin() with no argument,"
"since we don't return a dict of Datasets.",
DeprecationWarning,
stacklevel=2,
)
if (
dim is None
or (not isinstance(dim, Sequence) and dim is not ...)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
argmin_func = duck_array_ops.argmin
return self.reduce(
argmin_func, dim=None if dim is None else [dim], **kwargs
)
else:
raise ValueError(
"When dim is a sequence or ..., DataArray.argmin() returns a dict. "
"dicts cannot be contained in a Dataset, so cannot call "
"Dataset.argmin() with a sequence or ... for dim"
)
def argmax(self, dim: Hashable | None = None, **kwargs) -> Self:
"""Indices of the maxima of the member variables.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : str, optional
The dimension over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will be an error, since DataArray.argmax will
return a dict with indices for all dimensions, which does not make sense for
a Dataset.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Dataset
Examples
--------
>>> dataset = xr.Dataset(
... {
... "math_scores": (
... ["student", "test"],
... [[90, 85, 92], [78, 80, 85], [95, 92, 98]],
... ),
... "english_scores": (
... ["student", "test"],
... [[88, 90, 92], [75, 82, 79], [93, 96, 91]],
... ),
... },
... coords={
... "student": ["Alice", "Bob", "Charlie"],
... "test": ["Test 1", "Test 2", "Test 3"],
... },
... )
# Indices of the maximum values along the 'student' dimension are calculated
>>> argmax_indices = dataset.argmax(dim="test")
>>> argmax_indices
<xarray.Dataset> Size: 132B
Dimensions: (student: 3)
Coordinates:
* student (student) <U7 84B 'Alice' 'Bob' 'Charlie'
Data variables:
math_scores (student) int64 24B 2 2 2
english_scores (student) int64 24B 2 1 1
See Also
--------
DataArray.argmax
"""
if dim is None:
warnings.warn(
"Once the behaviour of DataArray.argmin() and Variable.argmin() without "
"dim changes to return a dict of indices of each dimension, for "
"consistency it will be an error to call Dataset.argmin() with no argument,"
"since we don't return a dict of Datasets.",
DeprecationWarning,
stacklevel=2,
)
if (
dim is None
or (not isinstance(dim, Sequence) and dim is not ...)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
argmax_func = duck_array_ops.argmax
return self.reduce(
argmax_func, dim=None if dim is None else [dim], **kwargs
)
else:
raise ValueError(
"When dim is a sequence or ..., DataArray.argmin() returns a dict. "
"dicts cannot be contained in a Dataset, so cannot call "
"Dataset.argmin() with a sequence or ... for dim"
)
def eval(
self,
statement: str,
*,
parser: QueryParserOptions = "pandas",
) -> Self | T_DataArray:
"""
Calculate an expression supplied as a string in the context of the dataset.
This is currently experimental; the API may change particularly around
assignments, which currently return a ``Dataset`` with the additional variable.
Currently only the ``python`` engine is supported, which has the same
performance as executing in python.
Parameters
----------
statement : str
String containing the Python-like expression to evaluate.
Returns
-------
result : Dataset or DataArray, depending on whether ``statement`` contains an
assignment.
Examples
--------
>>> ds = xr.Dataset(
... {"a": ("x", np.arange(0, 5, 1)), "b": ("x", np.linspace(0, 1, 5))}
... )
>>> ds
<xarray.Dataset> Size: 80B
Dimensions: (x: 5)
Dimensions without coordinates: x
Data variables:
a (x) int64 40B 0 1 2 3 4
b (x) float64 40B 0.0 0.25 0.5 0.75 1.0
>>> ds.eval("a + b")
<xarray.DataArray (x: 5)> Size: 40B
array([0. , 1.25, 2.5 , 3.75, 5. ])
Dimensions without coordinates: x
>>> ds.eval("c = a + b")
<xarray.Dataset> Size: 120B
Dimensions: (x: 5)
Dimensions without coordinates: x
Data variables:
a (x) int64 40B 0 1 2 3 4
b (x) float64 40B 0.0 0.25 0.5 0.75 1.0
c (x) float64 40B 0.0 1.25 2.5 3.75 5.0
"""
return pd.eval( # type: ignore[return-value]
statement,
resolvers=[self],
target=self,
parser=parser,
# Because numexpr returns a numpy array, using that engine results in
# different behavior. We'd be very open to a contribution handling this.
engine="python",
)
def query(
self,
queries: Mapping[Any, Any] | None = None,
parser: QueryParserOptions = "pandas",
engine: QueryEngineOptions = None,
missing_dims: ErrorOptionsWithWarn = "raise",
**queries_kwargs: Any,
) -> Self:
"""Return a new dataset with each array indexed along the specified
dimension(s), where the indexers are given as strings containing
Python expressions to be evaluated against the data variables in the
dataset.
Parameters
----------
queries : dict-like, optional
A dict-like with keys matching dimensions and values given by strings
containing Python expressions to be evaluated against the data variables
in the dataset. The expressions will be evaluated using the pandas
eval() function, and can contain any valid Python expressions but cannot
contain any Python statements.
parser : {"pandas", "python"}, default: "pandas"
The parser to use to construct the syntax tree from the expression.
The default of 'pandas' parses code slightly different than standard
Python. Alternatively, you can parse an expression using the 'python'
parser to retain strict Python semantics.
engine : {"python", "numexpr", None}, default: None
The engine used to evaluate the expression. Supported engines are:
- None: tries to use numexpr, falls back to python
- "numexpr": evaluates expressions using numexpr
- "python": performs operations as if you had eval’d in top level python
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
Dataset:
- "raise": raise an exception
- "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
**queries_kwargs : {dim: query, ...}, optional
The keyword arguments form of ``queries``.
One of queries or queries_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the results of the appropriate
queries.
See Also
--------
Dataset.isel
pandas.eval
Examples
--------
>>> a = np.arange(0, 5, 1)
>>> b = np.linspace(0, 1, 5)
>>> ds = xr.Dataset({"a": ("x", a), "b": ("x", b)})
>>> ds
<xarray.Dataset> Size: 80B
Dimensions: (x: 5)
Dimensions without coordinates: x
Data variables:
a (x) int64 40B 0 1 2 3 4
b (x) float64 40B 0.0 0.25 0.5 0.75 1.0
>>> ds.query(x="a > 2")
<xarray.Dataset> Size: 32B
Dimensions: (x: 2)
Dimensions without coordinates: x
Data variables:
a (x) int64 16B 3 4
b (x) float64 16B 0.75 1.0
"""
# allow queries to be given either as a dict or as kwargs
queries = either_dict_or_kwargs(queries, queries_kwargs, "query")
# check queries
for dim, expr in queries.items():
if not isinstance(expr, str):
msg = f"expr for dim {dim} must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
# evaluate the queries to create the indexers
indexers = {
dim: pd.eval(expr, resolvers=[self], parser=parser, engine=engine)
for dim, expr in queries.items()
}
# apply the selection
return self.isel(indexers, missing_dims=missing_dims)
def curvefit(
    self,
    coords: str | DataArray | Iterable[str | DataArray],
    func: Callable[..., Any],
    reduce_dims: Dims = None,
    skipna: bool = True,
    p0: Mapping[str, float | DataArray] | None = None,
    bounds: Mapping[str, tuple[float | DataArray, float | DataArray]] | None = None,
    param_names: Sequence[str] | None = None,
    errors: ErrorOptions = "raise",
    kwargs: dict[str, Any] | None = None,
) -> Self:
    """
    Curve fitting optimization for arbitrary functions.

    Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`.

    Parameters
    ----------
    coords : hashable, DataArray, or sequence of hashable or DataArray
        Independent coordinate(s) over which to perform the curve fitting. Must share
        at least one dimension with the calling object. When fitting multi-dimensional
        functions, supply `coords` as a sequence in the same order as arguments in
        `func`. To fit along existing dimensions of the calling object, `coords` can
        also be specified as a str or sequence of strs.
    func : callable
        User specified function in the form `f(x, *params)` which returns a numpy
        array of length `len(x)`. `params` are the fittable parameters which are optimized
        by scipy curve_fit. `x` can also be specified as a sequence containing multiple
        coordinates, e.g. `f((x0, x1), *params)`.
    reduce_dims : str, Iterable of Hashable or None, optional
        Additional dimension(s) over which to aggregate while fitting. For example,
        calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will
        aggregate all lat and lon points and fit the specified function along the
        time dimension.
    skipna : bool, default: True
        Whether to skip missing values when fitting. Default is True.
    p0 : dict-like, optional
        Optional dictionary of parameter names to initial guesses passed to the
        `curve_fit` `p0` arg. If the values are DataArrays, they will be appropriately
        broadcast to the coordinates of the array. If none or only some parameters are
        passed, the rest will be assigned initial values following the default scipy
        behavior.
    bounds : dict-like, optional
        Optional dictionary of parameter names to tuples of bounding values passed to the
        `curve_fit` `bounds` arg. If any of the bounds are DataArrays, they will be
        appropriately broadcast to the coordinates of the array. If none or only some
        parameters are passed, the rest will be unbounded following the default scipy
        behavior.
    param_names : sequence of hashable, optional
        Sequence of names for the fittable parameters of `func`. If not supplied,
        this will be automatically determined by arguments of `func`. `param_names`
        should be manually supplied when fitting a function that takes a variable
        number of parameters.
    errors : {"raise", "ignore"}, default: "raise"
        If 'raise', any errors from the `scipy.optimize.curve_fit` optimization will
        raise an exception. If 'ignore', the coefficients and covariances for the
        coordinates where the fitting failed will be NaN.
    kwargs : dict, optional
        Additional keyword arguments to be passed to scipy curve_fit.

    Returns
    -------
    curvefit_results : Dataset
        A single dataset which contains:

        [var]_curvefit_coefficients
            The coefficients of the best fit.
        [var]_curvefit_covariance
            The covariance matrix of the coefficient estimates.

    See Also
    --------
    Dataset.polyfit
    scipy.optimize.curve_fit
    xarray.Dataset.xlm.modelfit
        External method from `xarray-lmfit <https://xarray-lmfit.readthedocs.io/>`_
        with more curve fitting functionality.
    """
    # Imported lazily inside the method — presumably to avoid an import
    # cycle with xarray.computation; TODO confirm.
    from xarray.computation.fit import curvefit as curvefit_impl

    # Positional delegation: argument order here must match the signature
    # of curvefit_impl exactly.
    return curvefit_impl(
        self,
        coords,
        func,
        reduce_dims,
        skipna,
        p0,
        bounds,
        param_names,
        errors,
        kwargs,
    )
def drop_duplicates(
    self,
    dim: Hashable | Iterable[Hashable],
    *,
    keep: Literal["first", "last", False] = "first",
) -> Self:
    """Return a new Dataset with duplicated dimension values removed.

    Parameters
    ----------
    dim : dimension label or labels
        Dimension(s) to deduplicate along; pass ``...`` to drop duplicates
        along every dimension.
    keep : {"first", "last", False}, default: "first"
        Which of the duplicated entries (if any) to retain:

        - ``"first"`` : keep only the first occurrence.
        - ``"last"`` : keep only the last occurrence.
        - False : discard every duplicated entry.

    Returns
    -------
    Dataset

    See Also
    --------
    DataArray.drop_duplicates
    """
    # Normalize ``dim`` into an iterable of dimension names.
    if isinstance(dim, str):
        dims: Iterable = (dim,)
    elif dim is ...:
        dims = self.dims
    else:
        dims = dim if isinstance(dim, Iterable) else [dim]

    unknown = set(dims) - set(self.dims)
    if unknown:
        raise ValueError(
            f"Dimensions {tuple(unknown)} not found in data dimensions {tuple(self.dims)}"
        )

    # Per-dimension boolean mask: True where the index value is not a duplicate.
    keep_mask = {d: ~self.get_index(d).duplicated(keep=keep) for d in dims}
    return self.isel(keep_mask)
def convert_calendar(
    self,
    calendar: CFCalendar,
    dim: Hashable = "time",
    align_on: Literal["date", "year"] | None = None,
    missing: Any | None = None,
    use_cftime: bool | None = None,
) -> Self:
    """Convert the Dataset to another calendar.

    Only converts the individual timestamps, does not modify any data except
    in dropping invalid/surplus dates or inserting missing dates.

    If the source and target calendars are either no_leap, all_leap or a
    standard type, only the type of the time array is modified.
    When converting to a leap year from a non-leap year, the 29th of February
    is removed from the array. In the other direction the 29th of February
    will be missing in the output, unless `missing` is specified,
    in which case that value is inserted.

    For conversions involving `360_day` calendars, see Notes.

    This method is safe to use with sub-daily data as it doesn't touch the
    time part of the timestamps.

    Parameters
    ----------
    calendar : str
        The target calendar name.
    dim : Hashable, default: "time"
        Name of the time coordinate.
    align_on : {None, 'date', 'year'}, optional
        Must be specified when either source or target is a `360_day` calendar,
        ignored otherwise. See Notes.
    missing : Any or None, optional
        By default, i.e. if the value is None, this method will simply attempt
        to convert the dates in the source calendar to the same dates in the
        target calendar, and drop any of those that are not possible to
        represent. If a value is provided, a new time coordinate will be
        created in the target calendar with the same frequency as the original
        time coordinate; for any dates that are not present in the source, the
        data will be filled with this value. Note that using this mode requires
        that the source data have an inferable frequency; for more information
        see :py:func:`xarray.infer_freq`. For certain frequency, source, and
        target calendar combinations, this could result in many missing values, see notes.
    use_cftime : bool or None, optional
        Whether to use cftime objects in the output, only used if `calendar`
        is one of {"proleptic_gregorian", "gregorian" or "standard"}.
        If True, the new time axis uses cftime objects.
        If None (default), it uses :py:class:`numpy.datetime64` values if the
        date range permits it, and :py:class:`cftime.datetime` objects if not.
        If False, it uses :py:class:`numpy.datetime64` or fails.

    Returns
    -------
    Dataset
        Copy of the dataset with the time coordinate converted to the
        target calendar. If 'missing' was None (default), invalid dates in
        the new calendar are dropped, but missing dates are not inserted.
        If `missing` was given, the new data is reindexed to have a time axis
        with the same frequency as the source, but in the new calendar; any
        missing datapoints are filled with `missing`.

    Notes
    -----
    Passing a value to `missing` is only usable if the source's time coordinate has an
    inferable frequency (see :py:func:`~xarray.infer_freq`) and is only appropriate
    if the target coordinate, generated from this frequency, has dates equivalent to the
    source. It is usually **not** appropriate to use this mode with:

    - Period-end frequencies : 'A', 'Y', 'Q' or 'M', in opposition to 'AS' 'YS', 'QS' and 'MS'
    - Sub-monthly frequencies that do not divide a day evenly : 'W', 'nD' where `n != 1`
      or 'mH' where 24 % m != 0.

    If one of the source or target calendars is `"360_day"`, `align_on` must
    be specified and two options are offered.

    - "year"
        The dates are translated according to their relative position in the year,
        ignoring their original month and day information, meaning that the
        missing/surplus days are added/removed at regular intervals.

        From a `360_day` to a standard calendar, the output will be missing the
        following dates (day of year in parentheses):

        To a leap year:
            January 31st (31), March 31st (91), June 1st (153), July 31st (213),
            September 31st (275) and November 30th (335).
        To a non-leap year:
            February 6th (36), April 19th (109), July 2nd (183),
            September 12th (255), November 25th (329).

        From a standard calendar to a `"360_day"`, the following dates in the
        source array will be dropped:

        From a leap year:
            January 31st (31), April 1st (92), June 1st (153), August 1st (214),
            September 31st (275), December 1st (336)
        From a non-leap year:
            February 6th (37), April 20th (110), July 2nd (183),
            September 13th (256), November 25th (329)

        This option is best used on daily and subdaily data.

    - "date"
        The month/day information is conserved and invalid dates are dropped
        from the output. This means that when converting from a `"360_day"` to a
        standard calendar, all 31st (Jan, March, May, July, August, October and
        December) will be missing as there is no equivalent dates in the
        `"360_day"` calendar and the 29th (on non-leap years) and 30th of February
        will be dropped as there are no equivalent dates in a standard calendar.

        This option is best used with data on a frequency coarser than daily.
    """
    # Delegates to the module-level convert_calendar helper; all arguments
    # are forwarded unchanged.
    return convert_calendar(
        self,
        calendar,
        dim=dim,
        align_on=align_on,
        missing=missing,
        use_cftime=use_cftime,
    )
def interp_calendar(
    self,
    target: pd.DatetimeIndex | CFTimeIndex | DataArray,
    dim: Hashable = "time",
) -> Self:
    """Interpolates the Dataset to another calendar based on decimal year measure.

    Each timestamp in `source` and `target` are first converted to their decimal
    year equivalent then `source` is interpolated on the target coordinate.
    The decimal year of a timestamp is its year plus its sub-year component
    converted to the fraction of its year. For example "2000-03-01 12:00" is
    2000.1653 in a standard calendar or 2000.16301 in a `"noleap"` calendar.

    This method should only be used when the time (HH:MM:SS) information of
    time coordinate is not important.

    Parameters
    ----------
    target: DataArray or DatetimeIndex or CFTimeIndex
        The target time coordinate of a valid dtype
        (np.datetime64 or cftime objects)
    dim : Hashable, default: "time"
        The time coordinate name.

    Returns
    -------
    Dataset
        The source interpolated on the decimal years of target.
    """
    # Delegates to the module-level interp_calendar helper.
    return interp_calendar(self, target, dim=dim)
@_deprecate_positional_args("v2024.07.0")
def groupby(
    self,
    group: GroupInput = None,
    *,
    squeeze: Literal[False] = False,
    restore_coord_dims: bool = False,
    eagerly_compute_group: Literal[False] | None = None,
    **groupers: Grouper,
) -> DatasetGroupBy:
    """Returns a DatasetGroupBy object for performing grouped operations.

    Parameters
    ----------
    group : str or DataArray or IndexVariable or sequence of hashable or mapping of hashable to Grouper
        Array whose unique values should be used to group this array. If a
        Hashable, must be the name of a coordinate contained in this dataarray. If a dictionary,
        must map an existing variable name to a :py:class:`Grouper` instance.
    squeeze : False
        This argument is deprecated.
    restore_coord_dims : bool, default: False
        If True, also restore the dimension order of multi-dimensional
        coordinates.
    eagerly_compute_group: False, optional
        This argument is deprecated.
    **groupers : Mapping of str to Grouper or Resampler
        Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object.
        One of ``group`` or ``groupers`` must be provided.
        Only a single ``grouper`` is allowed at present.

    Returns
    -------
    grouped : DatasetGroupBy
        A `DatasetGroupBy` object patterned after `pandas.GroupBy` that can be
        iterated over in the form of `(unique_value, grouped_array)` pairs.

    Examples
    --------
    >>> ds = xr.Dataset(
    ...     {"foo": (("x", "y"), np.arange(12).reshape((4, 3)))},
    ...     coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))},
    ... )

    Grouping by a single variable is easy

    >>> ds.groupby("letters")
    <DatasetGroupBy, grouped over 1 grouper(s), 2 groups in total:
        'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'>

    Execute a reduction

    >>> ds.groupby("letters").sum()
    <xarray.Dataset> Size: 64B
    Dimensions:  (letters: 2, y: 3)
    Coordinates:
      * letters  (letters) object 16B 'a' 'b'
    Dimensions without coordinates: y
    Data variables:
        foo      (letters, y) int64 48B 9 11 13 9 11 13

    Grouping by multiple variables

    >>> ds.groupby(["letters", "x"])
    <DatasetGroupBy, grouped over 2 grouper(s), 8 groups in total:
        'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'
        'x': UniqueGrouper('x'), 4/4 groups with labels 10, 20, 30, 40>

    Use Grouper objects to express more complicated GroupBy operations

    >>> from xarray.groupers import BinGrouper, UniqueGrouper
    >>>
    >>> ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum()
    <xarray.Dataset> Size: 144B
    Dimensions:  (y: 3, x_bins: 2, letters: 2)
    Coordinates:
      * x_bins   (x_bins) interval[int64, right] 32B (5, 15] (15, 25]
      * letters  (letters) object 16B 'a' 'b'
    Dimensions without coordinates: y
    Data variables:
        foo      (y, x_bins, letters) float64 96B 0.0 nan nan 3.0 ... nan nan 5.0

    See Also
    --------
    :ref:`groupby`
        Users guide explanation of how to group and bin data.
    :doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns`
        Tutorial on :py:func:`~xarray.Dataset.groupby` for windowed computation.
    :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray`
        Tutorial on :py:func:`~xarray.Dataset.groupby` demonstrating reductions, transformation and comparison with :py:func:`~xarray.Dataset.resample`.
    :external:py:meth:`pandas.DataFrame.groupby <pandas.DataFrame.groupby>`
    :func:`Dataset.groupby_bins <Dataset.groupby_bins>`
    :func:`DataArray.groupby <DataArray.groupby>`
    :class:`core.groupby.DatasetGroupBy`
    :func:`Dataset.coarsen <Dataset.coarsen>`
    :func:`Dataset.resample <Dataset.resample>`
    :func:`DataArray.resample <DataArray.resample>`
    """
    # Imported lazily — presumably to avoid a circular import with
    # xarray.core.groupby; TODO confirm.
    from xarray.core.groupby import (
        DatasetGroupBy,
        _parse_group_and_groupers,
        _validate_groupby_squeeze,
    )

    # Reject any non-default use of the deprecated ``squeeze`` argument.
    _validate_groupby_squeeze(squeeze)
    # Resolve ``group``/``groupers`` into concrete grouper objects.
    rgroupers = _parse_group_and_groupers(
        self, group, groupers, eagerly_compute_group=eagerly_compute_group
    )
    return DatasetGroupBy(self, rgroupers, restore_coord_dims=restore_coord_dims)
@_deprecate_positional_args("v2024.07.0")
def groupby_bins(
    self,
    group: Hashable | DataArray | IndexVariable,
    bins: Bins,
    right: bool = True,
    labels: ArrayLike | None = None,
    precision: int = 3,
    include_lowest: bool = False,
    squeeze: Literal[False] = False,
    restore_coord_dims: bool = False,
    duplicates: Literal["raise", "drop"] = "raise",
    eagerly_compute_group: Literal[False] | None = None,
) -> DatasetGroupBy:
    """Returns a DatasetGroupBy object for performing grouped operations.

    Rather than using all unique values of `group`, the values are discretized
    first by applying `pandas.cut` [1]_ to `group`.

    Parameters
    ----------
    group : Hashable, DataArray or IndexVariable
        Array whose binned values should be used to group this array. If a
        string, must be the name of a variable contained in this dataset.
    bins : int or array-like
        If bins is an int, it defines the number of equal-width bins in the
        range of x. However, in this case, the range of x is extended by .1%
        on each side to include the min or max values of x. If bins is a
        sequence it defines the bin edges allowing for non-uniform bin
        width. No extension of the range of x is done in this case.
    right : bool, default: True
        Indicates whether the bins include the rightmost edge or not. If
        right == True (the default), then the bins [1,2,3,4] indicate
        (1,2], (2,3], (3,4].
    labels : array-like or bool, default: None
        Used as labels for the resulting bins. Must be of the same length as
        the resulting bins. If False, string bin labels are assigned by
        `pandas.cut`.
    precision : int, default: 3
        The precision at which to store and display the bins labels.
    include_lowest : bool, default: False
        Whether the first interval should be left-inclusive or not.
    squeeze : False
        This argument is deprecated.
    restore_coord_dims : bool, default: False
        If True, also restore the dimension order of multi-dimensional
        coordinates.
    duplicates : {"raise", "drop"}, default: "raise"
        If bin edges are not unique, raise ValueError or drop non-uniques.
    eagerly_compute_group: False, optional
        This argument is deprecated.

    Returns
    -------
    grouped : DatasetGroupBy
        A `DatasetGroupBy` object patterned after `pandas.GroupBy` that can be
        iterated over in the form of `(unique_value, grouped_array)` pairs.
        The name of the group has the added suffix `_bins` in order to
        distinguish it from the original variable.

    See Also
    --------
    :ref:`groupby`
        Users guide explanation of how to group and bin data.
    Dataset.groupby
    DataArray.groupby_bins
    core.groupby.DatasetGroupBy
    pandas.DataFrame.groupby

    References
    ----------
    .. [1] https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html
    """
    # Imported lazily — presumably to avoid circular imports; TODO confirm.
    from xarray.core.groupby import (
        DatasetGroupBy,
        ResolvedGrouper,
        _validate_groupby_squeeze,
    )
    from xarray.groupers import BinGrouper

    # Reject any non-default use of the deprecated ``squeeze`` argument.
    _validate_groupby_squeeze(squeeze)
    # Bundle the binning options into a BinGrouper, then resolve it
    # against this dataset.
    grouper = BinGrouper(
        bins=bins,
        right=right,
        labels=labels,
        precision=precision,
        include_lowest=include_lowest,
    )
    rgrouper = ResolvedGrouper(
        grouper, group, self, eagerly_compute_group=eagerly_compute_group
    )
    return DatasetGroupBy(
        self,
        (rgrouper,),
        restore_coord_dims=restore_coord_dims,
    )
def weighted(self, weights: DataArray) -> DatasetWeighted:
    """
    Weighted Dataset operations.

    Parameters
    ----------
    weights : DataArray
        An array of weights associated with the values in this Dataset.
        Each value in the data contributes to the reduction operation
        according to its associated weight.

    Notes
    -----
    ``weights`` must be a DataArray and cannot contain missing values.
    Missing values can be replaced by ``weights.fillna(0)``.

    Returns
    -------
    computation.weighted.DatasetWeighted

    See Also
    --------
    :func:`DataArray.weighted <DataArray.weighted>`

    :ref:`compute.weighted`
        User guide on weighted array reduction using :py:func:`~xarray.Dataset.weighted`

    :doc:`xarray-tutorial:fundamentals/03.4_weighted`
        Tutorial on Weighted Reduction using :py:func:`~xarray.Dataset.weighted`
    """
    # Imported lazily — presumably to avoid a circular import; TODO confirm.
    from xarray.computation.weighted import DatasetWeighted

    return DatasetWeighted(self, weights)
def rolling(
    self,
    dim: Mapping[Any, int] | None = None,
    min_periods: int | None = None,
    center: bool | Mapping[Any, bool] = False,
    **window_kwargs: int,
) -> DatasetRolling:
    """
    Rolling window object for Datasets.

    Parameters
    ----------
    dim : dict, optional
        Mapping from the dimension name to create the rolling iterator
        along (e.g. `time`) to its moving window size.
    min_periods : int or None, default: None
        Minimum number of observations in window required to have a value
        (otherwise result is NA). The default, None, is equivalent to
        setting min_periods equal to the size of the window.
    center : bool or mapping of hashable to bool, default: False
        Set the labels at the center of the window. The default, False,
        sets the labels at the right edge of the window.
    **window_kwargs : optional
        The keyword arguments form of ``dim``.
        One of dim or window_kwargs must be provided.

    Returns
    -------
    computation.rolling.DatasetRolling

    See Also
    --------
    Dataset.cumulative
    DataArray.rolling
    DataArray.rolling_exp
    """
    # Imported lazily — presumably to avoid a circular import; TODO confirm.
    from xarray.computation.rolling import DatasetRolling

    # Accept the window spec either as a dict or as keyword arguments,
    # but not both.
    dim = either_dict_or_kwargs(dim, window_kwargs, "rolling")
    return DatasetRolling(self, dim, min_periods=min_periods, center=center)
def cumulative(
    self,
    dim: str | Iterable[Hashable],
    min_periods: int = 1,
) -> DatasetRolling:
    """
    Accumulating object for Datasets

    Parameters
    ----------
    dim : str or iterable of hashable
        The name(s) of the dimensions to create the cumulative window along
    min_periods : int, default: 1
        Minimum number of observations in window required to have a value
        (otherwise result is NA). The default is 1 (note this is different
        from ``Rolling``, whose default is the size of the window).

    Returns
    -------
    computation.rolling.DatasetRolling

    Raises
    ------
    ValueError
        If any requested dimension is not present in the dataset.

    See Also
    --------
    DataArray.cumulative
    Dataset.rolling
    Dataset.rolling_exp
    """
    from xarray.computation.rolling import DatasetRolling

    # Validate the requested dimension(s) and expand each one to a window
    # spanning the full dimension size, which turns the rolling window
    # into a cumulative one.
    if isinstance(dim, str):
        if dim not in self.dims:
            raise ValueError(
                f"Dimension {dim} not found in data dimensions: {self.dims}"
            )
        dim = {dim: self.sizes[dim]}
    else:
        missing_dims = set(dim) - set(self.dims)
        if missing_dims:
            raise ValueError(
                f"Dimensions {missing_dims} not found in data dimensions: {self.dims}"
            )
        dim = {d: self.sizes[d] for d in dim}

    # center=False anchors labels at the right edge, as required for a
    # cumulative aggregation.
    return DatasetRolling(self, dim, min_periods=min_periods, center=False)
def coarsen(
    self,
    dim: Mapping[Any, int] | None = None,
    boundary: CoarsenBoundaryOptions = "exact",
    side: SideOptions | Mapping[Any, SideOptions] = "left",
    coord_func: str | Callable | Mapping[Any, str | Callable] = "mean",
    **window_kwargs: int,
) -> DatasetCoarsen:
    """
    Coarsen object for Datasets.

    Parameters
    ----------
    dim : mapping of hashable to int, optional
        Mapping from the dimension name to the window size.
    boundary : {"exact", "trim", "pad"}, default: "exact"
        If 'exact', a ValueError will be raised if dimension size is not a
        multiple of the window size. If 'trim', the excess entries are
        dropped. If 'pad', NA will be padded.
    side : {"left", "right"} or mapping of str to {"left", "right"}, default: "left"
    coord_func : str or mapping of hashable to str, default: "mean"
        function (name) that is applied to the coordinates,
        or a mapping from coordinate name to function (name).

    Returns
    -------
    computation.rolling.DatasetCoarsen

    See Also
    --------
    :class:`computation.rolling.DatasetCoarsen`
    :func:`DataArray.coarsen <DataArray.coarsen>`

    :ref:`reshape.coarsen`
        User guide describing :py:func:`~xarray.Dataset.coarsen`

    :ref:`compute.coarsen`
        User guide on block aggregation :py:func:`~xarray.Dataset.coarsen`

    :doc:`xarray-tutorial:fundamentals/03.3_windowed`
        Tutorial on windowed computation using :py:func:`~xarray.Dataset.coarsen`
    """
    # Imported lazily — presumably to avoid a circular import; TODO confirm.
    from xarray.computation.rolling import DatasetCoarsen

    # Accept the window spec either as a dict or as keyword arguments.
    dim = either_dict_or_kwargs(dim, window_kwargs, "coarsen")
    return DatasetCoarsen(
        self,
        dim,
        boundary=boundary,
        side=side,
        coord_func=coord_func,
    )
@_deprecate_positional_args("v2024.07.0")
def resample(
    self,
    indexer: Mapping[Any, ResampleCompatible | Resampler] | None = None,
    *,
    skipna: bool | None = None,
    closed: SideOptions | None = None,
    label: SideOptions | None = None,
    offset: pd.Timedelta | datetime.timedelta | str | None = None,
    origin: str | DatetimeLike = "start_day",
    restore_coord_dims: bool | None = None,
    **indexer_kwargs: ResampleCompatible | Resampler,
) -> DatasetResample:
    """Returns a Resample object for performing resampling operations.

    Handles both downsampling and upsampling. The resampled
    dimension must be a datetime-like coordinate. If any intervals
    contain no values from the original object, they will be given
    the value ``NaN``.

    Parameters
    ----------
    indexer : Mapping of Hashable to str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler, optional
        Mapping from the dimension name to resample frequency [1]_. The
        dimension must be datetime-like.
    skipna : bool, optional
        Whether to skip missing values when aggregating in downsampling.
    closed : {"left", "right"}, optional
        Side of each interval to treat as closed.
    label : {"left", "right"}, optional
        Side of each interval to use for labeling.
    origin : {'epoch', 'start', 'start_day', 'end', 'end_day'}, pd.Timestamp, datetime.datetime, np.datetime64, or cftime.datetime, default 'start_day'
        The datetime on which to adjust the grouping. The timezone of origin
        must match the timezone of the index.

        If a datetime is not used, these values are also supported:
        - 'epoch': `origin` is 1970-01-01
        - 'start': `origin` is the first value of the timeseries
        - 'start_day': `origin` is the first day at midnight of the timeseries
        - 'end': `origin` is the last value of the timeseries
        - 'end_day': `origin` is the ceiling midnight of the last day
    offset : pd.Timedelta, datetime.timedelta, or str, default is None
        An offset timedelta added to the origin.
    restore_coord_dims : bool, optional
        If True, also restore the dimension order of multi-dimensional
        coordinates.
    **indexer_kwargs : str, datetime.timedelta, pd.Timedelta, pd.DateOffset, or Resampler
        The keyword arguments form of ``indexer``.
        One of indexer or indexer_kwargs must be provided.

    Returns
    -------
    resampled : core.resample.DatasetResample
        This object resampled.

    See Also
    --------
    DataArray.resample
    pandas.Series.resample
    pandas.DataFrame.resample
    Dataset.groupby
    DataArray.groupby

    References
    ----------
    .. [1] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
    """
    # Imported lazily — presumably to avoid a circular import; TODO confirm.
    from xarray.core.resample import DatasetResample

    # All the heavy lifting is shared with DataArray.resample via _resample.
    return self._resample(
        resample_cls=DatasetResample,
        indexer=indexer,
        skipna=skipna,
        closed=closed,
        label=label,
        offset=offset,
        origin=origin,
        restore_coord_dims=restore_coord_dims,
        **indexer_kwargs,
    )
def drop_attrs(self, *, deep: bool = True) -> Self:
    """
    Return a copy of this Dataset with all attributes removed.

    Parameters
    ----------
    deep : bool, default True
        When True, attributes are also stripped from every variable
        (including index coordinate variables), not just the Dataset itself.

    Returns
    -------
    Dataset
    """
    # Strip the dataset-level attrs first.
    stripped = self._replace(attrs={})
    if not deep:
        return stripped

    # Strip attrs from each non-index variable. Variables have no
    # `._replace`, so copy first and then clear attrs on the copy.
    for name in stripped.variables:
        if name not in stripped.indexes:
            stripped[name] = stripped[name].copy()
            stripped[name].attrs = {}

    # Index coordinate variables must stay wired to their index object, so
    # rebuild them through Index.create_variables after clearing attrs.
    # (Not the most elegant approach — a general "map over all variables,
    # including indexes" helper would be nicer — but it works.)
    rebuilt_index_vars = {}
    for index, index_vars in stripped.xindexes.group_by_index():
        bare_vars = {k: v.copy() for k, v in index_vars.items()}
        for v in bare_vars.values():
            v.attrs = {}
        rebuilt_index_vars.update(index.create_variables(bare_vars))
    return stripped.assign(rebuilt_index_vars)
| Dataset |
python | tox-dev__tox | src/tox/execute/api.py | {
"start": 5918,
"end": 9898
} | class ____:
"""Result of a command execution."""
OK = 0
def __init__(  # noqa: PLR0913
    self,
    request: ExecuteRequest,
    show_on_standard: bool,  # noqa: FBT001
    exit_code: int | None,
    out: str,
    err: str,
    start: float,
    end: float,
    cmd: Sequence[str],
    metadata: dict[str, Any],
) -> None:
    """
    Create a new execution outcome.

    :param request: the execution request
    :param show_on_standard: a flag indicating if the execution was shown on stdout/stderr
    :param exit_code: the exit code for the execution (may be ``None``, per the annotation)
    :param out: the standard output of the execution
    :param err: the standard error of the execution
    :param start: a timer sample for the start of the execution
    :param end: a timer sample for the end of the execution
    :param cmd: the command as executed
    :param metadata: additional metadata attached to the execution
    """
    self.request = request  #: the execution request
    self.show_on_standard = show_on_standard  #: a flag indicating if the execution was shown on stdout/stderr
    self.exit_code = exit_code  #: the exit code for the execution
    self.out = out  #: the standard output of the execution
    self.err = err  #: the standard error of the execution
    self.start = start  #: a timer sample for the start of the execution
    self.end = end  #: a timer sample for the end of the execution
    self.cmd = cmd  #: the command as executed
    self.metadata = metadata  #: additional metadata attached to the execution
def __bool__(self) -> bool:
    """An outcome is truthy exactly when the command exited with ``OK`` (0)."""
    return self.exit_code == self.OK
def __repr__(self) -> str:
    """Debug representation: class name, exit code, elapsed time and the shell command."""
    return (
        f"{self.__class__.__name__}: exit {self.exit_code} in {self.elapsed:.2f} seconds"
        f" for {self.request.shell_cmd}"
    )
def assert_success(self) -> None:
    """Fail (raise ``SystemExit``) unless the execution succeeded; otherwise log completion."""
    # A not-yet-set exit code (None) is not treated as a failure here.
    if self.exit_code not in (None, self.OK):
        self._assert_fail()
    self.log_run_done(logging.INFO)
def assert_failure(self) -> None:
    """Assert that the execution failed."""
    # Mirror image of assert_success: bail out if the command succeeded.
    if self.exit_code is not None and self.exit_code == self.OK:
        self._assert_fail()
    self.log_run_done(logging.INFO)
def _assert_fail(self) -> NoReturn:
    """Replay captured output if needed, log at CRITICAL, and exit with the command's code."""
    # If output was not already mirrored to the console during execution,
    # echo the captured stdout/stderr now so the user can see what happened.
    if self.show_on_standard is False:
        if self.out:
            sys.stdout.write(self.out)
            if not self.out.endswith("\n"):
                sys.stdout.write("\n")
        if self.err:
            # Highlight stderr in red, restoring the terminal color after.
            sys.stderr.write(Fore.RED)
            sys.stderr.write(self.err)
            sys.stderr.write(Fore.RESET)
            if not self.err.endswith("\n"):
                sys.stderr.write("\n")
    self.log_run_done(logging.CRITICAL)
    raise SystemExit(self.exit_code)
def log_run_done(self, lvl: int) -> None:
    """
    Log that the run was done.

    :param lvl: the level on what to log as interpreted by :func:`logging.log`
    """
    req = self.request
    metadata = ""
    if self.metadata:
        metadata = f" {', '.join(f'{k}={v}' for k, v in self.metadata.items())}"
    # %-style lazy formatting: arguments are only interpolated when the
    # record is actually emitted at this level.
    LOGGER.log(
        lvl,
        "exit %s (%.2f seconds) %s> %s%s",
        self.exit_code,
        self.elapsed,
        req.cwd,
        req.shell_cmd,
        metadata,
    )
@property
def elapsed(self) -> float:
    """:return: time the execution took in seconds"""
    return self.end - self.start
def out_err(self) -> tuple[str, str]:
    """:return: a tuple of the standard output and standard error"""
    return self.out, self.err
# Public names exported by this module (kept sorted alphabetically).
__all__ = (
    "ContentHandler",
    "Execute",
    "ExecuteInstance",
    "ExecuteOptions",
    "ExecuteStatus",
    "Outcome",
    "StdinSource",
)
| Outcome |
python | getsentry__sentry | src/sentry/monitors/endpoints/base_monitor_details.py | {
"start": 1266,
"end": 8232
} | class ____(BaseEndpointMixin):
def get_monitor(self, request: Request, project: Project, monitor: Monitor) -> Response:
    """
    Retrieves details for a monitor.
    """
    # Honor any environment filters and ?expand= parameters on the request.
    envs = get_environments(request, project.organization)
    expand_params = request.GET.getlist("expand", [])
    monitor_serializer = MonitorSerializer(environments=envs, expand=expand_params)
    return self.respond(serialize(monitor, request.user, monitor_serializer))
def update_monitor(
    self, request: AuthenticatedHttpRequest, project: Project, monitor: Monitor
) -> Response:
    """
    Update a monitor.
    """
    # partial=True: only fields present in the request payload are
    # validated/updated; the existing monitor instance supplies the rest.
    validator = MonitorValidator(
        data=request.data,
        partial=True,
        instance=monitor,
        context={
            "organization": project.organization,
            "access": request.access,
            "request": request,
            "monitor": monitor,
        },
    )
    if not validator.is_valid():
        return self.respond(validator.errors, status=400)
    try:
        updated_monitor = validator.save()
    except serializers.ValidationError as e:
        # save() may raise validation errors beyond what is_valid() caught;
        # surface them to the client as a 400 just like field-level errors.
        return self.respond(e.detail, status=400)
    return self.respond(serialize(updated_monitor, request.user))
def delete_monitor(self, request: Request, project: Project, monitor: Monitor) -> Response:
"""
Delete a monitor or monitor environments.
"""
environment_names = request.query_params.getlist("environment")
env_ids = None
if environment_names:
env_ids = list(
Environment.objects.filter(
organization_id=project.organization_id, name__in=environment_names
).values_list("id", flat=True)
)
with transaction.atomic(router.db_for_write(MonitorEnvironment)):
monitor_objects: QuerySet[MonitorEnvironment] | QuerySet[Monitor]
if env_ids:
monitor_objects = (
MonitorEnvironment.objects.filter(
environment_id__in=env_ids, monitor_id=monitor.id
)
.exclude(
monitor__status__in=[
ObjectStatus.PENDING_DELETION,
ObjectStatus.DELETION_IN_PROGRESS,
]
)
.exclude(
status__in=[
MonitorStatus.PENDING_DELETION,
MonitorStatus.DELETION_IN_PROGRESS,
]
)
.select_related("monitor")
)
event = audit_log.get_event_id("MONITOR_ENVIRONMENT_REMOVE")
issue_alert_rule_id = None
else:
monitor_objects = monitor_monitor_objects = Monitor.objects.filter(
id=monitor.id
).exclude(
status__in=[
ObjectStatus.PENDING_DELETION,
ObjectStatus.DELETION_IN_PROGRESS,
]
)
event = audit_log.get_event_id("MONITOR_REMOVE")
# Mark rule for deletion if present and monitor is being deleted
first_monitor = monitor_monitor_objects.first()
issue_alert_rule_id = (
first_monitor.config.get("alert_rule_id") if first_monitor else None
)
# create copy of queryset as update will remove objects
monitor_objects_list: list[MonitorEnvironment | Monitor] = list(monitor_objects)
if not monitor_objects or not monitor_objects.update(
status=ObjectStatus.PENDING_DELETION
):
return self.respond(status=404)
for monitor_object in monitor_objects_list:
# randomize slug on monitor deletion to prevent re-creation side effects
if isinstance(monitor_object, Monitor):
new_slug = get_random_string(length=24)
# we disable the monitor seat so that it can be re-used for another monitor
quotas.backend.disable_seat(DataCategory.MONITOR_SEAT, monitor)
quotas.backend.update_monitor_slug(monitor.slug, new_slug, monitor.project_id)
monitor_object.update(slug=new_slug)
with (
in_test_hide_transaction_boundary(),
atomic_transaction(
[
router.db_for_write(Rule),
router.db_for_write(Monitor),
router.db_for_write(Detector),
]
),
):
for monitor_object in monitor_objects_list:
if isinstance(monitor_object, Monitor):
ensure_cron_detector_deletion(monitor_object)
schedule = RegionScheduledDeletion.schedule(
monitor_object, days=0, actor=request.user
)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=monitor_object.id,
event=event,
data=monitor_object.get_audit_log_data(),
transaction_id=schedule.guid,
)
# Mark rule for deletion if present and monitor is being deleted
if issue_alert_rule_id:
rule = (
Rule.objects.filter(
project_id=monitor.project_id,
id=issue_alert_rule_id,
)
.exclude(
status__in=[
ObjectStatus.PENDING_DELETION,
ObjectStatus.DELETION_IN_PROGRESS,
]
)
.first()
)
if rule:
rule.update(status=ObjectStatus.PENDING_DELETION)
RuleActivity.objects.create(
rule=rule, user_id=request.user.id, type=RuleActivityType.DELETED.value
)
scheduled_rule = RegionScheduledDeletion.schedule(
rule, days=0, actor=request.user
)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=rule.id,
event=audit_log.get_event_id("RULE_REMOVE"),
data=rule.get_audit_log_data(),
transaction_id=scheduled_rule,
)
return self.respond(status=202)
| MonitorDetailsMixin |
python | kamyu104__LeetCode-Solutions | Python/palindrome-pairs.py | {
"start": 3209,
"end": 4074
} | class ____(object):
def __init__(self):
self.word_idx = -1
self.leaves = {}
def insert(self, word, i):
cur = self
for c in word:
if not c in cur.leaves:
cur.leaves[c] = TrieNode()
cur = cur.leaves[c]
cur.word_idx = i
def find(self, s, idx, res):
cur = self
for i in reversed(xrange(len(s))):
if s[i] in cur.leaves:
cur = cur.leaves[s[i]]
if cur.word_idx not in (-1, idx) and \
self.is_palindrome(s, i - 1):
res.append([cur.word_idx, idx])
else:
break
def is_palindrome(self, s, j):
i = 0
while i <= j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True
| TrieNode |
python | Netflix__metaflow | metaflow/_vendor/click/testing.py | {
"start": 2970,
"end": 12854
} | class ____(object):
"""The CLI runner provides functionality to invoke a Click command line
script for unittesting purposes in a isolated environment. This only
works in single-threaded systems without any concurrency as it changes the
global interpreter state.
:param charset: the character set for the input and output data. This is
UTF-8 by default and should not be changed currently as
the reporting to Click only works in Python 2 properly.
:param env: a dictionary with environment variables for overriding.
:param echo_stdin: if this is set to `True`, then reading from stdin writes
to stdout. This is useful for showing examples in
some circumstances. Note that regular prompts
will automatically echo the input.
:param mix_stderr: if this is set to `False`, then stdout and stderr are
preserved as independent streams. This is useful for
Unix-philosophy apps that have predictable stdout and
noisy stderr, such that each may be measured
independently
"""
def __init__(self, charset=None, env=None, echo_stdin=False, mix_stderr=True):
if charset is None:
charset = "utf-8"
self.charset = charset
self.env = env or {}
self.echo_stdin = echo_stdin
self.mix_stderr = mix_stderr
def get_default_prog_name(self, cli):
"""Given a command object it will return the default program name
for it. The default is the `name` attribute or ``"root"`` if not
set.
"""
return cli.name or "root"
def make_env(self, overrides=None):
"""Returns the environment overrides for invoking a script."""
rv = dict(self.env)
if overrides:
rv.update(overrides)
return rv
@contextlib.contextmanager
def isolation(self, input=None, env=None, color=False):
"""A context manager that sets up the isolation for invoking of a
command line tool. This sets up stdin with the given input data
and `os.environ` with the overrides from the given dictionary.
This also rebinds some internals in Click to be mocked (like the
prompt functionality).
This is automatically done in the :meth:`invoke` method.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param input: the input stream to put into sys.stdin.
:param env: the environment overrides as dictionary.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
input = make_input_stream(input, self.charset)
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
old_forced_width = formatting.FORCED_WIDTH
formatting.FORCED_WIDTH = 80
env = self.make_env(env)
if PY2:
bytes_output = StringIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
sys.stdout = bytes_output
if not self.mix_stderr:
bytes_error = StringIO()
sys.stderr = bytes_error
else:
bytes_output = io.BytesIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
input = io.TextIOWrapper(input, encoding=self.charset)
sys.stdout = io.TextIOWrapper(bytes_output, encoding=self.charset)
if not self.mix_stderr:
bytes_error = io.BytesIO()
sys.stderr = io.TextIOWrapper(bytes_error, encoding=self.charset)
if self.mix_stderr:
sys.stderr = sys.stdout
sys.stdin = input
def visible_input(prompt=None):
sys.stdout.write(prompt or "")
val = input.readline().rstrip("\r\n")
sys.stdout.write("{}\n".format(val))
sys.stdout.flush()
return val
def hidden_input(prompt=None):
sys.stdout.write("{}\n".format(prompt or ""))
sys.stdout.flush()
return input.readline().rstrip("\r\n")
def _getchar(echo):
char = sys.stdin.read(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
return char
default_color = color
def should_strip_ansi(stream=None, color=None):
if color is None:
return not default_color
return not color
old_visible_prompt_func = termui.visible_prompt_func
old_hidden_prompt_func = termui.hidden_prompt_func
old__getchar_func = termui._getchar
old_should_strip_ansi = utils.should_strip_ansi
termui.visible_prompt_func = visible_input
termui.hidden_prompt_func = hidden_input
termui._getchar = _getchar
utils.should_strip_ansi = should_strip_ansi
old_env = {}
try:
for key, value in iteritems(env):
old_env[key] = os.environ.get(key)
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
yield (bytes_output, not self.mix_stderr and bytes_error)
finally:
for key, value in iteritems(old_env):
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
termui.visible_prompt_func = old_visible_prompt_func
termui.hidden_prompt_func = old_hidden_prompt_func
termui._getchar = old__getchar_func
utils.should_strip_ansi = old_should_strip_ansi
formatting.FORCED_WIDTH = old_forced_width
def invoke(
self,
cli,
args=None,
input=None,
env=None,
catch_exceptions=True,
color=False,
**extra
):
"""Invokes a command in an isolated environment. The arguments are
forwarded directly to the command line script, the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
.. versionadded:: 3.0
The ``catch_exceptions`` parameter was added.
.. versionchanged:: 3.0
The result object now has an `exc_info` attribute with the
traceback if available.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param cli: the command to invoke
:param args: the arguments to invoke. It may be given as an iterable
or a string. When given as string it will be interpreted
as a Unix shell command. More details at
:func:`shlex.split`.
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
:param catch_exceptions: Whether to catch any other exceptions than
``SystemExit``.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
exc_info = None
with self.isolation(input=input, env=env, color=color) as outstreams:
exception = None
exit_code = 0
if isinstance(args, string_types):
args = shlex.split(args)
try:
prog_name = extra.pop("prog_name")
except KeyError:
prog_name = self.get_default_prog_name(cli)
try:
cli.main(args=args or (), prog_name=prog_name, **extra)
except SystemExit as e:
exc_info = sys.exc_info()
exit_code = e.code
if exit_code is None:
exit_code = 0
if exit_code != 0:
exception = e
if not isinstance(exit_code, int):
sys.stdout.write(str(exit_code))
sys.stdout.write("\n")
exit_code = 1
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = 1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
stdout = outstreams[0].getvalue()
if self.mix_stderr:
stderr = None
else:
stderr = outstreams[1].getvalue()
return Result(
runner=self,
stdout_bytes=stdout,
stderr_bytes=stderr,
exit_code=exit_code,
exception=exception,
exc_info=exc_info,
)
@contextlib.contextmanager
def isolated_filesystem(self):
"""A context manager that creates a temporary folder and changes
the current working directory to it for isolated filesystem tests.
"""
cwd = os.getcwd()
t = tempfile.mkdtemp()
os.chdir(t)
try:
yield t
finally:
os.chdir(cwd)
try:
shutil.rmtree(t)
except (OSError, IOError): # noqa: B014
pass
| CliRunner |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 2055,
"end": 2132
} | class ____(StringEnum):
input = "input"
output = "output"
| ModelTypeEnum |
python | tiangolo__fastapi | docs_src/body/tutorial004.py | {
"start": 87,
"end": 452
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
app = FastAPI()
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item, q: Union[str, None] = None):
result = {"item_id": item_id, **item.dict()}
if q:
result.update({"q": q})
return result
| Item |
python | ZoranPandovski__al-go-rithms | puzzles/Rubik's Cube/rubik's cube simulator.py | {
"start": 2933,
"end": 5477
} | class ____:
@staticmethod
def getFaceColor(face, smallDisplay, physicalSide):
toReturn = ""
if face == "U":
toReturn = bcolors.WHITE + physicalSide
elif face == "F":
toReturn = bcolors.GREEN + physicalSide
elif face == "R":
toReturn = bcolors.RED + physicalSide
elif face == "L":
toReturn = bcolors.ORANGE + physicalSide
elif face == "B":
toReturn = bcolors.BLUE + physicalSide
elif face == "D":
toReturn = bcolors.YELLOW + physicalSide
else:
toReturn = bcolors.WHITE + "X"
if not smallDisplay:
return toReturn * 3
else:
return toReturn * 2
@staticmethod
def isSolved():
for loc in locations1D.values():
if loc.id != loc.piece.id or loc.piece.rotation != 0:
return False
return True
@staticmethod
def reset():
for loc in locations1D.values():
loc.piece = None
loc.setPiece(Piece(loc.id))
@staticmethod
def move(move):
if len(move) == 2 and move[1] == "'":
moveData = {v:k for k,v in moves[move[0].upper()]["movements"].items()} # Swap keys and values of movement data... In effect this is moving in the opposite direction
else:
moveData = moves[move[0].upper()]["movements"]
rotations = moves[move[0].upper()]["rotations"]
toMove = []
rots = {}
for key, corners in rotations.items():
for c in corners:
rots[c] = key
for key, movement in moveData.items():
origin = Location.getLocation(key)
target = Location.getLocation(movement)
toMove.append({"piece": origin.piece, "origin": origin, "target": target})
for movement in toMove:
piece = movement["piece"]
movement["target"].setPiece(piece)
if piece.type == "CORNER" or piece.type == "EDGE":
piece.rotate(rots[movement["origin"].id])
if move == "r'" or move == "l":
Cube.move("M")
elif move == "r" or move == "l'":
Cube.move("M'")
elif move == "u" or move == "d'":
Cube.move("E'")
elif move == "u'" or move == "d":
Cube.move("E")
elif move == "f" or move == "b'":
Cube.move("S")
elif move == "f'" or move == "b":
Cube.move("S'")
| Cube |
python | astropy__astropy | astropy/table/tests/test_init_table.py | {
"start": 3992,
"end": 4084
} | class ____(BaseInitFrom):
pass
@pytest.mark.usefixtures("table_type")
| BaseInitFromDictLike |
python | davidhalter__jedi | jedi/inference/gradual/base.py | {
"start": 11969,
"end": 13463
} | class ____(Value):
"""
In typeshed, some classes are defined like this:
Tuple: _SpecialForm = ...
Now this is not a real class, therefore we have to do some workarounds like
this class. Essentially this class makes it possible to goto that `Tuple`
name, without affecting anything else negatively.
"""
api_type = 'class'
def __init__(self, parent_context, tree_name):
super().__init__(
parent_context.inference_state,
parent_context
)
self._tree_name = tree_name
@property
def tree_node(self):
return self._tree_name
def get_filters(self, *args, **kwargs):
# TODO this is obviously wrong. Is it though?
class EmptyFilter(ClassFilter):
def __init__(self):
pass
def get(self, name, **kwargs):
return []
def values(self, **kwargs):
return []
yield EmptyFilter()
def py__class__(self):
# This might not be 100% correct, but it is good enough. The details of
# the typing library are not really an issue for Jedi.
return builtin_from_name(self.inference_state, 'type')
@property
def name(self):
return ValueName(self, self._tree_name)
def get_qualified_names(self):
return (self._tree_name.value,)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)
| _PseudoTreeNameClass |
python | catalyst-team__catalyst | tests/contrib/scripts/test_tune.py | {
"start": 342,
"end": 2668
} | class ____(nn.Module):
def __init__(self, in_features: int, num_hidden: int, out_features: int):
super().__init__()
self.net = nn.Sequential(
nn.Flatten(),
nn.Linear(in_features, num_hidden),
nn.Linear(num_hidden, out_features),
)
def forward(self, x):
return self.net(x)
def train_experiment_from_configs(*auxiliary_configs: str):
current_dir = Path(__file__).parent
main_config = str(current_dir / f"{Path(__file__).stem}.yml")
engine_configs_dir = current_dir.parent.parent / "pipelines" / "configs"
auxiliary_configs = " ".join(str(engine_configs_dir / c) for c in auxiliary_configs)
script = Path("catalyst", "contrib", "scripts", "tune.py")
cmd = f"python {script} -C {main_config} {auxiliary_configs} --n-trials 2"
subprocess.run(cmd.split(), check=True)
# Device
@mark.skipif(
not SETTINGS.optuna_required or not IS_CONFIGS_REQUIRED or not IS_CPU_REQUIRED,
reason="CPU device is not available",
)
def test_config_run_on_cpu():
train_experiment_from_configs("engine_cpu.yml")
@mark.skipif(
not SETTINGS.optuna_required
or not IS_CONFIGS_REQUIRED
or not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]),
reason="CUDA device is not available",
)
def test_config_run_on_torch_cuda0():
train_experiment_from_configs("engine_gpu.yml")
@mark.skipif(
not SETTINGS.optuna_required
or not IS_CONFIGS_REQUIRED
or not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
reason="No CUDA or AMP found",
)
def test_config_run_on_amp():
train_experiment_from_configs("engine_gpu_amp.yml")
@mark.skipif(
not SETTINGS.optuna_required
or not IS_CONFIGS_REQUIRED
or not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
reason="No CUDA>=2 found",
)
def test_config_run_on_torch_dp():
train_experiment_from_configs("engine_dp.yml")
@mark.skipif(
not SETTINGS.optuna_required
or not IS_CONFIGS_REQUIRED
or not all(
[
IS_DP_AMP_REQUIRED,
IS_CUDA_AVAILABLE,
NUM_CUDA_DEVICES >= 2,
SETTINGS.amp_required,
]
),
reason="No CUDA>=2 or AMP found",
)
def test_config_run_on_amp_dp():
train_experiment_from_configs("engine_dp_amp.yml")
| CustomModule |
python | realpython__materials | python-assignment-statements/point.py | {
"start": 0,
"end": 476
} | class ____:
@property
def x(self):
return self._x
@x.setter
def x(self, value):
try:
self._x = float(value)
except ValueError:
raise ValueError('"x" must be a number') from None
@property
def y(self):
return self._y
@y.setter
def y(self, value):
try:
self._y = float(value)
except ValueError:
raise ValueError('"y" must be a number') from None
| Point |
python | pytorch__pytorch | test/inductor/test_mem_estimation.py | {
"start": 6228,
"end": 13469
} | class ____(InductorTestCase):
def test_memory_tracker_original_order(self):
"""Test that MemoryTracker works correctly with original scheduling order and matches runtime profiling."""
def create_inputs_and_weights():
"""Create inputs and weights on CUDA."""
x = torch.randn(32, 100, device=GPU_TYPE)
w1 = torch.randn(100, 50, device=GPU_TYPE)
w2 = torch.randn(50, 10, device=GPU_TYPE)
return x, w1, w2
def fn(x, w1, w2):
# Create a simple function that allocates intermediate tensors
h1 = torch.matmul(x, w1) # Allocates h1
h2 = torch.relu(h1) # h1 can be freed, h2 allocated
out = torch.matmul(h2, w2) # h2 can be freed, out allocated
return out
with FakeTensorMode():
# Create inputs
x, w1, w2 = create_inputs_and_weights()
# Trace the function
fx_graph = make_fx(fn)(x, w1, w2)
# Test MemoryTracker with original order
memory_tracker = MemoryTracker(fx_graph.graph, device_filter=device_filter)
# Schedule nodes in original order
compute_nodes = [
node
for node in fx_graph.graph.nodes
if node.op not in ("placeholder", "get_attr", "output")
]
for node in compute_nodes:
memory_tracker.schedule_node(node)
memory_tracker_peak = memory_tracker.get_current_memory_bytes()
# Compare with runtime profiling using FakeTensorMemoryProfilerMode
profiler = FakeTensorMemoryProfilerMode(device_filter=device_filter)
with profiler:
x_runtime, w1_runtime, w2_runtime = create_inputs_and_weights()
result = fn(x_runtime, w1_runtime, w2_runtime)
del result
runtime_peak = profiler.max_memory
# Verify both approaches track meaningful memory usage
self.assertGreater(
memory_tracker_peak, 0, "MemoryTracker should track memory usage"
)
self.assertGreater(
runtime_peak, 0, "Runtime profiler should track memory usage"
)
def test_memory_tracker_different_scheduling(self):
"""Test that different scheduling orders produce different memory usage patterns."""
def foo(primals_1):
zeros = torch.zeros_like(primals_1) # Create zeros tensor
add_result = zeros + 1 # Use zeros (first use)
sum_result = zeros.sum() # Use zeros (second use)
cpu = torch.zeros([20], device="cpu")
cpu_2 = cpu + 1
return add_result, sum_result, cpu_2
with FakeTensorMode():
# Create input
primals_1 = torch.randn(1000, 1000, device=GPU_TYPE)
# Trace the function
fx_graph = make_fx(foo)(primals_1)
# Get compute nodes (excluding placeholders, get_attr, output)
compute_nodes = [
node
for node in fx_graph.graph.nodes
if node.op not in ("placeholder", "get_attr", "output")
]
# Test original order: zeros_like, add, sum
# zeros gets freed after sum (last use of zeros)
memory_tracker1 = MemoryTracker(fx_graph.graph, device_filter=device_filter)
memory_profile1 = []
initial_mem = memory_tracker1.get_current_memory_bytes()
for node in compute_nodes:
memory_tracker1.schedule_node(node)
memory_profile1.append(memory_tracker1.get_current_memory_bytes())
# use of primals should not deallocate
self.assertEqual(memory_profile1[0], initial_mem * 2)
# Test different order: zeros_like, sum, add
# zeros gets freed after add (last use of zeros in new order)
memory_tracker2 = MemoryTracker(fx_graph.graph, device_filter=device_filter)
memory_profile2 = []
# Alternative schedule: change which operation is the last use of zeros
# Original: zeros_like, add, sum (zeros freed after sum)
# Alternative: zeros_like, sum, add (zeros freed after add)
assert len(compute_nodes) == 5, (
f"Expected 3 compute nodes, got {len(compute_nodes)}"
)
reordered_nodes = [
compute_nodes[0], # zeros_like: zeros = torch.zeros_like(primals_1)
compute_nodes[2], # sum: sum_result = zeros.sum() (zeros still alive)
compute_nodes[
1
], # add: add_result = zeros + 1 (last use, zeros freed here)
compute_nodes[3], # cpu = torch.zeros([20], device="cpu")
compute_nodes[4], # cpu_2 = cpu + 1
]
for node in reordered_nodes:
memory_tracker2.schedule_node(node)
memory_profile2.append(memory_tracker2.get_current_memory_bytes())
# Compare peak memories
peak1 = max(memory_profile1)
peak2 = max(memory_profile2)
# Both should end with the same final memory (all intermediate tensors freed)
self.assertEqual(memory_profile1[-1], memory_profile2[-1])
# The profiles should be different, showing different memory patterns
self.assertNotEqual(
memory_profile1,
memory_profile2,
"Different scheduling should produce different memory profiles",
)
# The different scheduling should produce different peak memory!
# Original: zeros + add_result both alive → higher peak
# Reordered: zeros freed before add_result created → lower peak
self.assertGreater(
peak1, peak2, "Original order should have higher peak memory"
)
# Specifically, original has both zeros and add_result alive simultaneously
self.assertGreater(
memory_profile1[1],
memory_profile2[1],
"Original order keeps more tensors alive simultaneously",
)
# The reordered version should have lower intermediate memory usage
self.assertLess(
peak2,
peak1,
"Reordered schedule reduces peak memory through better deallocation timing",
)
# Verify the MemoryTracker correctly tracks different scheduling
# The first tracker should match since we tested accuracy against FakeTensorMemoryProfilerMode
self.assertLessEqual(
abs(memory_tracker1.peak_memory - peak1),
8,
"First tracker peak should match profile peak",
)
# The key test: profiles show different peaks due to different deallocation timing
self.assertNotEqual(
peak1, peak2, "Different scheduling produces different peak memory"
)
if __name__ == "__main__":
if HAS_GPU:
run_tests(needs="filelock")
| TestMemoryTracker |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/test_source.py | {
"start": 7764,
"end": 25819
} | class ____:
BASE_OBJECT_BODY = {
"createdAt": "2020-12-10T07:58:09.554Z",
"updatedAt": "2021-07-31T08:18:58.954Z",
"archived": False,
}
@staticmethod
def set_mock_properties(requests_mock, url, fake_properties_list):
properties_response = [
{
"json": [
{"name": property_name, "type": "string", "updatedAt": 1571085954360, "createdAt": 1565059306048}
for property_name in fake_properties_list
],
"status_code": 200,
},
]
requests_mock.register_uri("GET", url, properties_response)
# Mock the getter method that handles requests.
def get(self, url, api, params=None):
response = api._session.get(api.BASE_URL + url, params=params)
return api._parse_and_handle_errors(response)
def test_stream_with_splitting_properties_with_pagination(self, requests_mock, config, fake_properties_list):
"""
Check working stream `products` with large list of properties using new functionality with splitting properties
"""
mock_dynamic_schema_requests_with_skip(requests_mock, ["product"])
requests_mock.get("https://api.hubapi.com/crm/v3/schemas", json={}, status_code=200)
self.set_mock_properties(requests_mock, "/properties/v2/product/properties", fake_properties_list)
test_stream = find_stream("products", config)
property_slices = (fake_properties_list[:686], fake_properties_list[686:1351], fake_properties_list[1351:])
for property_slice in property_slices:
data = {p: "fake_data" for p in property_slice}
record_responses = [
{
"json": {
"results": [
{**self.BASE_OBJECT_BODY, **{"id": id, "properties": data}}
for id in ["6043593519", "1092593519", "1092593518", "1092593517", "1092593516"]
],
"paging": {},
},
"status_code": 200,
}
]
params = {
"archived": "false",
"properties": ",".join(property_slice),
"limit": 100,
}
stream_retriever = test_stream._stream_partition_generator._partition_factory._retriever
test_stream_url = stream_retriever.requester.url_base + "/" + stream_retriever.requester.get_path()
url = f"{test_stream_url}?{urlencode(params)}"
requests_mock.register_uri(
"GET",
url,
record_responses,
)
state = (
StateBuilder()
.with_stream_state(
"products",
{"updatedAt": "2006-01-01T00:03:18.336Z"},
)
.build()
)
stream_records = read_from_stream(config, "products", SyncMode.incremental, state).records
assert len(stream_records) == 5
for record_ab_message in stream_records:
record = record_ab_message.record.data
assert len(record["properties"]) == NUMBER_OF_PROPERTIES
properties = [field for field in record if field.startswith("properties_")]
assert len(properties) == NUMBER_OF_PROPERTIES
@freezegun.freeze_time("2022-03-10T14:42:00Z") # less than one month after state date in test
def test_search_based_stream_should_not_attempt_to_get_more_than_10k_records(
requests_mock, config, fake_properties_list, mock_dynamic_schema_requests
):
"""
If there are more than 10,000 records that would be returned by the Hubspot search endpoint,
the CRMSearchStream instance should stop at the 10Kth record. 10k changed to 600 for testing purposes.
"""
requests_mock.get("https://api.hubapi.com/crm/v3/schemas", json={}, status_code=200)
responses = [
{
"json": {
"results": [{"id": f"{y}", "updatedAt": "2022-02-25T16:43:11Z"} for y in range(200)],
"paging": {
"next": {
"after": f"{x * 200}",
}
},
},
"status_code": 200,
}
for x in range(1, 3)
]
# Last page... it does not have paging->next->after
responses.append(
{
"json": {"results": [{"id": f"{y}", "updatedAt": "2022-03-01T00:00:00Z"} for y in range(200)], "paging": {}},
"status_code": 200,
}
)
# After reaching 1000 records, it performs a new search query.
responses.append(
{
"json": {
"results": [{"id": f"{y}", "updatedAt": "2022-03-01T00:00:00Z"} for y in range(200)],
"paging": {
"next": {
"after": "200",
}
},
},
"status_code": 200,
}
)
# Last page... it does not have paging->next->after
responses.append(
{
"json": {"results": [{"id": f"{y}", "updatedAt": "2022-03-01T00:00:00Z"} for y in range(200)], "paging": {}},
"status_code": 200,
}
)
properties_response = [
{
"json": [
{"name": property_name, "type": "string", "updatedAt": 1571085954360, "createdAt": 1565059306048}
for property_name in fake_properties_list
],
"status_code": 200,
}
]
# Create test_stream instance with some state
state = (
StateBuilder()
.with_stream_state(
"companies",
{"updatedAt": "2022-02-24T16:43:11Z"},
)
.build()
)
test_stream_url = "https://api.hubapi.com/crm/v3/objects/company/search"
requests_mock.register_uri("POST", test_stream_url, responses)
requests_mock.register_uri("GET", "/properties/v2/company/properties", properties_response)
requests_mock.register_uri(
"POST",
"/crm/v4/associations/company/contacts/batch/read",
[{"status_code": 200, "json": {"results": [{"from": {"id": "1"}, "to": [{"toObjectId": "2"}]}]}}],
)
requests_mock.register_uri(
"POST",
"/crm/v4/associations/company/contacts/batch/read",
[{"status_code": 200, "json": {"results": [{"from": {"id": "1"}, "to": [{"toObjectId": "2"}]}]}}],
)
with mock.patch("components.HubspotCRMSearchPaginationStrategy.RECORDS_LIMIT", 600):
output = read_from_stream(config, "companies", SyncMode.incremental, state)
# The stream should not attempt to get more than 600 records.
# Instead, it should use the new state to start a new search query.
assert len(output.records) == 1000
assert output.state_messages[1].state.stream.stream_state.updatedAt == "2022-03-01T00:00:00.000000Z"
def test_engagements_stream_pagination_works(requests_mock, config):
"""
Tests the engagements stream handles pagination correctly, for both
full_refresh and incremental sync modes.
"""
requests_mock.get("https://api.hubapi.com/crm/v3/schemas", json={}, status_code=200)
# Mocking Request
requests_mock.register_uri(
"GET",
"/engagements/v1/engagements/paged?count=250",
[
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": 1641234593251}} for y in range(250)],
"hasMore": True,
"offset": 250,
},
"status_code": 200,
},
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": 1641234593251}} for y in range(250, 500)],
"hasMore": True,
"offset": 500,
},
"status_code": 200,
},
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": 1641234595251}} for y in range(500, 600)],
"hasMore": False,
},
"status_code": 200,
},
],
)
requests_mock.register_uri(
"GET",
"/engagements/v1/engagements/recent/modified?count=250",
[
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": 1641234595252}} for y in range(100)],
"hasMore": True,
"offset": 100,
},
"status_code": 200,
},
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": 1641234595252}} for y in range(100, 200)],
"hasMore": True,
"offset": 200,
},
"status_code": 200,
},
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": 1641234595252}} for y in range(200, 250)],
"hasMore": False,
},
"status_code": 200,
},
],
)
# Create test_stream instance for full refresh.
test_stream = find_stream("engagements", config)
records = run_read(test_stream)
# The stream should handle pagination correctly and output 600 records.
assert len(records) == 600
test_stream = find_stream("engagements", config)
records = run_read(test_stream)
# The stream should handle pagination correctly and output 250 records.
assert len(records) == 100
def test_engagements_stream_since_old_date(mock_dynamic_schema_requests, requests_mock, fake_properties_list, config):
"""
Connector should use 'All Engagements' API for old dates (more than 30 days)
"""
requests_mock.get("https://api.hubapi.com/crm/v3/schemas", json={}, status_code=200)
old_date = 1614038400000 # Tuesday, 23 February 2021, 0:00:00
recent_date = 1645315200000
responses = [
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": recent_date}} for y in range(100)],
"hasMore": False,
"offset": 0,
"total": 100,
},
"status_code": 200,
}
]
# Mocking Request
requests_mock.register_uri("GET", "/engagements/v1/engagements/paged?count=250", responses)
state = (
StateBuilder()
.with_stream_state(
"engagements",
{"lastUpdated": old_date},
)
.build()
)
output = read_from_stream(config, "engagements", SyncMode.incremental, state)
assert len(output.records) == 100
assert int(output.state_messages[0].state.stream.stream_state.lastUpdated) == recent_date
def test_engagements_stream_since_recent_date(mock_dynamic_schema_requests, requests_mock, fake_properties_list, config):
"""
Connector should use 'Recent Engagements' API for recent dates (less than 30 days)
"""
requests_mock.get("https://api.hubapi.com/crm/v3/schemas", json={}, status_code=200)
recent_date = ab_datetime_now() - timedelta(days=10) # 10 days ago
recent_date = int(recent_date.timestamp() * 1000)
responses = [
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": recent_date}} for y in range(100)],
"hasMore": False,
"offset": 0,
"total": 100,
},
"status_code": 200,
}
]
state = StateBuilder().with_stream_state("engagements", {"lastUpdated": recent_date}).build()
# Mocking Request
engagement_url = f"/engagements/v1/engagements/recent/modified?count=250&since={recent_date}"
requests_mock.register_uri("GET", engagement_url, responses)
output = read_from_stream(config, "engagements", SyncMode.incremental, state)
# The stream should not attempt to get more than 10K records.
assert len(output.records) == 100
assert int(output.state_messages[0].state.stream.stream_state.lastUpdated) == recent_date
def test_engagements_stream_since_recent_date_more_than_10k(mock_dynamic_schema_requests, requests_mock, fake_properties_list, config):
"""
Connector should use 'Recent Engagements' API for recent dates (less than 30 days).
If response from 'Recent Engagements' API returns 10k records, it means that there more records,
so 'All Engagements' API should be used.
"""
requests_mock.get("https://api.hubapi.com/crm/v3/schemas", json={}, status_code=200)
recent_date = ab_datetime_now() - timedelta(days=10) # 10 days ago
recent_date = int(recent_date.timestamp() * 1000)
responses = [
{
"json": {
"results": [{"engagement": {"id": f"{y}", "lastUpdated": recent_date}} for y in range(100)],
"hasMore": False,
"offset": 0,
"total": 10001,
},
"status_code": 200,
}
]
state = StateBuilder().with_stream_state("engagements", {"lastUpdated": recent_date}).build()
# Mocking Request
engagement_url = f"/engagements/v1/engagements/recent/modified?count=250&since={recent_date}"
requests_mock.register_uri("GET", engagement_url, responses)
requests_mock.register_uri("GET", "/engagements/v1/engagements/paged?count=250", responses)
output = read_from_stream(config, "engagements", SyncMode.incremental, state)
assert len(output.records) == 100
assert int(output.state_messages[0].state.stream.stream_state.lastUpdated) == recent_date
def test_pagination_marketing_emails_stream(requests_mock, config):
"""
Test pagination for Marketing Emails stream using v3 API with includeStats=true
Verifies that statistics are included directly in the response (not merged from separate calls)
"""
requests_mock.get("https://api.hubapi.com/crm/v3/schemas", json={}, status_code=200)
requests_mock.register_uri(
"GET",
"/marketing/v3/emails?includeStats=true&limit=250",
[
{
"json": {
"results": [
{
"id": f"{y}",
"updated": 1641234593251,
# Statistics included directly with includeStats=true
"delivered": 100,
"opens": 50,
"clicks": 25,
"bounces": 5,
"optouts": 2,
}
for y in range(250)
],
"limit": 250,
"offset": 0,
"total": 600,
},
"status_code": 200,
},
{
"json": {
"results": [
{
"id": f"{y}",
"updated": 1641234593251,
# Statistics included directly with includeStats=true
"delivered": 100,
"opens": 50,
"clicks": 25,
"bounces": 5,
"optouts": 2,
}
for y in range(250, 500)
],
"limit": 250,
"offset": 250,
"total": 600,
},
"status_code": 200,
},
{
"json": {
"results": [
{
"id": f"{y}",
"updated": 1641234595251,
# Statistics included directly with includeStats=true
"delivered": 100,
"opens": 50,
"clicks": 25,
"bounces": 5,
"optouts": 2,
}
for y in range(500, 600)
],
"limit": 250,
"offset": 500,
"total": 600,
},
"status_code": 200,
},
],
)
# No longer need separate statistics endpoint mocks since includeStats=true
# includes statistics directly in the main response
test_stream = find_stream("marketing_emails", config)
records = run_read(test_stream)
# The stream should handle pagination correctly and output 600 records.
assert len(records) == 600
# Verify that statistics data is included directly in the email records
# (using includeStats=true parameter includes statistics in the main response)
sample_record = records[5]
# Assert that statistics fields are present in the record (from includeStats=true)
assert sample_record["delivered"] == 100, "Statistics 'delivered' field should be included with includeStats=true"
assert sample_record["opens"] == 50, "Statistics 'opens' field should be included with includeStats=true"
assert sample_record["clicks"] == 25, "Statistics 'clicks' field should be included with includeStats=true"
assert sample_record["bounces"] == 5, "Statistics 'bounces' field should be included with includeStats=true"
assert sample_record["optouts"] == 2, "Statistics 'optouts' field should be included with includeStats=true"
# Verify that the email record also has the base email fields
assert "id" in sample_record, "Email 'id' field should be present from /marketing/v3/emails endpoint"
assert "updated" in sample_record, "Email 'updated' field should be present from /marketing/v3/emails endpoint"
| TestSplittingPropertiesFunctionality |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_button04.py | {
"start": 315,
"end": 896
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("button04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.insert_button("C2", {})
worksheet2.insert_button("E5", {})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 4863,
"end": 5454
} | class ____(PreTrainedModel):
config: KyutaiSpeechToTextConfig
base_model_prefix = "model"
input_modalities = ("audio", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["KyutaiSpeechToTextDecoderLayer", "MimiTransformerLayer"]
_supports_flash_attn = True
_supports_sdpa = True
main_input_name = "input_ids"
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, KyutaiSpeechToTextFlexibleLinear):
init.normal_(module.weight)
| KyutaiSpeechToTextPreTrainedModel |
python | pytorch__pytorch | test/quantization/fx/test_model_report_fx.py | {
"start": 28890,
"end": 32843
} | class ____(QuantizationTestCase):
@skipIfNoFBGEMM
def test_nested_detection_case(self):
class SingleLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
class TwoBlockNet(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.block1 = SingleLinear()
self.block2 = SingleLinear()
def forward(self, x):
x = self.block1(x)
y = self.block2(x)
z = x + y
z = F.relu(z)
return z
with override_quantized_engine('fbgemm'):
# create model, example input, and qconfig mapping
torch.backends.quantized.engine = "fbgemm"
model = TwoBlockNet()
example_input = torch.randint(-10, 0, (1, 3, 3, 3))
example_input = example_input.to(torch.float)
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig("fbgemm"))
# prep model and select observer
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
obs_ctr = ModelReportObserver
# find layer to attach to and store
linear_fqn = "block2.linear" # fqn of target linear
target_linear = None
for node in model_prep.graph.nodes:
if node.target == linear_fqn:
target_linear = node
break
# insert into both module and graph pre and post
# set up to insert before target_linear (pre_observer)
with model_prep.graph.inserting_before(target_linear):
obs_to_insert = obs_ctr()
pre_obs_fqn = linear_fqn + ".model_report_pre_observer"
model_prep.add_submodule(pre_obs_fqn, obs_to_insert)
model_prep.graph.create_node(op="call_module", target=pre_obs_fqn, args=target_linear.args)
# set up and insert after the target_linear (post_observer)
with model_prep.graph.inserting_after(target_linear):
obs_to_insert = obs_ctr()
post_obs_fqn = linear_fqn + ".model_report_post_observer"
model_prep.add_submodule(post_obs_fqn, obs_to_insert)
model_prep.graph.create_node(op="call_module", target=post_obs_fqn, args=(target_linear,))
# need to recompile module after submodule added and pass input through
model_prep.recompile()
num_iterations = 10
for i in range(num_iterations):
if i % 2 == 0:
example_input = torch.randint(-10, 0, (1, 3, 3, 3)).to(torch.float)
else:
example_input = torch.randint(0, 10, (1, 3, 3, 3)).to(torch.float)
model_prep(example_input)
# run it through the dynamic vs static detector
dynamic_vs_static_detector = DynamicStaticDetector()
dynam_vs_stat_str, dynam_vs_stat_dict = dynamic_vs_static_detector.generate_detector_report(model_prep)
# one of the stats should be stationary, and the other non-stationary
# as a result, dynamic should be recommended
data_dist_info = [
dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.PRE_OBS_DATA_DIST_KEY],
dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.POST_OBS_DATA_DIST_KEY],
]
self.assertTrue("stationary" in data_dist_info)
self.assertTrue("non-stationary" in data_dist_info)
self.assertTrue(dynam_vs_stat_dict[linear_fqn]["dynamic_recommended"])
| TestFxModelReportDetectDynamicStatic |
python | huggingface__transformers | src/transformers/modeling_layers.py | {
"start": 9349,
"end": 11566
} | class ____:
base_model_prefix = "model"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
# Similar to `self.model = AutoModel.from_config(config)` but allows to change the base model name if needed in the child class
setattr(self, self.base_model_prefix, AutoModel.from_config(config))
if getattr(config, "classifier_dropout", None) is not None:
classifier_dropout = config.classifier_dropout
elif getattr(config, "hidden_dropout", None) is not None:
classifier_dropout = config.hidden_dropout
else:
classifier_dropout = 0.1
self.dropout = nn.Dropout(classifier_dropout)
self.score = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> TokenClassifierOutput:
outputs: BaseModelOutputWithPast = getattr(self, self.base_model_prefix)(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
**kwargs,
)
sequence_output = outputs.last_hidden_state
sequence_output = self.dropout(sequence_output)
logits = self.score(sequence_output)
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.config)
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| GenericForTokenClassification |
python | kamyu104__LeetCode-Solutions | Python/minimum-array-changes-to-make-differences-equal.py | {
"start": 64,
"end": 903
} | class ____(object):
def minChanges(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
diff = [0]*((k+1)+1)
def update(left, right, d):
diff[left] += d
diff[right+1] -= d
for i in xrange(len(nums)//2):
curr = abs(nums[i]-nums[~i])
mx = max(nums[i]-0, k-nums[i], nums[~i]-0, k-nums[~i])
# 1 change for i in range(0, curr)
update(0, curr-1, 1)
# 1 change for i in range(curr+1, mx+1)
update(curr+1, mx, 1)
# 2 changes for i in range(mx+1, k+1)
update(mx+1, k, 2)
result = len(nums)//2
curr = 0
for i in xrange(k+1):
curr += diff[i]
result = min(result, curr)
return result
| Solution |
python | huggingface__transformers | tests/models/ernie4_5/test_modeling_ernie4_5.py | {
"start": 1698,
"end": 3511
} | class ____(unittest.TestCase):
def setup(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_ernie4_5_0p3B(self):
"""
An integration test for Ernie 4.5 0.3B.
"""
expected_texts = Expectations(
{
("xpu", 3): "User: Hey, are you conscious? Can you talk to me?\nAssistant: Hey! I'm here to help you with whatever you need. Are you feeling a bit overwhelmed or stressed? I'm here to listen and provide support.",
("cuda", None): "User: Hey, are you conscious? Can you talk to me?\nAssistant: Hey! I'm here to help you with whatever you need. Are you feeling a bit overwhelmed or stressed? I'm here to listen and provide support.",
}
) # fmt: skip
EXPECTED_TEXT = expected_texts.get_expectation()
tokenizer = AutoTokenizer.from_pretrained("baidu/ERNIE-4.5-0.3B-PT", revision="refs/pr/3")
model = Ernie4_5ForCausalLM.from_pretrained(
"baidu/ERNIE-4.5-0.3B-PT",
device_map="auto",
dtype=torch.bfloat16,
)
prompt = "Hey, are you conscious? Can you talk to me?"
messages = [{"role": "user", "content": prompt}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
model_inputs = tokenizer([text], add_special_tokens=False, return_tensors="pt").to(model.device)
generated_ids = model.generate(
model_inputs.input_ids,
max_new_tokens=128,
do_sample=False,
)
generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True).strip("\n")
self.assertEqual(generated_text, EXPECTED_TEXT)
| Ernie4_5IntegrationTest |
python | matplotlib__matplotlib | lib/matplotlib/testing/compare.py | {
"start": 2941,
"end": 4558
} | class ____(_Converter):
def __call__(self, orig, dest):
if not self._proc:
self._proc = subprocess.Popen(
[mpl._get_executable_info("gs").executable,
"-dNOSAFER", "-dNOPAUSE", "-dEPSCrop", "-sDEVICE=png16m"],
# As far as I can see, ghostscript never outputs to stderr.
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
try:
self._read_until(b"\nGS")
except _ConverterError as e:
raise OSError(f"Failed to start Ghostscript:\n\n{e.args[0]}") from None
def encode_and_escape(name):
return (os.fsencode(name)
.replace(b"\\", b"\\\\")
.replace(b"(", br"\(")
.replace(b")", br"\)"))
self._proc.stdin.write(
b"<< /OutputFile ("
+ encode_and_escape(dest)
+ b") >> setpagedevice ("
+ encode_and_escape(orig)
+ b") run flush\n")
self._proc.stdin.flush()
# GS> if nothing left on the stack; GS<n> if n items left on the stack.
err = self._read_until((b"GS<", b"GS>"))
stack = self._read_until(b">") if err.endswith(b"GS<") else b""
if stack or not os.path.exists(dest):
stack_size = int(stack[:-1]) if stack else 0
self._proc.stdin.write(b"pop\n" * stack_size)
# Using the systemencoding should at least get the filenames right.
raise ImageComparisonFailure(
(err + stack).decode(sys.getfilesystemencoding(), "replace"))
| _GSConverter |
python | pytorch__pytorch | torch/utils/_pytree.py | {
"start": 23988,
"end": 25339
} | class ____:
name: str
def __str__(self) -> str:
return f".{self.name}"
def get(self, obj: Any) -> Any:
return getattr(obj, self.name)
# Reference: https://github.com/metaopt/optree/blob/main/optree/typing.py
def is_namedtuple(obj: object | type) -> bool:
"""Return whether the object is an instance of namedtuple or a subclass of namedtuple."""
cls = obj if isinstance(obj, type) else type(obj)
return is_namedtuple_class(cls)
# Reference: https://github.com/metaopt/optree/blob/main/optree/typing.py
def is_namedtuple_class(cls: type) -> bool:
"""Return whether the class is a subclass of namedtuple."""
return (
isinstance(cls, type)
and issubclass(cls, tuple)
and isinstance(getattr(cls, "_fields", None), tuple)
and all(type(field) is str for field in cls._fields) # type: ignore[attr-defined]
and callable(getattr(cls, "_make", None))
and callable(getattr(cls, "_asdict", None))
)
# Reference: https://github.com/metaopt/optree/blob/main/optree/typing.py
def is_namedtuple_instance(obj: object) -> bool:
"""Return whether the object is an instance of namedtuple."""
return is_namedtuple_class(type(obj))
_T_co = TypeVar("_T_co", covariant=True)
# Reference: https://github.com/metaopt/optree/blob/main/optree/typing.py
| GetAttrKey |
python | django__django | tests/m2m_signals/models.py | {
"start": 451,
"end": 696
} | class ____(models.Model):
name = models.CharField(max_length=20)
fans = models.ManyToManyField("self", related_name="idols", symmetrical=False)
friends = models.ManyToManyField("self")
class Meta:
ordering = ("name",)
| Person |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 10159,
"end": 10250
} | class ____(BitwiseShiftOperation):
pass
@infer_global(operator.irshift)
| BitwiseRightShift |
python | huggingface__transformers | tests/utils/test_core_model_loading.py | {
"start": 6302,
"end": 6452
} | class ____(nn.Module):
def __init__(self, shape):
super().__init__()
self.weight = nn.Parameter(torch.zeros(shape))
| DummyParamModule |
python | Textualize__textual | src/textual/worker.py | {
"start": 722,
"end": 794
} | class ____(Exception):
"""There is no active worker."""
| NoActiveWorker |
python | python-poetry__poetry | src/poetry/config/config.py | {
"start": 3798,
"end": 13872
} | class ____:
default_config: ClassVar[dict[str, Any]] = {
"cache-dir": str(DEFAULT_CACHE_DIR),
"data-dir": str(data_dir()),
"virtualenvs": {
"create": True,
"in-project": None,
"path": os.path.join("{cache-dir}", "virtualenvs"),
"options": {
"always-copy": False,
"system-site-packages": False,
"no-pip": False,
},
"use-poetry-python": False,
"prompt": "{project_name}-py{python_version}",
},
"requests": {
"max-retries": 0,
},
"installer": {
"re-resolve": True,
"parallel": True,
"max-workers": None,
"no-binary": None,
"only-binary": None,
"build-config-settings": {},
},
"python": {"installation-dir": os.path.join("{data-dir}", "python")},
"solver": {
"lazy-wheel": True,
},
"system-git-client": False,
"keyring": {
"enabled": True,
},
}
def __init__(self, use_environment: bool = True) -> None:
self._config = deepcopy(self.default_config)
self._use_environment = use_environment
self._config_source: ConfigSource = DictConfigSource()
self._auth_config_source: ConfigSource = DictConfigSource()
@property
def config(self) -> dict[str, Any]:
return self._config
@property
def config_source(self) -> ConfigSource:
return self._config_source
@property
def auth_config_source(self) -> ConfigSource:
return self._auth_config_source
def set_config_source(self, config_source: ConfigSource) -> Config:
self._config_source = config_source
return self
def set_auth_config_source(self, config_source: ConfigSource) -> Config:
self._auth_config_source = config_source
return self
def merge(self, config: dict[str, Any]) -> None:
from poetry.utils.helpers import merge_dicts
merge_dicts(self._config, config)
def all(self) -> dict[str, Any]:
def _all(config: dict[str, Any], parent_key: str = "") -> dict[str, Any]:
all_ = {}
for key in config:
value = self.get(parent_key + key)
if isinstance(value, dict):
if parent_key != "":
current_parent = parent_key + key + "."
else:
current_parent = key + "."
all_[key] = _all(config[key], parent_key=current_parent)
continue
all_[key] = value
return all_
return _all(self.config)
def raw(self) -> dict[str, Any]:
return self._config
@staticmethod
def _get_environment_repositories() -> dict[str, dict[str, str]]:
repositories = {}
pattern = re.compile(r"POETRY_REPOSITORIES_(?P<name>[A-Z_]+)_URL")
for env_key in os.environ:
match = pattern.match(env_key)
if match:
repositories[match.group("name").lower().replace("_", "-")] = {
"url": os.environ[env_key]
}
return repositories
@staticmethod
def _get_environment_build_config_settings() -> Mapping[
NormalizedName, Mapping[str, str | Sequence[str]]
]:
build_config_settings = {}
pattern = re.compile(r"POETRY_INSTALLER_BUILD_CONFIG_SETTINGS_(?P<name>[^.]+)")
for env_key in os.environ:
if match := pattern.match(env_key):
if not build_config_setting_validator(os.environ[env_key]):
logger.debug(
"Invalid value set for environment variable %s", env_key
)
continue
build_config_settings[canonicalize_name(match.group("name"))] = (
build_config_setting_normalizer(os.environ[env_key])
)
return build_config_settings
@property
def repository_cache_directory(self) -> Path:
return Path(self.get("cache-dir")).expanduser() / "cache" / "repositories"
@property
def artifacts_cache_directory(self) -> Path:
return Path(self.get("cache-dir")).expanduser() / "artifacts"
@property
def virtualenvs_path(self) -> Path:
path = self.get("virtualenvs.path")
if path is None:
path = Path(self.get("cache-dir")) / "virtualenvs"
return Path(path).expanduser()
@property
def python_installation_dir(self) -> Path:
path = self.get("python.installation-dir")
if path is None:
path = Path(self.get("data-dir")) / "python"
return Path(path).expanduser()
@property
def installer_max_workers(self) -> int:
# This should be directly handled by ThreadPoolExecutor
# however, on some systems the number of CPUs cannot be determined
# (it raises a NotImplementedError), so, in this case, we assume
# that the system only has one CPU.
try:
default_max_workers = (os.cpu_count() or 1) + 4
except NotImplementedError:
default_max_workers = 5
desired_max_workers = self.get("installer.max-workers")
if desired_max_workers is None:
return default_max_workers
return min(default_max_workers, int(desired_max_workers))
def get(self, setting_name: str, default: Any = None) -> Any:
"""
Retrieve a setting value.
"""
keys = setting_name.split(".")
build_config_settings: Mapping[
NormalizedName, Mapping[str, str | Sequence[str]]
] = {}
# Looking in the environment if the setting
# is set via a POETRY_* environment variable
if self._use_environment:
if setting_name == "repositories":
# repositories setting is special for now
repositories = self._get_environment_repositories()
if repositories:
return repositories
build_config_settings_key = "installer.build-config-settings"
if setting_name == build_config_settings_key or setting_name.startswith(
f"{build_config_settings_key}."
):
build_config_settings = self._get_environment_build_config_settings()
else:
env = "POETRY_" + "_".join(k.upper().replace("-", "_") for k in keys)
env_value = os.getenv(env)
if env_value is not None:
return self.process(self._get_normalizer(setting_name)(env_value))
value = self._config
# merge installer build config settings from the environment
for package_name in build_config_settings:
value["installer"]["build-config-settings"][package_name] = (
build_config_settings[package_name]
)
for key in keys:
if key not in value:
return self.process(default)
value = value[key]
if self._use_environment and isinstance(value, dict):
# this is a configuration table, it is likely that we missed env vars
# in order to capture them recurse, eg: virtualenvs.options
return {k: self.get(f"{setting_name}.{k}") for k in value}
return self.process(value)
def process(self, value: Any) -> Any:
if not isinstance(value, str):
return value
def resolve_from_config(match: re.Match[str]) -> Any:
key = match.group(1)
config_value = self.get(key)
if config_value:
return config_value
# The key doesn't exist in the config but might be resolved later,
# so we keep it as a format variable.
return f"{{{key}}}"
return re.sub(r"{(.+?)}", resolve_from_config, value)
@staticmethod
def _get_normalizer(name: str) -> Callable[[str], Any]:
if name in {
"virtualenvs.create",
"virtualenvs.in-project",
"virtualenvs.options.always-copy",
"virtualenvs.options.no-pip",
"virtualenvs.options.system-site-packages",
"virtualenvs.use-poetry-python",
"installer.re-resolve",
"installer.parallel",
"solver.lazy-wheel",
"system-git-client",
"keyring.enabled",
}:
return boolean_normalizer
if name == "virtualenvs.path":
return lambda val: str(Path(val))
if name in {
"installer.max-workers",
"requests.max-retries",
}:
return int_normalizer
if name in ["installer.no-binary", "installer.only-binary"]:
return PackageFilterPolicy.normalize
if name.startswith("installer.build-config-settings."):
return build_config_setting_normalizer
return lambda val: val
@classmethod
def create(cls, reload: bool = False) -> Config:
global _default_config
if _default_config is None or reload:
_default_config = cls()
# Load global config
config_file = TOMLFile(CONFIG_DIR / "config.toml")
if config_file.exists():
logger.debug("Loading configuration file %s", config_file.path)
_default_config.merge(config_file.read())
_default_config.set_config_source(FileConfigSource(config_file))
# Load global auth config
auth_config_file = TOMLFile(CONFIG_DIR / "auth.toml")
if auth_config_file.exists():
logger.debug("Loading configuration file %s", auth_config_file.path)
_default_config.merge(auth_config_file.read())
_default_config.set_auth_config_source(FileConfigSource(auth_config_file))
return _default_config
| Config |
python | pytorch__pytorch | test/test_schema_check.py | {
"start": 3823,
"end": 21285
} | class ____(JitTestCase):
def setUp(self):
if TEST_WITH_TORCHDYNAMO:
self.skipTest("SchemaCheckMode is ignored by dynamo")
super().setUp()
# Tests that SchemaCheckMode records operator order with grad
def test_schema_check_mode_operator_order(self):
with SchemaCheckMode() as schema_check:
x = torch.rand((3, 3), requires_grad=True)
x.relu().sin()
self.assertEqual(["aten::rand", "aten::relu", "aten::detach", "aten::sin"], schema_check.ops)
# Tests that SchemaCheckMode records operator order without grad
def test_schema_check_mode_operator_order_without_grad(self):
with SchemaCheckMode() as schema_check:
x = torch.rand((3, 3), requires_grad=False)
x.relu().sin()
self.assertEqual(["aten::rand", "aten::relu", "aten::sin"], schema_check.ops)
# Tests that SchemaCheckMode records mutations and aliases with none expected
def test_schema_check_mode_mutated_aliasing_none(self):
# NB: previously requires_grad=True, but this induces a detach for
# saved variable
x = torch.rand((3, 3))
with SchemaCheckMode() as schema_check:
actual = x.relu().sin()
self.assertEqual([], schema_check.mutated)
self.assertEqual([], schema_check.aliasing)
# Tests that SchemaCheckMode records mutations and aliases with mutation expected
def test_schema_check_mode_mutated_aliasing_mutation(self):
actual = torch.rand((3, 3), requires_grad=False)
with SchemaCheckMode() as schema_check:
actual.sinh_()
self.assertEqual([('aten::sinh_', 'input')], schema_check.mutated)
self.assertEqual([('aten::sinh_', 'input', 'output_0')], schema_check.aliasing)
# Tests that SchemaCheckMode records mutations and aliases with resize_
def test_schema_check_mode_mutated_aliasing_resize_(self):
actual = torch.rand((3, 3), requires_grad=False)
with SchemaCheckMode() as schema_check:
actual.resize_(9)
self.assertEqual([('aten::resize_', 'input')], schema_check.mutated)
self.assertEqual([('aten::resize_', 'input', 'output_0')], schema_check.aliasing)
# Tests that SchemaCheckMode records mutations and aliases with aliasing inputs
def test_schema_check_mode_mutated_aliasing_aliasing_inputs(self):
actual = torch.rand((3, 3))
y = actual
with SchemaCheckMode() as schema_check:
actual.add_(y)
self.assertEqual(
[
('aten::add_', 'input'),
('aten::add_', 'other')
],
schema_check.mutated
)
self.assertEqual(
[
('aten::add_', 'input', 'output_0'),
('aten::add_', 'other', 'output_0')
],
schema_check.aliasing
)
# Tests that SchemaCheckMode records mutations and alias with as_strided
def test_schema_check_mode_mutated_aliasing_as_strided(self):
x = torch.rand((3, 6, 4))
with SchemaCheckMode() as schema_check:
x.as_strided_([3, 6, 4], [9, 1, 1])
self.assertEqual(
[
('aten::as_strided_', 'input')
],
schema_check.mutated
)
self.assertEqual(
[
('aten::as_strided_', 'input', 'output_0')
],
schema_check.aliasing
)
# Tests that SchemaCheckMode records mutations and aliases with multiple outputs
def test_schema_check_mode_mutated_aliasing_multiple_outputs(self):
x = torch.arange(9.)
m_actual = torch.arange(9.)
e_actual = torch.zeros([9], dtype=torch.int32)
with SchemaCheckMode() as schema_check:
torch.frexp(x, out=(m_actual, e_actual))
self.assertEqual(
[
('aten::frexp', 'mantissa'),
('aten::frexp', 'exponent')
],
schema_check.mutated
)
self.assertEqual(
[
('aten::frexp', 'mantissa', 'output_0'),
('aten::frexp', 'exponent', 'output_1')
],
schema_check.aliasing
)
# Tests that SchemaCheckMode records mutations and aliases with aliasing outputs
def test_schema_check_mode_mutated_aliasing_aliasing_outputs(self):
x = torch.rand((3, 3))
actual = torch.zeros(3)
with SchemaCheckMode() as schema_check:
torch.aminmax(x, dim=0, out=[actual, actual])
self.assertEqual(
[
('aten::aminmax', 'min'),
('aten::aminmax', 'max')
],
schema_check.mutated
)
self.assertEqual(
[
('aten::aminmax', 'min', 'output_0'),
('aten::aminmax', 'min', 'output_1'),
('aten::aminmax', 'max', 'output_0'),
('aten::aminmax', 'max', 'output_1')
],
schema_check.aliasing
)
# Tests that SchemaCheckMode wraps torch.Tensor
def test_schema_check_mode_functionality(self):
x = torch.rand((3, 3), requires_grad=True)
expected = x.relu().sin()
with SchemaCheckMode():
actual = x.relu().sin()
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overridden
def test_schema_check_mode_functionality_default_replaced(self):
x = torch.rand((3, 3), requires_grad=True)
expected = x.add(x, alpha=2)
with SchemaCheckMode():
actual = x.add(x, alpha=2)
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor when there is a Tensor[] argument
def test_schema_check_mode_functionality_list_input(self):
a = torch.rand((3, 3))
b = torch.rand((3, 3))
c = torch.rand((3, 3))
expected = torch.linalg.multi_dot([a, b, c])
with SchemaCheckMode():
actual = torch.linalg.multi_dot([a, b, c])
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor with an op that has the (a -> *) notation
def test_schema_check_mode_functionality_wildcard_after(self):
x = torch.rand((3, 3))
expected = x.chunk(6)
with SchemaCheckMode():
actual = x.chunk(6)
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor when there is a kwarg tensor input
@unittest.skipIf(not torch._C.has_spectral, "ATen not built with FFT.")
def test_schema_check_mode_functionality_kwarg_tensor(self):
x = torch.rand((3, 5))
w = torch.rand(4)
expected = torch.stft(x, 4, win_length=4, window=w, return_complex=True)
with SchemaCheckMode():
actual = torch.stft(x, 4, win_length=4, window=w, return_complex=True)
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps torch.Tensor with a mutable op
def test_schema_check_mode_functionality_mutable_inputs(self):
expected = torch.rand((3, 3), requires_grad=False)
actual = torch.clone(expected)
expected.sinh_()
with SchemaCheckMode():
actual.sinh_()
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor when inputs alias
def test_schema_check_mode_functionality_aliasing_inputs(self):
expected = torch.rand((3, 3))
x = expected
actual = torch.clone(expected)
y = actual
expected.add_(x)
with SchemaCheckMode():
actual.add_(y)
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor with multiple tensor outputs
def test_schema_check_mode_functionality_with_multiple_outputs(self):
x = torch.arange(9.)
m_expected, e_expected = torch.frexp(x)
m_actual = torch.arange(9.)
e_actual = torch.zeros([9], dtype=torch.int32)
with SchemaCheckMode():
torch.frexp(x, out=(m_actual, e_actual))
self.assertEqual(m_expected, m_actual)
self.assertEqual(e_expected, e_actual)
# Tests that SchemaCheckMode wraps Torch.tensor with aliasing outputs due to aliasing inputs
def test_schema_check_mode_functionality_with_multiple_outputs_aliasing(self):
x = torch.rand((3, 3))
actual = torch.zeros(3)
with SchemaCheckMode():
torch.aminmax(x, dim=0, out=[actual, actual])
self.assertEqual(torch.amax(x, dim=0), actual)
# Tests that SchemaCheckMode wraps Torch.tensor in ops with real Device input
def test_schema_check_mode_functionality_device_input(self):
with SchemaCheckMode():
x = torch.rand((3, 3), device="cpu", dtype=torch.double)
y = x + x
self.assertEqual(x + x, y)
# Tests that SchemaCheckMode wraps Torch.tensor in special training op edge case
def test_schema_check_mode_functionality_training_op(self):
x = torch.rand((3, 3), requires_grad=True)
batch = torch.nn.BatchNorm1d(3, track_running_stats=True)
expected = batch(x)
with SchemaCheckMode():
actual = batch(x)
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor with nested training op edge case
def test_schema_check_mode_functionality_nested_training_op(self):
actual = torch.rand((3, 3))
batch = torch.nn.BatchNorm1d(3, track_running_stats=True)
expected = torch.clone(actual)
expected.sinh_()
expected.tanh_()
expected.relu_()
expected = batch(expected)
with SchemaCheckMode():
actual.sinh_()
actual.tanh_()
actual.relu_()
actual = batch(actual)
self.assertEqual(expected, actual)
# Tests that SchemaCheckMode wraps Torch.tensor with empty list input
def test_schema_check_mode_empty_list_input(self):
expected = torch.atleast_1d([])
with SchemaCheckMode():
actual = torch.atleast_1d([])
self.assertEqual(expected, actual)
# Tests that an exception is raised for a mismatching mutation
def test_mutation_check_fail(self):
with self.assertRaisesRegex(RuntimeError, "Argument input is not defined as mutable but was mutated"):
x = torch.rand((3, 3))
y = torch.rand((3, 3))
with SchemaCheckMode():
IncorrectAliasTensor(x).sub(IncorrectAliasTensor(y))
# # Tests that an exception is raised for a mismatching mutation over multiple ops
def test_mutation_check_fail_multiple_operators(self):
with self.assertRaisesRegex(RuntimeError, "Argument input is not defined as mutable but was mutated"):
x = torch.rand((3, 3))
y = torch.rand((3, 3))
with SchemaCheckMode():
IncorrectAliasTensor(x).sin().cos().sub(IncorrectAliasTensor(y))
# Tests that an exception is raised for a mismatching alias
def test_alias_check_fail_simple(self):
with self.assertRaisesRegex(RuntimeError, "Argument input is not defined to alias output but was aliasing"):
x = torch.rand((3, 3), requires_grad=True)
y = torch.rand((3, 3))
with SchemaCheckMode():
IncorrectAliasTensor(x).add(IncorrectAliasTensor(y), alpha=2)
# Tests that an exception is raised for a mismatching alias over multiple ops
def test_alias_check_fail_multiple_operators(self):
with self.assertRaisesRegex(RuntimeError, "Argument input is not defined to alias output but was aliasing"):
x = torch.rand((3, 3), requires_grad=True)
y = torch.zeros((3, 3), requires_grad=True)
with SchemaCheckMode():
IncorrectAliasTensor(x).sin().relu().add(IncorrectAliasTensor(y), alpha=2)
# Tests that an exception is raised for a centered mismatching alias over multiple ops
def test_alias_check_fail_multiple_operators_centered(self):
with self.assertRaisesRegex(RuntimeError, "Argument input is not defined to alias output but was aliasing"):
x = torch.rand((3, 3), requires_grad=True)
y = torch.zeros((3, 3), requires_grad=True)
with SchemaCheckMode():
IncorrectAliasTensor(x).sin().add(IncorrectAliasTensor(y), alpha=2).relu()
# Tests that an exception is raised for a centered mismatching alias over multiple ops
def test_alias_check_fail_outputs_unexpectedly_aliasing(self):
with self.assertRaisesRegex(RuntimeError, "Outputs 0 and 1 alias unexpectedly"):
x = torch.rand((3, 3))
with SchemaCheckMode() as s:
IncorrectAliasTensor(x).aminmax(dim=0)
# When this file was written, python op registration didn't exist.
# It's probably worth re-writing the entire file to use it,
# but instead I just added extra tests.
def test_alias_check_fail_custom_ops_secretly_aliasing(self):
def f(x):
return torch.ops.bad_schemas.secretly_aliasing(x)
x = torch.rand((3, 3))
with self.assertRaisesRegex(RuntimeError, "not defined to alias output but was aliasing"):
with SchemaCheckMode() as s:
out = f(x)
def test_alias_check_fail_custom_ops_secretly_mutating(self):
def f(x):
return torch.ops.bad_schemas.secretly_mutating(x)
x = torch.rand((3, 3))
with self.assertRaisesRegex(RuntimeError, "not defined as mutable but was mutated"):
with SchemaCheckMode() as s:
out = f(x)
def test_alias_check_fail_custom_ops_output_is_input(self):
def f(x):
return torch.ops.bad_schemas.output_is_input(x)
x = torch.rand((3, 3))
with self.assertRaisesRegex(RuntimeError, "are not allowed to directly return inputs"):
with SchemaCheckMode() as s:
out = f(x)
# Tests that is_alias_of returns as expected
def test_is_alias_of_basic(self):
x = torch.rand((3, 3), requires_grad=True)
y = torch.rand((3, 3), requires_grad=True)
y = x.add(x, alpha=2)
self.assertTrue(torch._C._is_alias_of(x, x))
self.assertFalse(torch._C._is_alias_of(x, y))
# Tests that is_alias_of returns as expected with empty containers
def test_is_alias_of_empty_container(self):
x = []
y = torch.rand((3, 3), requires_grad=True)
self.assertFalse(torch._C._is_alias_of(x, x))
self.assertFalse(torch._C._is_alias_of(x, y))
# Tests that overlaps returns as expected
def test_overlaps_basic(self):
x = torch.rand((3, 3), requires_grad=True)
y = torch.rand((3, 3), requires_grad=True)
z = [x, y]
self.assertTrue(torch._C._overlaps(x, x))
self.assertFalse(torch._C._overlaps(x, y))
self.assertTrue(torch._C._overlaps(z, x))
self.assertTrue(torch._C._overlaps(z, y))
# Tests that overlaps returns correctly with empty containers
def test_overlaps_empty_container(self):
x = []
y = [torch.rand((3, 3), requires_grad=True)]
# Empty containers return false
self.assertFalse(torch._C._overlaps(y, x))
self.assertTrue(torch._C._overlaps(y, y))
# Tests that SchemaInfo Bindings work as expected
def test_schema_info_bind_basic(self):
class SchemaInfoBindTestMode(TorchDispatchMode):
def __init__(self, test_self):
self.test_self = test_self
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
named_arg_list = normalize_function(
func,
args,
kwargs,
normalize_to_only_use_kwargs=True
).kwargs
schema_info_value_test = torch._C._SchemaInfo(func._schema)
schema_info_values_test = torch._C._SchemaInfo(func._schema)
self.test_self.assertFalse(schema_info_value_test.may_alias(
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
self.test_self.assertFalse(schema_info_values_test.may_alias(
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
for i in named_arg_list:
schema_info_value_test.add_argument_value(i, named_arg_list[i])
schema_info_values_test.add_argument_values(named_arg_list)
self.test_self.assertTrue(schema_info_value_test.may_alias(
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
self.test_self.assertTrue(schema_info_values_test.may_alias(
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 0),
torch._C._SchemaArgument(torch._C._SchemaArgType.input, 1)))
return func(*args, **kwargs)
x = torch.rand((3, 3))
with SchemaInfoBindTestMode(self) as schemaInfoCheck:
x.add(x)
| TestSchemaCheck |
python | doocs__leetcode | solution/1700-1799/1749.Maximum Absolute Sum of Any Subarray/Solution.py | {
"start": 0,
"end": 244
} | class ____:
def maxAbsoluteSum(self, nums: List[int]) -> int:
f = g = 0
ans = 0
for x in nums:
f = max(f, 0) + x
g = min(g, 0) + x
ans = max(ans, f, abs(g))
return ans
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-pagerduty/dagster_pagerduty/resources.py | {
"start": 361,
"end": 7936
} | class ____(ConfigurableResource):
"""This resource is for posting events to PagerDuty."""
"""Integrates with PagerDuty via the pypd library.
See:
https://v2.developer.pagerduty.com/docs/events-api-v2
https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
https://support.pagerduty.com/docs/services-and-integrations#section-events-api-v2
https://github.com/PagerDuty/pagerduty-api-python-client
for documentation and more information.
"""
routing_key: str = PyField(
...,
description=(
"The routing key provisions access to your PagerDuty service. You"
"will need to include the integration key for your new integration, as a"
"routing_key in the event payload."
),
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def EventV2_create(
self,
summary: str,
source: str,
severity: str,
event_action: str = "trigger",
dedup_key: Optional[str] = None,
timestamp: Optional[str] = None,
component: Optional[str] = None,
group: Optional[str] = None,
event_class: Optional[str] = None,
custom_details: Optional[object] = None,
) -> object:
"""Events API v2 enables you to add PagerDuty's advanced event and incident management
functionality to any system that can make an outbound HTTP connection.
Args:
summary (str):
A high-level, text summary message of the event. Will be used to construct an
alert's description. Example:
"PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host
'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN"
source (str):
Specific human-readable unique identifier, such as a hostname, for the system having
the problem. Examples:
"prod05.theseus.acme-widgets.com"
"171.26.23.22"
"aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003"
"9c09acd49a25"
severity (str):
How impacted the affected system is. Displayed to users in lists and influences the
priority of any created incidents. Must be one of {info, warning, error, critical}
Keyword Args:
event_action (str):
There are three types of events that PagerDuty recognizes, and are used to represent
different types of activity in your monitored systems. (default: 'trigger')
* trigger: When PagerDuty receives a trigger event, it will either open a new alert,
or add a new trigger log entry to an existing alert, depending on the
provided dedup_key. Your monitoring tools should send PagerDuty a trigger
when a new problem has been detected. You may send additional triggers
when a previously detected problem has occurred again.
* acknowledge: acknowledge events cause the referenced incident to enter the
acknowledged state. While an incident is acknowledged, it won't
generate any additional notifications, even if it receives new
trigger events. Your monitoring tools should send PagerDuty an
acknowledge event when they know someone is presently working on the
problem.
* resolve: resolve events cause the referenced incident to enter the resolved state.
Once an incident is resolved, it won't generate any additional
notifications. New trigger events with the same dedup_key as a resolved
incident won't re-open the incident. Instead, a new incident will be
created. Your monitoring tools should send PagerDuty a resolve event when
the problem that caused the initial trigger event has been fixed.
dedup_key (str):
Deduplication key for correlating triggers and resolves. The maximum permitted
length of this property is 255 characters.
timestamp (str):
Timestamp (ISO 8601). When the upstream system detected / created the event. This is
useful if a system batches or holds events before sending them to PagerDuty. This
will be auto-generated by PagerDuty if not provided. Example:
2015-07-17T08:42:58.315+0000
component (str):
The part or component of the affected system that is broken. Examples:
"keepalive"
"webping"
"mysql"
"wqueue"
group (str):
A cluster or grouping of sources. For example, sources "prod-datapipe-02" and
"prod-datapipe-03" might both be part of "prod-datapipe". Examples:
"prod-datapipe"
"www"
"web_stack"
event_class (str):
The class/type of the event. Examples:
"High CPU"
"Latency"
"500 Error"
custom_details (Dict[str, str]):
Additional details about the event and affected system. Example:
{"ping time": "1500ms", "load avg": 0.75 }
"""
data = {
"routing_key": self.routing_key,
"event_action": event_action,
"payload": {"summary": summary, "source": source, "severity": severity},
}
if dedup_key is not None:
data["dedup_key"] = dedup_key
payload: dict[str, object] = cast("dict[str, object]", data["payload"])
if timestamp is not None:
payload["timestamp"] = timestamp
if component is not None:
payload["component"] = component
if group is not None:
payload["group"] = group
if event_class is not None:
payload["class"] = event_class
if custom_details is not None:
payload["custom_details"] = custom_details
return pypd.EventV2.create(data=data)
@dagster_maintained_resource
@resource(
config_schema=infer_schema_from_config_class(PagerDutyService),
description="""This resource is for posting events to PagerDuty.""",
)
@suppress_dagster_warnings
def pagerduty_resource(context) -> PagerDutyService:
"""A resource for posting events (alerts) to PagerDuty.
Example:
.. code-block:: python
@op
def pagerduty_op(pagerduty: PagerDutyService):
pagerduty.EventV2_create(
summary='alert from dagster',
source='localhost',
severity='error',
event_action='trigger',
)
@job(resource_defs={ 'pagerduty': pagerduty_resource })
def pagerduty_test():
pagerduty_op()
pagerduty_test.execute_in_process(
run_config={
"resources": {
'pagerduty': {'config': {'routing_key': '0123456789abcdef0123456789abcdef'}}
}
}
)
"""
return PagerDutyService(**context.resource_config)
| PagerDutyService |
python | great-expectations__great_expectations | great_expectations/data_context/data_context/cloud_data_context.py | {
"start": 2806,
"end": 3131
} | class ____(Exception):
def __init__(self):
super().__init__(
"Workspace id is not set and this user does not belong to exactly 1 workspace. "
f"Please set {GXCloudEnvironmentVariable.WORKSPACE_ID.value} or set it when "
"instantiating the context."
)
| WorkspaceNotSetError |
python | getsentry__sentry | tests/sentry/api/serializers/test_team.py | {
"start": 9750,
"end": 10964
} | class ____(TestCase):
def test_simple(self) -> None:
user = self.create_user(username="foo")
organization = self.create_organization()
team = self.create_team(organization=organization)
project = self.create_project(teams=[team], organization=organization, name="foo")
project2 = self.create_project(teams=[team], organization=organization, name="bar")
result = serialize(team, user, TeamWithProjectsSerializer())
serialized_projects = serialize(
[project2, project], user, ProjectSerializer(collapse=["unusedFeatures"])
)
assert result == {
"id": str(team.id),
"slug": team.slug,
"name": team.name,
"access": TEAM_CONTRIBUTOR["scopes"],
"hasAccess": True,
"isPending": False,
"isMember": False,
"teamRole": None,
"flags": {"idp:provisioned": False},
"projects": serialized_projects,
"avatar": {"avatarType": "letter_avatar", "avatarUuid": None},
"memberCount": 0,
"dateCreated": team.date_added,
"externalTeams": [],
}
| TeamWithProjectsSerializerTest |
python | openai__openai-python | src/openai/types/responses/response_function_tool_call_output_item.py | {
"start": 646,
"end": 1394
} | class ____(BaseModel):
id: str
"""The unique ID of the function call tool output."""
call_id: str
"""The unique ID of the function tool call generated by the model."""
output: Union[str, List[OutputOutputContentList]]
"""
The output from the function call generated by your code. Can be a string or an
list of output content.
"""
type: Literal["function_call_output"]
"""The type of the function tool call output. Always `function_call_output`."""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
| ResponseFunctionToolCallOutputItem |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/cpython/mac_os.py | {
"start": 11133,
"end": 11665
} | class ____(CPython3, CPythonPosix):
@classmethod
def can_describe(cls, interpreter):
return is_macos_brew(interpreter) and super().can_describe(interpreter)
@classmethod
def setup_meta(cls, interpreter): # noqa: ARG003
meta = BuiltinViaGlobalRefMeta()
meta.copy_error = "Brew disables copy creation: https://github.com/Homebrew/homebrew-core/issues/138159"
return meta
__all__ = [
"CPython3macOsBrew",
"CPython3macOsFramework",
"CPythonmacOsFramework",
]
| CPython3macOsBrew |
python | weaviate__weaviate-python-client | weaviate/str_enum.py | {
"start": 201,
"end": 509
} | class ____(EnumMeta):
def __contains__(cls, item: Any) -> bool:
try:
# when item is type ConsistencyLevel
return item.name in cls.__members__.keys()
except AttributeError:
# when item is type str
return item in cls.__members__.keys()
| MetaEnum |
python | lazyprogrammer__machine_learning_examples | rl2/atari/dqn_theano.py | {
"start": 5937,
"end": 6787
} | class ____(object):
def __init__(self, mi, mo, filtsz=5, stride=2, f=T.nnet.relu):
# mi = input feature map size
# mo = output feature map size
sz = (mo, mi, filtsz, filtsz)
W0 = init_filter(sz)
self.W = theano.shared(W0)
b0 = np.zeros(mo, dtype=np.float32)
self.b = theano.shared(b0)
self.stride = (stride, stride)
self.params = [self.W, self.b]
self.f = f
# self.cut = cut
def forward(self, X):
conv_out = conv2d(
input=X,
filters=self.W,
subsample=self.stride,
# border_mode='half',
border_mode='valid',
)
# cut off 1 pixel from each edge
# to make the output the same size as input
# like tensorflow
# if self.cut:
# conv_out = conv_out[:, : ,:self.cut ,:self.cut]
return self.f(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
| ConvLayer |
python | weaviate__weaviate-python-client | weaviate/collections/queries/fetch_objects/query/sync.py | {
"start": 314,
"end": 461
} | class ____(
Generic[Properties, References],
_FetchObjectsQueryExecutor[ConnectionSync, Properties, References],
):
pass
| _FetchObjectsQuery |
python | encode__django-rest-framework | tests/test_api_client.py | {
"start": 3879,
"end": 4041
} | class ____(APIView):
renderer_classes = [CoreJSONRenderer]
def get(self, request):
schema = get_schema()
return Response(schema)
| SchemaView |
python | huggingface__transformers | src/transformers/models/videomae/modeling_videomae.py | {
"start": 14900,
"end": 15448
} | class ____(nn.Module):
def __init__(self, config: VideoMAEConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor) -> BaseModelOutput:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
return BaseModelOutput(last_hidden_state=hidden_states)
@auto_docstring
| VideoMAEEncoder |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_artifacts.py | {
"start": 25780,
"end": 29738
} | class ____:
async def test_counting_artifacts(self, artifacts, session):
count = await models.artifacts.count_artifacts(session=session)
assert count == 5
async def test_counting_single_artifact(self, artifact, session):
count = await models.artifacts.count_artifacts(session=session)
assert count == 1
async def test_count_artifacts_with_artifact_filter(self, artifacts, client):
key_filter = dict(
artifacts=schemas.filters.ArtifactFilter(
key=schemas.filters.ArtifactFilterKey(
any_=[artifacts[0]["key"], artifacts[1]["key"]]
)
).model_dump(mode="json"),
)
response = await client.post("/artifacts/count", json=key_filter)
assert response.status_code == 200
assert response.json() == 2
async def test_count_artifacts_with_flow_run_filter(self, artifacts, client):
flow_run_filter = dict(
flow_runs=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[artifacts[0]["flow_run_id"]])
).model_dump(mode="json"),
)
response = await client.post("/artifacts/count", json=flow_run_filter)
assert response.status_code == 200
assert response.json() == 2
async def test_count_artifacts_with_task_run_filter(
self,
artifacts,
client,
):
task_run_filter = dict(
task_runs=schemas.filters.TaskRunFilter(
id=schemas.filters.TaskRunFilterId(any_=[artifacts[0]["task_run_id"]])
).model_dump(mode="json"),
)
response = await client.post("/artifacts/count", json=task_run_filter)
assert response.status_code == 200
assert response.json() == 1
async def test_count_artifacts_by_flow_name(self, flow_artifacts, client):
flow_name = flow_artifacts[0]["name"]
flow_filter = dict(
flows=schemas.filters.FlowFilter(
name=schemas.filters.FlowFilterName(any_=[flow_name])
).model_dump(mode="json")
)
response = await client.post("/artifacts/count", json=flow_filter)
assert response.status_code == status.HTTP_200_OK
json = response.json()
assert json == 2
async def test_count_artifacts_by_deployment(self, flow_artifacts, client):
deployment_id = flow_artifacts[3]
deployment_filter = dict(
deployments=schemas.filters.DeploymentFilter(
id=schemas.filters.DeploymentFilterId(any_=[deployment_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/count", json=deployment_filter)
assert response.status_code == status.HTTP_200_OK
json = response.json()
assert json == 2
async def test_counting_latest_artifacts_by_flow_name(self, flow_artifacts, client):
flow_name = flow_artifacts[0]["name"]
flow_filter = dict(
flows=schemas.filters.FlowFilter(
name=schemas.filters.FlowFilterName(any_=[flow_name])
).model_dump(mode="json")
)
response = await client.post("/artifacts/latest/count", json=flow_filter)
assert response.status_code == status.HTTP_200_OK
json = response.json()
assert json == 1
async def test_counting_latest_artifacts_by_deployment(
self, flow_artifacts, client
):
deployment_id = flow_artifacts[3]
deployment_filter = dict(
deployments=schemas.filters.DeploymentFilter(
id=schemas.filters.DeploymentFilterId(any_=[deployment_id])
).model_dump(mode="json")
)
response = await client.post("/artifacts/latest/count", json=deployment_filter)
assert response.status_code == status.HTTP_200_OK
json = response.json()
assert json == 1
| TestCountArtifacts |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_data.py | {
"start": 1779,
"end": 1893
} | class ____(ValueError):
"""Raise an error that redshift data query was aborted."""
| RedshiftDataQueryAbortedError |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/ecr/resources.py | {
"start": 1130,
"end": 1721
} | class ____(ECRPublicClient):
def get_login_password(self):
with Stubber(self.client) as stubber:
stubber.add_response(
method="get_authorization_token",
service_response={
"authorizationData": {
"authorizationToken": "token",
"expiresAt": datetime.datetime.now(),
}
},
)
result = super().get_login_password()
stubber.assert_no_pending_responses()
return result
@beta
| FakeECRPublicClient |
python | mlflow__mlflow | mlflow/data/meta_dataset.py | {
"start": 178,
"end": 3742
} | class ____(Dataset):
"""Dataset that only contains metadata.
This class is used to represent a dataset that only contains metadata, which is useful when
users only want to log metadata to MLflow without logging the actual data. For example, users
build a custom dataset from a text file publicly hosted in the Internet, and they want to log
the text file's URL to MLflow for future tracking instead of the dataset itself.
Args:
source: dataset source of type `DatasetSource`, indicates where the data is from.
name: name of the dataset. If not specified, a name is automatically generated.
digest: digest (hash, fingerprint) of the dataset. If not specified, a digest is
automatically computed.
schame: schema of the dataset.
.. code-block:: python
:caption: Create a MetaDataset
import mlflow
mlflow.set_experiment("/test-mlflow-meta-dataset")
source = mlflow.data.http_dataset_source.HTTPDatasetSource(
url="https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
)
ds = mlflow.data.meta_dataset.MetaDataset(source)
with mlflow.start_run() as run:
mlflow.log_input(ds)
.. code-block:: python
:caption: Create a MetaDataset with schema
import mlflow
mlflow.set_experiment("/test-mlflow-meta-dataset")
source = mlflow.data.http_dataset_source.HTTPDatasetSource(
url="https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
)
schema = Schema(
[
ColSpec(type=mlflow.types.DataType.string, name="text"),
ColSpec(type=mlflow.types.DataType.integer, name="label"),
]
)
ds = mlflow.data.meta_dataset.MetaDataset(source, schema=schema)
with mlflow.start_run() as run:
mlflow.log_input(ds)
"""
def __init__(
self,
source: DatasetSource,
name: str | None = None,
digest: str | None = None,
schema: Schema | None = None,
):
# Set `self._schema` before calling the superclass constructor because
# `self._compute_digest` depends on `self._schema`.
self._schema = schema
super().__init__(source=source, name=name, digest=digest)
def _compute_digest(self) -> str:
"""Computes a digest for the dataset.
The digest computation of `MetaDataset` is based on the dataset's name, source, source type,
and schema instead of the actual data. Basically we compute the sha256 hash of the config
dict.
"""
config = {
"name": self.name,
"source": self.source.to_json(),
"source_type": self.source._get_source_type(),
"schema": self.schema.to_dict() if self.schema else "",
}
return hashlib.sha256(json.dumps(config).encode("utf-8")).hexdigest()[:8]
@property
def schema(self) -> Any | None:
"""Returns the schema of the dataset."""
return self._schema
def to_dict(self) -> dict[str, str]:
"""Create config dictionary for the MetaDataset.
Returns a string dictionary containing the following fields: name, digest, source, source
type, schema, and profile.
"""
config = super().to_dict()
if self.schema:
schema = json.dumps({"mlflow_colspec": self.schema.to_dict()}) if self.schema else None
config["schema"] = schema
return config
| MetaDataset |
python | pandas-dev__pandas | pandas/tests/extension/list/array.py | {
"start": 491,
"end": 793
} | class ____(ExtensionDtype):
type = list
name = "list"
na_value = np.nan
def construct_array_type(self) -> type_t[ListArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return ListArray
| ListDtype |
python | ray-project__ray | rllib/examples/algorithms/classes/maml_lr_meta_learner.py | {
"start": 411,
"end": 1357
} | class ____(TorchMetaLearner):
"""A `TorchMetaLearner` to perform MAML learning.
This `TorchMetaLearner`
- defines a MSE loss for learning simple (here non-linear) prediction.
"""
@override(TorchMetaLearner)
def compute_loss_for_module(
self,
*,
module_id: ModuleID,
config: "AlgorithmConfig",
batch: Dict[str, Any],
fwd_out: Dict[str, TensorType],
others_loss_per_module: List[Dict[ModuleID, TensorType]] = None,
) -> TensorType:
"""Defines a simple MSE prediction loss for continuous task.
Note, MAML does not need the losses from the registered differentiable
learners (contained in `others_loss_per_module`) b/c it computes a test
loss on an unseen data batch.
"""
# Use a simple MSE loss for the meta learning task.
return torch.nn.functional.mse_loss(fwd_out["y_pred"], batch["y"])
| MAMLTorchMetaLearner |
python | huggingface__transformers | examples/pytorch/language-modeling/run_clm.py | {
"start": 2482,
"end": 5723
} | class ____:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
)
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
dtype: Optional[str] = field(
default=None,
metadata={
"help": (
"Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
"dtype will be automatically derived from the model's weights."
),
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
| ModelArguments |
python | lxml__lxml | src/lxml/tests/test_unicode.py | {
"start": 361,
"end": 5386
} | class ____(HelperTestCase):
def test__str(self):
# test the testing framework, namely _str from common_imports
self.assertEqual('\x10', '\u0010')
self.assertEqual('\x10', '\U00000010')
self.assertEqual('\u1234', '\U00001234')
def test_unicode_xml(self):
tree = etree.XML('<p>%s</p>' % uni)
self.assertEqual(uni, tree.text)
@needs_libxml(2, 9, 5) # not sure, at least 2.9.4 fails
def test_wide_unicode_xml(self):
if sys.maxunicode < 1114111:
return # skip test
tree = etree.XML('<p>\U00026007</p>')
self.assertEqual(1, len(tree.text))
self.assertEqual('\U00026007',
tree.text)
def test_emoji_xml(self):
p = etree.XML('<p>😄</p>')
self.assertEqual('😄', p.text)
self.assertEqual(1, len(p.text))
def test_emoji_html(self):
html = etree.HTML('<html><body><p>😄</p></body></html>')
p = html[0][0]
self.assertEqual('p', p.tag)
self.assertEqual('😄', p.text)
self.assertEqual(1, len(p.text))
def test_unicode_xml_broken(self):
uxml = ('<?xml version="1.0" encoding="UTF-8"?>' +
'<p>%s</p>' % uni)
self.assertRaises(ValueError, etree.XML, uxml)
def test_unicode_tag(self):
el = etree.Element(uni)
self.assertEqual(uni, el.tag)
def test_unicode_tag_invalid(self):
# sadly, Klingon is not well-formed
self.assertRaises(ValueError, etree.Element, invalid_tag)
def test_unicode_nstag(self):
tag = "{http://abc/}%s" % uni
el = etree.Element(tag)
self.assertEqual(tag, el.tag)
def test_unicode_ns_invalid(self):
# namespace URIs must conform to RFC 3986
tag = "{http://%s/}abc" % uni
self.assertRaises(ValueError, etree.Element, tag)
def test_unicode_nstag_invalid(self):
# sadly, Klingon is not well-formed
tag = "{http://abc/}%s" % invalid_tag
self.assertRaises(ValueError, etree.Element, tag)
def test_unicode_qname(self):
qname = etree.QName(uni, uni)
tag = "{%s}%s" % (uni, uni)
self.assertEqual(qname.text, tag)
self.assertEqual(str(qname), tag)
def test_unicode_qname_invalid(self):
self.assertRaises(ValueError, etree.QName, invalid_tag)
def test_unicode_attr(self):
el = etree.Element('foo', {'bar': uni})
self.assertEqual(uni, el.attrib['bar'])
def test_unicode_comment(self):
el = etree.Comment(uni)
self.assertEqual(uni, el.text)
def test_unicode_repr1(self):
x = etree.Element('å')
# must not raise UnicodeEncodeError
repr(x)
def test_unicode_repr2(self):
x = etree.Comment('ö')
repr(x)
def test_unicode_repr3(self):
x = etree.ProcessingInstruction('Å', '\u0131')
repr(x)
def test_unicode_repr4(self):
x = etree.Entity('ä')
repr(x)
def test_unicode_text(self):
e = etree.Element('e')
def settext(text):
e.text = text
self.assertRaises(ValueError, settext, 'ab\ufffe')
self.assertRaises(ValueError, settext, 'ö\ffff')
self.assertRaises(ValueError, settext, '\u0123\ud800')
self.assertRaises(ValueError, settext, 'x\ud8ff')
self.assertRaises(ValueError, settext, '\U00010000\udfff')
self.assertRaises(ValueError, settext, 'abd\x00def')
# should not Raise
settext('\ud7ff\ue000\U00010000\U0010FFFFäöas')
for char_val in range(0xD800, 0xDFFF+1):
self.assertRaises(ValueError, settext, 'abc' + chr(char_val))
self.assertRaises(ValueError, settext, chr(char_val))
self.assertRaises(ValueError, settext, chr(char_val) + 'abc')
self.assertRaises(ValueError, settext, b'\xe4')
self.assertRaises(ValueError, settext, b'\x80')
self.assertRaises(ValueError, settext, b'\xff')
self.assertRaises(ValueError, settext, b'\x08')
self.assertRaises(ValueError, settext, b'\x19')
self.assertRaises(ValueError, settext, b'\x20\x00')
# should not Raise
settext(b'\x09\x0A\x0D\x20\x60\x7f')
def test_uniname(self):
Element = etree.Element
def el(name):
return Element(name)
self.assertRaises(ValueError, el, ':')
self.assertRaises(ValueError, el, '0a')
self.assertRaises(ValueError, el, '\u203f')
# should not Raise
el('\u0132')
def test_unicode_parse_stringio(self):
el = etree.parse(StringIO('<p>%s</p>' % uni)).getroot()
self.assertEqual(uni, el.text)
## def test_parse_fileobject_unicode(self):
## # parse unicode from unnamed file object (not supported by ElementTree)
## f = SillyFileLike(uxml)
## root = etree.parse(f).getroot()
## self.assertEqual(etree.tostring(root, 'UTF-8').decode('utf-8'),
## uxml)
| UnicodeTestCase |
python | kamyu104__LeetCode-Solutions | Python/maximum-increasing-triplet-value.py | {
"start": 101,
"end": 758
} | class ____(object):
def maximumTripletValue(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left = SortedList()
right = [0]*len(nums)
for i in reversed(xrange(1, len(nums)-1)):
right[i] = max(right[i+1], nums[i+1])
result = 0
for i in xrange(1, len(nums)-1):
left.add(nums[i-1])
j = left.bisect_left(nums[i])
if j-1 >= 0 and right[i] > nums[i]:
result = max(result, left[j-1]-nums[i]+right[i])
return result
# Time: O(nlogn)
# Space: O(n)
from sortedcontainers import SortedList
# sorted list
| Solution |
python | ansible__ansible | lib/ansible/plugins/inventory/__init__.py | {
"start": 5885,
"end": 12588
} | class ____(AnsiblePlugin):
"""
Internal base implementation for inventory plugins.
Do not inherit from this directly, use one of its public subclasses instead.
Used to introduce an extra layer in the class hierarchy to allow Constructed to subclass this while remaining a mixin for existing inventory plugins.
"""
TYPE = 'generator'
# 3rd party plugins redefine this to
# use custom group name sanitization
# since constructed features enforce
# it by default.
_sanitize_group_name = staticmethod(to_safe_group_name)
def __init__(self) -> None:
super().__init__()
self._options = {}
self.display = display
# These attributes are set by the parse() method on this (base) class.
self.loader: DataLoader | None = None
self.inventory: InventoryData | None = None
self._vars: dict[str, t.Any] | None = None
trusted_by_default: bool = False
"""Inventory plugins that only source templates from trusted sources can set this True to have trust automatically applied to all templates."""
@functools.cached_property
def templar(self) -> _template.Templar:
return _template.Templar(loader=self.loader)
def parse(self, inventory: InventoryData, loader: DataLoader, path: str, cache: bool = True) -> None:
""" Populates inventory from the given data. Raises an error on any parse failure
:arg inventory: a copy of the previously accumulated inventory data,
to be updated with any new data this plugin provides.
The inventory can be empty if no other source/plugin ran successfully.
:arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
it also has Vault support to automatically decrypt files.
:arg path: the string that represents the 'inventory source',
normally a path to a configuration file for this inventory,
but it can also be a raw string for this plugin to consume
:arg cache: a boolean that indicates if the plugin should use the cache or not
you can ignore if this plugin does not implement caching.
"""
self.loader = loader
self.inventory = inventory
self._vars = load_extra_vars(loader)
def verify_file(self, path):
""" Verify if file is usable by this plugin, base does minimal accessibility check
:arg path: a string that was passed as an inventory source,
it normally is a path to a config file, but this is not a requirement,
it can also be parsed itself as the inventory data to process.
So only call this base class if you expect it to be a file.
"""
valid = False
b_path = to_bytes(path, errors='surrogate_or_strict')
if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
valid = True
else:
self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
return valid
def _populate_host_vars(self, hosts, variables, group=None, port=None):
if not isinstance(variables, Mapping):
raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
for host in hosts:
self.inventory.add_host(host, group=group, port=port)
for k in variables:
self.inventory.set_variable(host, k, variables[k])
def _read_config_data(self, path):
""" validate config and set options as appropriate
:arg path: path to common yaml format config file for this plugin
"""
try:
# avoid loader cache so meta: refresh_inventory can pick up config changes
# if we read more than once, fs cache should be good enough
config = self.loader.load_from_file(path, cache='none', trusted_as_template=True)
except Exception as e:
raise AnsibleParserError(to_native(e))
# a plugin can be loaded via many different names with redirection- if so, we want to accept any of those names
valid_names = getattr(self, '_redirected_names') or [self.NAME]
if not config:
# no data
raise AnsibleParserError("%s is empty" % (to_native(path)))
elif config.get('plugin') not in valid_names:
# this is not my config file
raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
elif not isinstance(config, Mapping):
# configs are dictionaries
raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
self.set_options(direct=config, var_options=self._vars)
if 'cache' in self._options and self.get_option('cache'):
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
return config
def _consume_options(self, data):
""" update existing options from alternate configuration sources not normally used by Ansible.
Many API libraries already have existing configuration sources, this allows plugin author to leverage them.
:arg data: key/value pairs that correspond to configuration options for this plugin
"""
for k in self._options:
if k in data:
self._options[k] = data.pop(k)
def _expand_hostpattern(self, hostpattern):
"""
Takes a single host pattern and returns a list of hostnames and an
optional port number that applies to all of them.
"""
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except Exception:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
hostnames = expand_hostname_range(pattern)
else:
hostnames = [pattern]
return (hostnames, port)
| _BaseInventoryPlugin |
python | django__django | tests/backends/base/test_base.py | {
"start": 17875,
"end": 18865
} | class ____(TestCase):
databases = {"default", "other"}
def test_multi_database_init_connection_state_called_once(self):
for db in self.databases:
with self.subTest(database=db):
with patch.object(connections[db], "commit", return_value=None):
with patch.object(
connections[db],
"check_database_version_supported",
) as mocked_check_database_version_supported:
connections[db].init_connection_state()
after_first_calls = len(
mocked_check_database_version_supported.mock_calls
)
connections[db].init_connection_state()
self.assertEqual(
len(mocked_check_database_version_supported.mock_calls),
after_first_calls,
)
| MultiDatabaseTests |
python | joblib__joblib | joblib/_parallel_backends.py | {
"start": 13237,
"end": 18068
} | class ____(object):
"""A helper class for automagically batching jobs."""
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = 0.2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Batching counters default values
_DEFAULT_EFFECTIVE_BATCH_SIZE = 1
_DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
def compute_batch_size(self):
"""Determine the optimal batch size"""
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if batch_duration > 0 and batch_duration < self.MIN_IDEAL_BATCH_DURATION:
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
)
# Multiply by two to limit oscilations between min and max.
ideal_batch_size *= 2
# dont increase the batch size too fast to limit huge batch sizes
# potentially leading to starving worker
batch_size = min(2 * old_batch_size, ideal_batch_size)
batch_size = max(batch_size, 1)
self._effective_batch_size = batch_size
if self.parallel.verbose >= 10:
self.parallel._print(
f"Batch computation too fast ({batch_duration}s.) "
f"Setting batch_size={batch_size}."
)
elif batch_duration > self.MAX_IDEAL_BATCH_DURATION and old_batch_size >= 2:
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs a left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
# decrease the batch size quickly to limit potential starving
ideal_batch_size = int(
old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
)
# Multiply by two to limit oscilations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.parallel.verbose >= 10:
self.parallel._print(
f"Batch computation too slow ({batch_duration}s.) "
f"Setting batch_size={batch_size}."
)
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
return batch_size
def batch_completed(self, batch_size, duration):
"""Callback indicate how long it took to run a batch"""
if batch_size == self._effective_batch_size:
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self._smoothed_batch_duration
if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
# First record of duration for this batch size after the last
# reset.
new_duration = duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * duration
self._smoothed_batch_duration = new_duration
def reset_batch_stats(self):
"""Reset batch statistics to default values.
This avoids interferences with future jobs.
"""
self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
| AutoBatchingMixin |
python | PyCQA__pylint | tests/functional/m/mapping_context.py | {
"start": 1931,
"end": 2108
} | class ____:
def __init__(self):
self._obj = {}
def __getattr__(self, attr):
return getattr(self._obj, attr)
test(**HasDynamicGetattr())
| HasDynamicGetattr |
python | spack__spack | lib/spack/spack/util/spack_yaml.py | {
"start": 1523,
"end": 3649
} | class ____(dict):
"""A dictionary that preserves YAML line information."""
__slots__ = ("line_info",)
def __init__(self, *args, line_info: str = "", **kwargs):
super().__init__(*args, **kwargs)
self.line_info = line_info
def _represent_dict_with_line_info(dumper, data):
return dumper.represent_dict(data)
def deepcopy_as_builtin(obj: Any, *, line_info: bool = False) -> Any:
"""Deep copies a YAML object as built-in types (dict, list, str, int, ...).
Args:
obj: object to be copied
line_info: if ``True``, add line information to the copied object
"""
if isinstance(obj, str):
return str(obj)
elif isinstance(obj, dict):
result = DictWithLineInfo()
result.update(
{
deepcopy_as_builtin(k): deepcopy_as_builtin(v, line_info=line_info)
for k, v in obj.items()
}
)
if line_info:
result.line_info = _line_info(obj)
return result
elif isinstance(obj, list):
return [deepcopy_as_builtin(x, line_info=line_info) for x in obj]
elif isinstance(obj, bool):
return bool(obj)
elif isinstance(obj, int):
return int(obj)
elif isinstance(obj, float):
return float(obj)
elif obj is None:
return obj
raise ValueError(f"cannot convert {type(obj)} to built-in type")
def markable(obj):
"""Whether an object can be marked."""
return type(obj) in markable_types
def mark(obj, node):
"""Add start and end markers to an object."""
if hasattr(node, "start_mark"):
obj._start_mark = node.start_mark
elif hasattr(node, "_start_mark"):
obj._start_mark = node._start_mark
if hasattr(node, "end_mark"):
obj._end_mark = node.end_mark
elif hasattr(node, "_end_mark"):
obj._end_mark = node._end_mark
def marked(obj):
"""Whether an object has been marked by spack_yaml."""
return (
hasattr(obj, "_start_mark")
and obj._start_mark
or hasattr(obj, "_end_mark")
and obj._end_mark
)
| DictWithLineInfo |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 36846,
"end": 36993
} | class ____(Structure):
_fields_ = (("count", p_uint32),)
def describe(self):
return {"count": int(self.count)}
| linker_option_command |
python | pytorch__pytorch | test/test_as_strided.py | {
"start": 3088,
"end": 6030
} | class ____(TestCase):
def test_size_10_exhaustive(self) -> None:
"""Test that size 10 produces exactly the expected 54 states."""
expected_states = {
((2,), (1,)),
((2,), (2,)),
((2,), (3,)),
((2,), (4,)),
((2,), (5,)),
((2,), (6,)),
((2,), (7,)),
((2,), (8,)),
((2,), (9,)),
((2, 2), (2, 1)),
((2, 2), (3, 1)),
((2, 2), (3, 2)),
((2, 2), (4, 1)),
((2, 2), (4, 2)),
((2, 2), (4, 3)),
((2, 2), (5, 1)),
((2, 2), (5, 2)),
((2, 2), (5, 3)),
((2, 2), (5, 4)),
((2, 2), (6, 1)),
((2, 2), (6, 2)),
((2, 2), (6, 3)),
((2, 2), (8, 1)),
((2, 2, 2), (4, 2, 1)),
((2, 2, 2), (5, 2, 1)),
((2, 3), (3, 1)),
((2, 3), (4, 1)),
((2, 3), (5, 1)),
((2, 3), (5, 2)),
((2, 3), (6, 1)),
((2, 4), (4, 1)),
((2, 4), (5, 1)),
((2, 5), (5, 1)),
((3,), (1,)),
((3,), (2,)),
((3,), (3,)),
((3,), (4,)),
((3, 2), (2, 1)),
((3, 2), (3, 1)),
((3, 2), (3, 2)),
((3, 2), (4, 1)),
((3, 3), (3, 1)),
((4,), (1,)),
((4,), (2,)),
((4,), (3,)),
((4, 2), (2, 1)),
((5,), (1,)),
((5,), (2,)),
((5, 2), (2, 1)),
((6,), (1,)),
((7,), (1,)),
((8,), (1,)),
((9,), (1,)),
((10,), (1,)),
}
actual_states = enumerate_reachable_states(10)
self.assertEqual(len(actual_states), 54)
self.assertEqual(actual_states, expected_states)
def test_subset_property(self) -> None:
"""
Test that for sizes 2..10, each smaller tensor results in a strict
subset of possible states compared to the next one.
"""
prev_states: Optional[set[tuple[tuple[int, ...], tuple[int, ...]]]] = None
for size in range(2, 11):
current_states = enumerate_reachable_states(size)
if prev_states is not None:
# Check that prev_states is a strict subset of current_states
self.assertTrue(
prev_states.issubset(current_states),
f"States from size {size - 1} are not a subset of size {size}",
)
# Check that it's a strict subset (not equal)
self.assertTrue(
len(prev_states) < len(current_states),
f"States from size {size - 1} should be strictly fewer than size {size}",
)
prev_states = current_states
if __name__ == "__main__":
run_tests()
| TestAsStrided |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 176887,
"end": 188276
} | class ____(ColumnCollectionConstraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`_schema.ForeignKey` to the definition of a :class:`_schema.Column`
is a
shorthand equivalent for an unnamed, single column
:class:`_schema.ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key_constraint"
def __init__(
self,
columns: _typing_Sequence[_DDLColumnArgument],
refcolumns: _typing_Sequence[_DDLColumnReferenceArgument],
name: _ConstraintNameArgument = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
use_alter: bool = False,
link_to_name: bool = False,
match: Optional[str] = None,
table: Optional[Table] = None,
info: Optional[_InfoType] = None,
comment: Optional[str] = None,
**dialect_kw: Any,
) -> None:
r"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
.. seealso::
:ref:`on_update_on_delete`
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT. Some dialects may allow for additional
syntaxes.
.. seealso::
:ref:`on_update_on_delete`
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped.
The use of :paramref:`_schema.ForeignKeyConstraint.use_alter` is
particularly geared towards the case where two or more tables
are established within a mutually-dependent foreign key constraint
relationship; however, the :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will perform this resolution
automatically, so the flag is normally not needed.
.. seealso::
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param comment: Optional string that will render an SQL comment on
foreign key constraint creation.
.. versionadded:: 2.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
Constraint.__init__(
self,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
comment=comment,
**dialect_kw,
)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
self.use_alter = use_alter
self.match = match
if len(set(columns)) != len(refcolumns):
if len(set(columns)) != len(columns):
# e.g. FOREIGN KEY (a, a) REFERENCES r (b, c)
raise exc.ArgumentError(
"ForeignKeyConstraint with duplicate source column "
"references are not supported."
)
else:
# e.g. FOREIGN KEY (a) REFERENCES r (b, c)
# paraphrasing
# https://www.postgresql.org/docs/current/static/ddl-constraints.html
raise exc.ArgumentError(
"ForeignKeyConstraint number "
"of constrained columns must match the number of "
"referenced columns."
)
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
self.elements = [
ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name,
match=self.match,
deferrable=self.deferrable,
initially=self.initially,
**self.dialect_kwargs,
)
for refcol in refcolumns
]
ColumnCollectionMixin.__init__(self, *columns)
if table is not None:
if hasattr(self, "parent"):
assert table is self.parent
self._set_parent_with_dispatch(table)
def _append_element(self, column: Column[Any], fk: ForeignKey) -> None:
self._columns.add(column)
self.elements.append(fk)
columns: ReadOnlyColumnCollection[str, Column[Any]]
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
elements: List[ForeignKey]
"""A sequence of :class:`_schema.ForeignKey` objects.
Each :class:`_schema.ForeignKey`
represents a single referring column/referred
column pair.
This collection is intended to be read-only.
"""
@property
def _elements(self) -> util.OrderedDict[str, ForeignKey]:
# legacy - provide a dictionary view of (column_key, fk)
return util.OrderedDict(zip(self.column_keys, self.elements))
@property
def _referred_schema(self) -> Optional[str]:
for elem in self.elements:
return elem._referred_schema
else:
return None
@property
def referred_table(self) -> Table:
"""The :class:`_schema.Table` object to which this
:class:`_schema.ForeignKeyConstraint` references.
This is a dynamically calculated attribute which may not be available
if the constraint and/or parent table is not yet associated with
a metadata collection that contains the referred table.
"""
return self.elements[0].column.table
def _validate_dest_table(self, table: Table) -> None:
table_keys = {
elem._table_key_within_construction() for elem in self.elements
}
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
f"ForeignKeyConstraint on "
f"{table.fullname}({self._col_description}) refers to "
f"multiple remote tables: {elem0} and {elem1}"
)
@property
def column_keys(self) -> _typing_Sequence[str]:
"""Return a list of string keys representing the local
columns in this :class:`_schema.ForeignKeyConstraint`.
This list is either the original string arguments sent
to the constructor of the :class:`_schema.ForeignKeyConstraint`,
or if the constraint has been initialized with :class:`_schema.Column`
objects, is the string ``.key`` of each element.
"""
if hasattr(self, "parent"):
return self._columns.keys()
else:
return [
col.key if isinstance(col, ColumnElement) else str(col)
for col in self._pending_colargs
]
@property
def _col_description(self) -> str:
return ", ".join(self.column_keys)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
Constraint._set_parent(self, table)
ColumnCollectionConstraint._set_parent(self, table)
for col, fk in zip(self._columns, self.elements):
if not hasattr(fk, "parent") or fk.parent is not col:
fk._set_parent_with_dispatch(col)
self._validate_dest_table(table)
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKeyConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self,
*,
schema: Optional[str] = None,
target_table: Optional[Table] = None,
**kw: Any,
) -> ForeignKeyConstraint:
return self._copy(schema=schema, target_table=target_table, **kw)
def _copy(
self,
*,
schema: Optional[str] = None,
target_table: Optional[Table] = None,
**kw: Any,
) -> ForeignKeyConstraint:
fkc = ForeignKeyConstraint(
[x.parent.key for x in self.elements],
[
x._get_colspec(
schema=schema,
table_name=(
target_table.name
if target_table is not None
and x._table_key_within_construction()
== x.parent.table.key
else None
),
_is_copy=True,
)
for x in self.elements
],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
comment=self.comment,
)
for self_fk, other_fk in zip(self.elements, fkc.elements):
self_fk._schema_item_copy(other_fk)
return self._schema_item_copy(fkc)
| ForeignKeyConstraint |
python | spyder-ide__spyder | spyder/api/widgets/mixins.py | {
"start": 26049,
"end": 30317
} | class ____(SpyderConfigurationAccessor):
"""
Mixin to transform an SVG to a QPixmap that is scaled according to the
factor set by users in Preferences.
"""
def svg_to_scaled_pixmap(self, svg_file, rescale=None, in_package=True):
"""
Transform svg to a QPixmap that is scaled according to the factor set
by users in Preferences. Uses the icon manager for proper colorization.
Parameters
----------
svg_file: str
Name of or path to the svg file.
rescale: float, optional
Rescale pixmap according to a factor between 0 and 1.
in_package: bool, optional
Get svg from the `images` folder in the Spyder package.
"""
if in_package:
image_path = get_image_path(svg_file)
# Get user's DPI scale factor
if self.get_conf('high_dpi_custom_scale_factor', section='main'):
scale_factors = self.get_conf(
'high_dpi_custom_scale_factors',
section='main'
)
scale_factor = float(scale_factors.split(":")[0])
else:
scale_factor = 1
# Check if the SVG has colorization classes before colorization
should_colorize = False
try:
svg_paths_data = SVGColorize.get_colored_paths(
image_path, ima.ICON_COLORS
)
if svg_paths_data and svg_paths_data.get('paths'):
# Check if any of the paths have colorization classes
# (not just default colors)
paths = svg_paths_data.get('paths', [])
for path in paths:
# If a path has a color that's not the default color,
# it means it has a colorization class
default_color = ima.ICON_COLORS.get(
'ICON_1', '#FF0000' # Get default color from palette
)
if path.get('color') != default_color:
should_colorize = True
break
except Exception:
should_colorize = False
# Try to use the icon manager for colorization only if SVG supports it
if should_colorize:
icon = ima.get_icon(svg_file)
if icon and not icon.isNull():
# Get the original SVG dimensions
pm = QPixmap(image_path)
width = pm.width()
height = pm.height()
# Apply rescale factor
if rescale is not None:
aspect_ratio = width / height
width = int(width * rescale)
height = int(width / aspect_ratio)
# Get a properly scaled pixmap from the icon
# Use the maximum dimension to maintain aspect ratio
max_dimension = max(
int(width * scale_factor),
int(height * scale_factor)
)
return icon.pixmap(max_dimension, max_dimension)
# Fallback to original method for icons without colorization classes.
# Get width and height
pm = QPixmap(image_path)
width = pm.width()
height = pm.height()
# Rescale but preserving aspect ratio
if rescale is not None:
aspect_ratio = width / height
width = int(width * rescale)
height = int(width / aspect_ratio)
# Paint image using svg renderer
image = QImage(
int(width * scale_factor), int(height * scale_factor),
QImage.Format_ARGB32_Premultiplied
)
image.fill(0)
painter = QPainter(image)
renderer = QSvgRenderer(image_path)
renderer.render(painter)
painter.end()
# This is also necessary to make the image look good for different
# scale factors
if scale_factor > 1.0:
image.setDevicePixelRatio(scale_factor)
# Create pixmap out of image
final_pm = QPixmap.fromImage(image)
final_pm = final_pm.copy(
0, 0, int(width * scale_factor), int(height * scale_factor)
)
return final_pm
| SvgToScaledPixmap |
python | huggingface__transformers | src/transformers/models/bert_generation/modeling_bert_generation.py | {
"start": 19118,
"end": 19990
} | class ____(PreTrainedModel):
config_class = BertGenerationConfig
base_model_prefix = "bert"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": BertGenerationLayer,
"attentions": BertGenerationSelfAttention,
"cross_attentions": BertGenerationCrossAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, BertGenerationOnlyLMHead):
init.zeros_(module.bias)
@auto_docstring(
custom_intro="""
The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.
"""
)
| BertGenerationPreTrainedModel |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_int.py | {
"start": 26701,
"end": 34932
} | class ____(__TestCase):
int_class = int # Override this in subclasses to reuse the suite.
def setUp(self):
super().setUp()
self._previous_limit = sys.get_int_max_str_digits()
sys.set_int_max_str_digits(2048)
def tearDown(self):
sys.set_int_max_str_digits(self._previous_limit)
super().tearDown()
def test_disabled_limit(self):
self.assertGreater(sys.get_int_max_str_digits(), 0)
self.assertLess(sys.get_int_max_str_digits(), 20_000)
with support.adjust_int_max_str_digits(0):
self.assertEqual(sys.get_int_max_str_digits(), 0)
i = self.int_class('1' * 20_000)
str(i)
self.assertGreater(sys.get_int_max_str_digits(), 0)
def test_max_str_digits_edge_cases(self):
"""Ignore the +/- sign and space padding."""
int_class = self.int_class
maxdigits = sys.get_int_max_str_digits()
int_class('1' * maxdigits)
int_class(' ' + '1' * maxdigits)
int_class('1' * maxdigits + ' ')
int_class('+' + '1' * maxdigits)
int_class('-' + '1' * maxdigits)
self.assertEqual(len(str(10 ** (maxdigits - 1))), maxdigits)
def check(self, i, base=None):
with self.assertRaises(ValueError):
if base is None:
self.int_class(i)
else:
self.int_class(i, base)
def test_max_str_digits(self):
maxdigits = sys.get_int_max_str_digits()
self.check('1' * (maxdigits + 1))
self.check(' ' + '1' * (maxdigits + 1))
self.check('1' * (maxdigits + 1) + ' ')
self.check('+' + '1' * (maxdigits + 1))
self.check('-' + '1' * (maxdigits + 1))
self.check('1' * (maxdigits + 1))
i = 10 ** maxdigits
with self.assertRaises(ValueError):
str(i)
def test_denial_of_service_prevented_int_to_str(self):
"""Regression test: ensure we fail before performing O(N**2) work."""
maxdigits = sys.get_int_max_str_digits()
assert maxdigits < 50_000, maxdigits # A test prerequisite.
huge_int = int(f'0x{"c"*65_000}', base=16) # 78268 decimal digits.
digits = 78_268
with (
support.adjust_int_max_str_digits(digits),
support.CPUStopwatch() as sw_convert):
huge_decimal = str(huge_int)
self.assertEqual(len(huge_decimal), digits)
# Ensuring that we chose a slow enough conversion to measure.
# It takes 0.1 seconds on a Zen based cloud VM in an opt build.
# Some OSes have a low res 1/64s timer, skip if hard to measure.
if sw_convert.seconds < sw_convert.clock_info.resolution * 2:
raise unittest.SkipTest('"slow" conversion took only '
f'{sw_convert.seconds} seconds.')
# We test with the limit almost at the size needed to check performance.
# The performant limit check is slightly fuzzy, give it a some room.
with support.adjust_int_max_str_digits(int(.995 * digits)):
with (
self.assertRaises(ValueError) as err,
support.CPUStopwatch() as sw_fail_huge):
str(huge_int)
self.assertIn('conversion', str(err.exception))
self.assertLessEqual(sw_fail_huge.seconds, sw_convert.seconds/2)
# Now we test that a conversion that would take 30x as long also fails
# in a similarly fast fashion.
extra_huge_int = int(f'0x{"c"*500_000}', base=16) # 602060 digits.
with (
self.assertRaises(ValueError) as err,
support.CPUStopwatch() as sw_fail_extra_huge):
# If not limited, 8 seconds said Zen based cloud VM.
str(extra_huge_int)
self.assertIn('conversion', str(err.exception))
self.assertLess(sw_fail_extra_huge.seconds, sw_convert.seconds/2)
def test_denial_of_service_prevented_str_to_int(self):
"""Regression test: ensure we fail before performing O(N**2) work."""
maxdigits = sys.get_int_max_str_digits()
assert maxdigits < 100_000, maxdigits # A test prerequisite.
digits = 133700
huge = '8'*digits
with (
support.adjust_int_max_str_digits(digits),
support.CPUStopwatch() as sw_convert):
int(huge)
# Ensuring that we chose a slow enough conversion to measure.
# It takes 0.1 seconds on a Zen based cloud VM in an opt build.
# Some OSes have a low res 1/64s timer, skip if hard to measure.
if sw_convert.seconds < sw_convert.clock_info.resolution * 2:
raise unittest.SkipTest('"slow" conversion took only '
f'{sw_convert.seconds} seconds.')
with support.adjust_int_max_str_digits(digits - 1):
with (
self.assertRaises(ValueError) as err,
support.CPUStopwatch() as sw_fail_huge):
int(huge)
self.assertIn('conversion', str(err.exception))
self.assertLessEqual(sw_fail_huge.seconds, sw_convert.seconds/2)
# Now we test that a conversion that would take 30x as long also fails
# in a similarly fast fashion.
extra_huge = '7'*1_200_000
with (
self.assertRaises(ValueError) as err,
support.CPUStopwatch() as sw_fail_extra_huge):
# If not limited, 8 seconds in the Zen based cloud VM.
int(extra_huge)
self.assertIn('conversion', str(err.exception))
self.assertLessEqual(sw_fail_extra_huge.seconds, sw_convert.seconds/2)
def test_power_of_two_bases_unlimited(self):
"""The limit does not apply to power of 2 bases."""
maxdigits = sys.get_int_max_str_digits()
for base in (2, 4, 8, 16, 32):
with self.subTest(base=base):
self.int_class('1' * (maxdigits + 1), base)
assert maxdigits < 100_000
self.int_class('1' * 100_000, base)
def test_underscores_ignored(self):
maxdigits = sys.get_int_max_str_digits()
triples = maxdigits // 3
s = '111' * triples
s_ = '1_11' * triples
self.int_class(s) # succeeds
self.int_class(s_) # succeeds
self.check(f'{s}111')
self.check(f'{s_}_111')
def test_sign_not_counted(self):
int_class = self.int_class
max_digits = sys.get_int_max_str_digits()
s = '5' * max_digits
i = int_class(s)
pos_i = int_class(f'+{s}')
assert i == pos_i
neg_i = int_class(f'-{s}')
assert -pos_i == neg_i
str(pos_i)
str(neg_i)
def _other_base_helper(self, base):
int_class = self.int_class
max_digits = sys.get_int_max_str_digits()
s = '2' * max_digits
i = int_class(s, base)
if base > 10:
with self.assertRaises(ValueError):
str(i)
elif base < 10:
str(i)
with self.assertRaises(ValueError) as err:
int_class(f'{s}1', base)
def test_int_from_other_bases(self):
base = 3
with self.subTest(base=base):
self._other_base_helper(base)
base = 36
with self.subTest(base=base):
self._other_base_helper(base)
def test_int_max_str_digits_is_per_interpreter(self):
# Changing the limit in one interpreter does not change others.
code = """if 1:
# Subinterpreters maintain and enforce their own limit
import sys
sys.set_int_max_str_digits(2323)
try:
int('3'*3333)
except ValueError:
pass
else:
raise AssertionError('Expected a int max str digits ValueError.')
"""
with support.adjust_int_max_str_digits(4000):
before_value = sys.get_int_max_str_digits()
self.assertEqual(support.run_in_subinterp(code), 0,
'subinterp code failure, check stderr.')
after_value = sys.get_int_max_str_digits()
self.assertEqual(before_value, after_value)
| IntStrDigitLimitsTests |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_connection_command.py | {
"start": 2294,
"end": 3816
} | class ____:
parser = cli_parser.get_parser()
EXPECTED_CONS = [
("airflow_db", "mysql"),
("google_cloud_default", "google_cloud_platform"),
("http_default", "http"),
("local_mysql", "mysql"),
("mongo_default", "mongo"),
("mssql_default", "mssql"),
("mysql_default", "mysql"),
("pinot_broker_default", "pinot"),
("postgres_default", "postgres"),
("presto_default", "presto"),
("sqlite_default", "sqlite"),
("trino_default", "trino"),
("vertica_default", "vertica"),
]
def setup_method(self):
clear_db_connections(add_default_connections_back=True)
def test_cli_connections_list_as_json(self):
args = self.parser.parse_args(["connections", "list", "--output", "json"])
with redirect_stdout(StringIO()) as stdout_io:
connection_command.connections_list(args)
print(stdout_io.getvalue())
stdout = stdout_io.getvalue()
for conn_id, conn_type in self.EXPECTED_CONS:
assert conn_type in stdout
assert conn_id in stdout
def test_cli_connections_filter_conn_id(self):
args = self.parser.parse_args(
["connections", "list", "--output", "json", "--conn-id", "http_default"]
)
with redirect_stdout(StringIO()) as capture:
connection_command.connections_list(args)
stdout = capture.getvalue()
assert "http_default" in stdout
| TestCliListConnections |
python | walkccc__LeetCode | solutions/1925. Count Square Sum Triples/1925.py | {
"start": 0,
"end": 256
} | class ____:
def countTriples(self, n: int) -> int:
ans = 0
squared = set()
for i in range(1, n + 1):
squared.add(i * i)
for a in squared:
for b in squared:
if a + b in squared:
ans += 1
return ans
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/compat.py | {
"start": 1443,
"end": 7949
} | class ____(typing.NamedTuple):
args: List[str]
varargs: Optional[str]
varkw: Optional[str]
defaults: Optional[Tuple[Any, ...]]
kwonlyargs: List[str]
kwonlydefaults: Optional[Dict[str, Any]]
annotations: Dict[str, Any]
def inspect_getfullargspec(func: Callable[..., Any]) -> FullArgSpec:
"""Fully vendored version of getfullargspec from Python 3.3."""
if inspect.ismethod(func):
func = func.__func__
if not inspect.isfunction(func):
raise TypeError(f"{func!r} is not a Python function")
co = func.__code__
if not inspect.iscode(co):
raise TypeError(f"{co!r} is not a code object")
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount
args = list(names[:nargs])
kwonlyargs = list(names[nargs : nargs + nkwargs])
nargs += nkwargs
varargs = None
if co.co_flags & inspect.CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & inspect.CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return FullArgSpec(
args,
varargs,
varkw,
func.__defaults__,
kwonlyargs,
func.__kwdefaults__,
func.__annotations__,
)
# python stubs don't have a public type for this. not worth
# making a protocol
def md5_not_for_security() -> Any:
return hashlib.md5(usedforsecurity=False)
def importlib_metadata_get(group):
ep = importlib_metadata.entry_points()
if typing.TYPE_CHECKING or hasattr(ep, "select"):
return ep.select(group=group)
else:
return ep.get(group, ())
def b(s):
return s.encode("latin-1")
def b64decode(x: str) -> bytes:
return base64.b64decode(x.encode("ascii"))
def b64encode(x: bytes) -> str:
return base64.b64encode(x).decode("ascii")
def decode_backslashreplace(text: bytes, encoding: str) -> str:
return text.decode(encoding, errors="backslashreplace")
def cmp(a, b):
return (a > b) - (a < b)
def _formatannotation(annotation, base_module=None):
"""vendored from python 3.7"""
if isinstance(annotation, str):
return annotation
if getattr(annotation, "__module__", None) == "typing":
return repr(annotation).replace("typing.", "").replace("~", "")
if isinstance(annotation, type):
if annotation.__module__ in ("builtins", base_module):
return repr(annotation.__qualname__)
return annotation.__module__ + "." + annotation.__qualname__
elif isinstance(annotation, typing.TypeVar):
return repr(annotation).replace("~", "")
return repr(annotation).replace("~", "")
def inspect_formatargspec(
args: List[str],
varargs: Optional[str] = None,
varkw: Optional[str] = None,
defaults: Optional[Sequence[Any]] = None,
kwonlyargs: Optional[Sequence[str]] = (),
kwonlydefaults: Optional[Mapping[str, Any]] = {},
annotations: Mapping[str, Any] = {},
formatarg: Callable[[str], str] = str,
formatvarargs: Callable[[str], str] = lambda name: "*" + name,
formatvarkw: Callable[[str], str] = lambda name: "**" + name,
formatvalue: Callable[[Any], str] = lambda value: "=" + repr(value),
formatreturns: Callable[[Any], str] = lambda text: " -> " + str(text),
formatannotation: Callable[[Any], str] = _formatannotation,
) -> str:
"""Copy formatargspec from python 3.7 standard library.
Python 3 has deprecated formatargspec and requested that Signature
be used instead, however this requires a full reimplementation
of formatargspec() in terms of creating Parameter objects and such.
Instead of introducing all the object-creation overhead and having
to reinvent from scratch, just copy their compatibility routine.
Ultimately we would need to rewrite our "decorator" routine completely
which is not really worth it right now, until all Python 2.x support
is dropped.
"""
kwonlydefaults = kwonlydefaults or {}
annotations = annotations or {}
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ": " + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
else:
firstdefault = -1
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append("*")
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = "(" + ", ".join(specs) + ")"
if "return" in annotations:
result += formatreturns(formatannotation(annotations["return"]))
return result
def dataclass_fields(cls: Type[Any]) -> Iterable[dataclasses.Field[Any]]:
"""Return a sequence of all dataclasses.Field objects associated
with a class as an already processed dataclass.
The class must **already be a dataclass** for Field objects to be returned.
"""
if dataclasses.is_dataclass(cls):
return dataclasses.fields(cls)
else:
return []
def local_dataclass_fields(cls: Type[Any]) -> Iterable[dataclasses.Field[Any]]:
"""Return a sequence of all dataclasses.Field objects associated with
an already processed dataclass, excluding those that originate from a
superclass.
The class must **already be a dataclass** for Field objects to be returned.
"""
if dataclasses.is_dataclass(cls):
super_fields: Set[dataclasses.Field[Any]] = set()
for sup in cls.__bases__:
super_fields.update(dataclass_fields(sup))
return [f for f in dataclasses.fields(cls) if f not in super_fields]
else:
return []
if freethreading:
import threading
mini_gil = threading.RLock()
"""provide a threading.RLock() under python freethreading only"""
else:
import contextlib
mini_gil = contextlib.nullcontext() # type: ignore[assignment]
| FullArgSpec |
python | django__django | tests/expressions/models.py | {
"start": 2985,
"end": 3119
} | class ____(models.Model):
uuid = models.UUIDField(null=True)
uuid_fk = models.ForeignKey(UUIDPK, models.CASCADE, null=True)
| UUID |
python | django__django | django/views/generic/dates.py | {
"start": 19903,
"end": 20079
} | class ____(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
"""List of objects published on a given day."""
template_name_suffix = "_archive_day"
| DayArchiveView |
python | encode__django-rest-framework | tests/test_routers.py | {
"start": 2187,
"end": 2709
} | class ____(viewsets.ViewSet):
@action(detail=False, url_path='list/(?P<kwarg>[0-9]{4})')
def regex_url_path_list(self, request, *args, **kwargs):
kwarg = self.kwargs.get('kwarg', '')
return Response({'kwarg': kwarg})
@action(detail=True, url_path='detail/(?P<kwarg>[0-9]{4})')
def regex_url_path_detail(self, request, *args, **kwargs):
pk = self.kwargs.get('pk', '')
kwarg = self.kwargs.get('kwarg', '')
return Response({'pk': pk, 'kwarg': kwarg})
| RegexUrlPathViewSet |
python | mlflow__mlflow | mlflow/types/llm.py | {
"start": 25905,
"end": 27514
} | class ____(_BaseDataclass):
"""
The full response object returned by the chat endpoint.
Args:
choices (List[:py:class:`ChatChoice`]): A list of :py:class:`ChatChoice` objects
containing the generated responses
usage (:py:class:`TokenUsageStats`): An object describing the tokens used by the request.
**Optional**, defaults to ``None``.
id (str): The ID of the response. **Optional**, defaults to ``None``
model (str): The name of the model used. **Optional**, defaults to ``None``
object (str): The object type. Defaults to 'chat.completion'
created (int): The time the response was created.
**Optional**, defaults to the current time.
custom_outputs (Dict[str, Any]): An field that can contain arbitrary additional context.
The dictionary values must be JSON-serializable.
**Optional**, defaults to ``None``
"""
choices: list[ChatChoice]
usage: TokenUsageStats | None = None
id: str | None = None
model: str | None = None
object: str = "chat.completion"
created: int = field(default_factory=lambda: int(time.time()))
custom_outputs: dict[str, Any] | None = None
def __post_init__(self):
self._validate_field("id", str, False)
self._validate_field("object", str, True)
self._validate_field("created", int, True)
self._validate_field("model", str, False)
self._convert_dataclass_list("choices", ChatChoice)
self._convert_dataclass("usage", TokenUsageStats, False)
@dataclass
| ChatCompletionResponse |
python | getsentry__sentry | src/sentry/integrations/slack/webhooks/action.py | {
"start": 6635,
"end": 29940
} | class ____(Endpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = ()
permission_classes = ()
slack_request_class = SlackActionRequest
def respond_ephemeral(self, text: str) -> Response:
return self.respond({"response_type": "ephemeral", "replace_original": False, "text": text})
def api_error(
self,
slack_request: SlackActionRequest,
group: Group,
user: RpcUser,
error: ApiClient.ApiError,
action_type: str,
) -> Response:
from sentry.integrations.slack.views.unlink_identity import build_unlinking_url
_logger.info(
"slack.action.api-error",
extra={
**slack_request.get_logging_data(group),
"response": str(error.body),
"action_type": action_type,
},
)
channel_id = None
response_url = None
# the channel ID and response URL are in a different place if it's coming from a modal
view = slack_request.data.get("view")
if view:
private_metadata = view.get("private_metadata")
if private_metadata:
data = orjson.loads(private_metadata)
channel_id = data.get("channel_id")
response_url = data.get("orig_response_url")
user_id = slack_request.user_id
channel = channel_id or slack_request.channel_id
resp_url = response_url or slack_request.response_url
if user_id is None or channel is None or resp_url is None:
text = DEFAULT_ERROR_MESSAGE
# keeping this separate from above since its a different condition
elif error.status_code != 403:
text = DEFAULT_ERROR_MESSAGE
else:
text = UNLINK_IDENTITY_MESSAGE.format(
associate_url=build_unlinking_url(
slack_request.integration.id,
slack_id=user_id,
channel_id=channel,
response_url=resp_url,
),
user_email=user.email,
org_name=group.organization.name,
)
return self.respond_ephemeral(text)
@staticmethod
def _unpack_error_text(validation_error: serializers.ValidationError) -> str:
detail = validation_error.detail
while True:
if isinstance(detail, dict):
detail = list(detail.values())
element = detail[0]
if isinstance(element, str):
return element
detail = element
def record_event(
self, interaction_type: MessagingInteractionType, group: Group, request: Request
) -> MessagingInteractionEvent:
user = request.user
return MessagingInteractionEvent(
interaction_type,
SlackMessagingSpec(),
user=(user if isinstance(user, User) else None),
organization=(group.project.organization if group else None),
)
def validation_error(
self,
slack_request: SlackActionRequest,
group: Group,
error: serializers.ValidationError,
action_type: str,
) -> Response:
_logger.info(
"slack.action.validation-error",
extra={
**slack_request.get_logging_data(group),
"response": str(error.detail),
"action_type": action_type,
},
)
text: str = self._unpack_error_text(error)
return self.respond_ephemeral(text)
def on_assign(
self,
request: Request,
user: RpcUser,
group: Group,
action: MessageAction | BlockKitMessageAction,
) -> None:
if not (action.selected_options and len(action.selected_options)):
# Short-circuit if action is invalid
return
assignee = action.selected_options[0]["value"]
if assignee == "none":
assignee = None
update_group(
group,
user,
{
"assignedTo": assignee,
"integration": ActivityIntegration.SLACK.value,
},
request,
)
analytics.record(SlackIntegrationAssign(actor_id=user.id))
def on_status(
self,
request: Request,
user: RpcUser,
group: Group,
action: MessageAction | BlockKitMessageAction,
) -> None:
status_data = (action.value or "").split(":", 2)
if not len(status_data):
return
status: MutableMapping[str, Any] = {
"status": status_data[0],
}
# sub-status only applies to ignored/archived issues
if len(status_data) > 1 and status_data[0] == "ignored":
status["substatus"] = status_data[1]
if status["substatus"] == "archived_until_condition_met":
status.update({"statusDetails": {"ignoreCount": int(status_data[2])}})
resolve_type = status_data[-1]
if resolve_type == "inNextRelease":
status.update({"statusDetails": {"inNextRelease": True}})
elif resolve_type == "inCurrentRelease":
status.update({"statusDetails": {"inRelease": "latest"}})
update_group(group, user, status, request)
analytics.record(
SlackIntegrationStatus(
organization_id=group.project.organization.id,
status=status["status"],
resolve_type=resolve_type,
user_id=user.id,
)
)
def _handle_group_actions(
self,
slack_request: SlackActionRequest,
request: Request,
action_list: Sequence[BlockKitMessageAction],
) -> Response:
from sentry.integrations.slack.views.link_identity import build_linking_url
group = get_group(slack_request)
if not group:
return self.respond(status=403)
rule = get_rule(slack_request, group.organization, group.type)
identity = slack_request.get_identity()
# Determine the acting user by Slack identity.
identity_user = slack_request.get_identity_user()
if not identity or not identity_user:
# if we don't have user_id or channel_id, we can't link the identity
if not slack_request.user_id or not slack_request.channel_id:
return self.respond_ephemeral(NO_IDENTITY_MESSAGE)
associate_url = build_linking_url(
integration=slack_request.integration,
slack_id=slack_request.user_id,
channel_id=slack_request.channel_id,
response_url=slack_request.response_url,
)
return self.respond_ephemeral(LINK_IDENTITY_MESSAGE.format(associate_url=associate_url))
original_tags_from_request = slack_request.get_tags()
if slack_request.type == "view_submission":
# TODO: if we use modals for something other than resolve and archive, this will need to be more specific
with self.record_event(
MessagingInteractionType.VIEW_SUBMISSION, group, request
).capture() as lifecycle:
lifecycle.add_extras(
{
"integration_id": slack_request.integration.id,
"organization_id": group.project.organization_id,
}
)
# Masquerade a status action
selection = None
view = slack_request.data.get("view")
if view:
state = view.get("state")
if state:
values = state.get("values")
if values:
for value in values:
for val in values[value]:
selection = values[value][val]["selected_option"]["value"]
if selection:
break
if not selection:
lifecycle.record_failure(MessageInteractionFailureReason.MISSING_ACTION)
return self.respond()
lifecycle.add_extra("selection", selection)
status_action = MessageAction(name="status", value=selection)
try:
self.on_status(request, identity_user, group, status_action)
except client.ApiError as error:
lifecycle.record_failure(error)
return self.api_error(
slack_request, group, identity_user, error, "status_dialog"
)
view = View(**slack_request.data["view"])
assert view.private_metadata is not None
private_metadata = orjson.loads(view.private_metadata)
original_tags_from_request = set(private_metadata.get("tags", {}))
blocks = SlackIssuesMessageBuilder(
group,
identity=identity,
actions=[status_action],
tags=original_tags_from_request,
rules=[rule] if rule else None,
issue_details=True,
skip_fallback=True,
).build()
# use the original response_url to update the link attachment
try:
webhook_client = WebhookClient(private_metadata["orig_response_url"])
webhook_client.send(
blocks=blocks.get("blocks"), delete_original=False, replace_original=True
)
except SlackApiError as e:
lifecycle.record_failure(e)
return self.respond()
# Usually we'll want to respond with the updated attachment including
# the list of actions taken. However, when opening a dialog we do not
# have anything to update the message with and will use the
# response_url later to update it.
defer_attachment_update = False
# Handle interaction actions
for action in action_list:
try:
if action.name in ("status", "unresolved:ongoing"):
with self.record_event(
MessagingInteractionType.STATUS, group, request
).capture():
self.on_status(request, identity_user, group, action)
elif (
action.name == "assign"
): # TODO: remove this as it is replaced by the options-load endpoint
with self.record_event(
MessagingInteractionType.ASSIGN, group, request
).capture():
self.on_assign(request, identity_user, group, action)
elif action.name == "resolve_dialog":
with self.record_event(
MessagingInteractionType.RESOLVE_DIALOG, group, request
).capture():
_ResolveDialog().open_dialog(slack_request, group)
defer_attachment_update = True
elif action.name == "archive_dialog":
with self.record_event(
MessagingInteractionType.ARCHIVE_DIALOG, group, request
).capture():
_ArchiveDialog().open_dialog(slack_request, group)
defer_attachment_update = True
except client.ApiError as error:
return self.api_error(slack_request, group, identity_user, error, action.name)
except serializers.ValidationError as error:
return self.validation_error(slack_request, group, error, action.name)
if defer_attachment_update:
return self.respond()
# Reload group as it may have been mutated by the action
group = Group.objects.get(id=group.id)
response = SlackIssuesMessageBuilder(
group,
identity=identity,
actions=action_list,
tags=original_tags_from_request,
rules=[rule] if rule else None,
).build()
# XXX(isabella): for actions on link unfurls, we omit the fallback text from the
# response so the unfurling endpoint understands the payload
if (
slack_request.data.get("container")
and slack_request.data["container"].get("is_app_unfurl")
and "text" in response
):
del response["text"]
if not slack_request.data.get("response_url"):
# XXX: when you click an option in a modal dropdown it submits the request even though "Submit" has not been clicked
return self.respond()
response_url = slack_request.data["response_url"]
webhook_client = WebhookClient(response_url)
try:
webhook_client.send(
blocks=response.get("blocks"),
text=response.get("text"),
delete_original=False,
replace_original=True,
)
except SlackApiError:
_logger.info(
"slack.webhook.update_status.response-error",
extra={
"integration_id": slack_request.integration.id,
"blocks": response.get("blocks"),
},
)
return self.respond(response)
def handle_unfurl(self, slack_request: SlackActionRequest, action: str) -> Response:
organization_integrations = integration_service.get_organization_integrations(
integration_id=slack_request.integration.id, limit=1
)
if len(organization_integrations) > 0:
analytics.record(
SlackIntegrationChartUnfurlAction(
organization_id=organization_integrations[0].id,
action=action,
)
)
payload = {"delete_original": "true"}
try:
requests_.post(slack_request.response_url, json=payload)
except ApiError:
_logger.exception("slack.action.response-error")
return self.respond(status=403)
return self.respond()
@classmethod
def get_action_option(cls, slack_request: SlackActionRequest) -> str | None:
action_option = None
for action_data in slack_request.data.get("actions", []):
# Get the _first_ value in the action list.
value = action_data.get("value")
if value and not action_option:
action_option = value
return action_option
@classmethod
def get_action_list(cls, slack_request: SlackActionRequest) -> list[BlockKitMessageAction]:
action_data = slack_request.data.get("actions")
if not action_data or not isinstance(action_data, list):
return []
action_list = []
for action_data in action_data:
routing_data = decode_action_id(action_data.get("action_id", ""))
action_name = routing_data.action
if not action_name:
continue
if action_data.get("type") in ("static_select", "external_select"):
action = BlockKitMessageAction(
name=action_name,
label=action_data["selected_option"]["text"]["text"],
type=action_data["type"],
value=action_data["selected_option"]["value"],
action_id=action_data["action_id"],
block_id=action_data["block_id"],
selected_options=[
{"value": action_data.get("selected_option", {}).get("value")}
],
)
# TODO: selected_options is kinda ridiculous, I think this is built to handle multi-select?
else:
action = BlockKitMessageAction(
name=action_name,
label=action_data["text"]["text"],
type=action_data["type"],
value=action_data["value"],
action_id=action_data["action_id"],
block_id=action_data["block_id"],
)
action_list.append(action)
return action_list
def post(self, request: Request) -> Response:
try:
slack_request = self.slack_request_class(request)
slack_request.validate()
except SlackRequestError as e:
_logger.info(
"slack.action.request-error", extra={"error": str(e), "status_code": e.status}
)
return self.respond(status=e.status)
_logger.info(
"slack.action.request",
extra={
"trigger_id": slack_request.data.get("trigger_id"),
"integration_id": slack_request.integration.id,
"request_data": slack_request.data,
},
)
# Set organization scope
bind_org_context_from_integration(slack_request.integration.id)
sentry_sdk.set_tag("integration_id", slack_request.integration.id)
# Actions list may be empty when receiving a dialog response.
action_option = self.get_action_option(slack_request=slack_request)
# If a user is just clicking a button link we return a 200
if action_option in (
"sentry_docs_link_clicked",
"grace_period_warning",
"integration_disabled_slack",
"trial_end_warning",
"link_clicked",
):
return self.respond()
if action_option in UNFURL_ACTION_OPTIONS:
return self.handle_unfurl(slack_request, action_option)
if action_option in ["approve_member", "reject_member"]:
return self.handle_member_approval(slack_request, action_option)
if action_option in NOTIFICATION_SETTINGS_ACTION_OPTIONS:
return self.handle_enable_notifications(slack_request)
action_list = self.get_action_list(slack_request=slack_request)
return self._handle_group_actions(slack_request, request, action_list)
def handle_enable_notifications(self, slack_request: SlackActionRequest) -> Response:
identity_user = slack_request.get_identity_user()
if not identity_user:
return self.respond_with_text(NO_IDENTITY_MESSAGE)
notifications_service.enable_all_settings_for_provider(
external_provider=ExternalProviderEnum.SLACK,
user_id=identity_user.id,
)
return self.respond_with_text(ENABLE_SLACK_SUCCESS_MESSAGE)
def handle_member_approval(self, slack_request: SlackActionRequest, action: str) -> Response:
identity_user = slack_request.get_identity_user()
response_url = slack_request.data["response_url"]
webhook_client = WebhookClient(response_url)
if not identity_user:
webhook_client.send(
text=NO_IDENTITY_MESSAGE, response_type="in_channel", replace_original=False
)
return self.respond()
member_id = slack_request.callback_data["member_id"]
try:
member = OrganizationMember.objects.get_member_invite_query(member_id).get()
except OrganizationMember.DoesNotExist:
# member request is gone, likely someone else rejected it
member_email = slack_request.callback_data["member_email"]
webhook_client.send(
text=f"Member invitation for {member_email} no longer exists.",
response_type="in_channel",
replace_original=False,
)
return self.respond()
organization = member.organization
if not organization.has_access(identity_user):
webhook_client.send(
text=NO_ACCESS_MESSAGE,
response_type="in_channel",
replace_original=False,
)
return self.respond()
# row should exist because we have access
member_of_approver = OrganizationMember.objects.get(
user_id=identity_user.id, organization=organization
)
access = from_member(member_of_approver)
if not access.has_scope("member:admin"):
webhook_client.send(
text=NO_PERMISSION_MESSAGE, replace_original=False, response_type="in_channel"
)
return self.respond()
# validate the org options and check against allowed_roles
allowed_roles = member_of_approver.get_allowed_org_roles_to_invite()
try:
member.validate_invitation(identity_user, allowed_roles)
except UnableToAcceptMemberInvitationException as err:
webhook_client.send(text=str(err), replace_original=False, response_type="in_channel")
return self.respond()
original_status = InviteStatus(member.invite_status)
try:
if action == "approve_member":
member.approve_member_invitation(
identity_user, referrer=IntegrationProviderSlug.SLACK.value
)
else:
member.reject_member_invitation(identity_user)
except Exception:
# shouldn't error but if it does, respond to the user
_logger.exception(
"slack.action.member-invitation-error",
extra={
"organization_id": organization.id,
"member_id": member.id,
},
)
webhook_client.send(
text=DEFAULT_ERROR_MESSAGE, replace_original=False, response_type="in_channel"
)
return self.respond()
if original_status == InviteStatus.REQUESTED_TO_BE_INVITED:
invite_type = "Invite"
else:
invite_type = "Join"
if action == "approve_member":
event = SlackIntegrationApproveMemberInvitation(
actor_id=identity_user.id,
organization_id=member.organization_id,
invitation_type=invite_type.lower(),
invited_member_id=member.id,
)
verb = "approved"
else:
event = SlackIntegrationRejectMemberInvitation(
actor_id=identity_user.id,
organization_id=member.organization_id,
invitation_type=invite_type.lower(),
invited_member_id=member.id,
)
verb = "rejected"
analytics.record(event)
manage_url = member.organization.absolute_url(
reverse("sentry-organization-members", args=[member.organization.slug])
)
message = SUCCESS_MESSAGE.format(
email=member.email,
invite_type=invite_type,
url=manage_url,
verb=verb,
)
webhook_client.send(text=message, replace_original=False, response_type="in_channel")
return self.respond()
| SlackActionEndpoint |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_with.py | {
"start": 6446,
"end": 8416
} | class ____(object):
def setUp(self):
self.TEST_EXCEPTION = RuntimeError("test exception")
super().setUp()
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager,
exc_type=None):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
if exc_type is None:
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
exc_type = type(self.TEST_EXCEPTION)
self.assertEqual(mock_manager.exit_args[0], exc_type)
# Test the __exit__ arguments. Issue #7853
self.assertIsInstance(mock_manager.exit_args[1], exc_type)
self.assertIsNot(mock_manager.exit_args[2], None)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
| ContextmanagerAssertionMixin |
python | pypa__pip | src/pip/_vendor/truststore/_windows.py | {
"start": 3999,
"end": 17993
} | class ____(Structure):
_fields_ = (
("cbSize", DWORD),
("hRestrictedRoot", HCERTSTORE),
("hRestrictedTrust", HCERTSTORE),
("hRestrictedOther", HCERTSTORE),
("cAdditionalStore", DWORD),
("rghAdditionalStore", c_void_p),
("dwFlags", DWORD),
("dwUrlRetrievalTimeout", DWORD),
("MaximumCachedCertificates", DWORD),
("CycleDetectionModulus", DWORD),
("hExclusiveRoot", HCERTSTORE),
("hExclusiveTrustedPeople", HCERTSTORE),
("dwExclusiveFlags", DWORD),
)
PCERT_CHAIN_ENGINE_CONFIG = POINTER(CERT_CHAIN_ENGINE_CONFIG)
PHCERTCHAINENGINE = POINTER(HCERTCHAINENGINE)
X509_ASN_ENCODING = 0x00000001
PKCS_7_ASN_ENCODING = 0x00010000
CERT_STORE_PROV_MEMORY = b"Memory"
CERT_STORE_ADD_USE_EXISTING = 2
USAGE_MATCH_TYPE_OR = 1
OID_PKIX_KP_SERVER_AUTH = c_char_p(b"1.3.6.1.5.5.7.3.1")
CERT_CHAIN_REVOCATION_CHECK_END_CERT = 0x10000000
CERT_CHAIN_REVOCATION_CHECK_CHAIN = 0x20000000
CERT_CHAIN_POLICY_IGNORE_ALL_NOT_TIME_VALID_FLAGS = 0x00000007
CERT_CHAIN_POLICY_IGNORE_INVALID_BASIC_CONSTRAINTS_FLAG = 0x00000008
CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG = 0x00000010
CERT_CHAIN_POLICY_IGNORE_INVALID_NAME_FLAG = 0x00000040
CERT_CHAIN_POLICY_IGNORE_WRONG_USAGE_FLAG = 0x00000020
CERT_CHAIN_POLICY_IGNORE_INVALID_POLICY_FLAG = 0x00000080
CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS = 0x00000F00
CERT_CHAIN_POLICY_ALLOW_TESTROOT_FLAG = 0x00008000
CERT_CHAIN_POLICY_TRUST_TESTROOT_FLAG = 0x00004000
SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000
AUTHTYPE_SERVER = 2
CERT_CHAIN_POLICY_SSL = 4
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
# Flags to set for SSLContext.verify_mode=CERT_NONE
CERT_CHAIN_POLICY_VERIFY_MODE_NONE_FLAGS = (
CERT_CHAIN_POLICY_IGNORE_ALL_NOT_TIME_VALID_FLAGS
| CERT_CHAIN_POLICY_IGNORE_INVALID_BASIC_CONSTRAINTS_FLAG
| CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG
| CERT_CHAIN_POLICY_IGNORE_INVALID_NAME_FLAG
| CERT_CHAIN_POLICY_IGNORE_WRONG_USAGE_FLAG
| CERT_CHAIN_POLICY_IGNORE_INVALID_POLICY_FLAG
| CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS
| CERT_CHAIN_POLICY_ALLOW_TESTROOT_FLAG
| CERT_CHAIN_POLICY_TRUST_TESTROOT_FLAG
)
wincrypt = WinDLL("crypt32.dll")
kernel32 = WinDLL("kernel32.dll")
def _handle_win_error(result: bool, _: Any, args: Any) -> Any:
if not result:
# Note, actually raises OSError after calling GetLastError and FormatMessage
raise WinError()
return args
CertCreateCertificateChainEngine = wincrypt.CertCreateCertificateChainEngine
CertCreateCertificateChainEngine.argtypes = (
PCERT_CHAIN_ENGINE_CONFIG,
PHCERTCHAINENGINE,
)
CertCreateCertificateChainEngine.errcheck = _handle_win_error
CertOpenStore = wincrypt.CertOpenStore
CertOpenStore.argtypes = (LPCSTR, DWORD, HCRYPTPROV_LEGACY, DWORD, c_void_p)
CertOpenStore.restype = HCERTSTORE
CertOpenStore.errcheck = _handle_win_error
CertAddEncodedCertificateToStore = wincrypt.CertAddEncodedCertificateToStore
CertAddEncodedCertificateToStore.argtypes = (
HCERTSTORE,
DWORD,
c_char_p,
DWORD,
DWORD,
PCCERT_CONTEXT,
)
CertAddEncodedCertificateToStore.restype = BOOL
CertCreateCertificateContext = wincrypt.CertCreateCertificateContext
CertCreateCertificateContext.argtypes = (DWORD, c_char_p, DWORD)
CertCreateCertificateContext.restype = PCERT_CONTEXT
CertCreateCertificateContext.errcheck = _handle_win_error
CertGetCertificateChain = wincrypt.CertGetCertificateChain
CertGetCertificateChain.argtypes = (
HCERTCHAINENGINE,
PCERT_CONTEXT,
LPFILETIME,
HCERTSTORE,
PCERT_CHAIN_PARA,
DWORD,
c_void_p,
PCCERT_CHAIN_CONTEXT,
)
CertGetCertificateChain.restype = BOOL
CertGetCertificateChain.errcheck = _handle_win_error
CertVerifyCertificateChainPolicy = wincrypt.CertVerifyCertificateChainPolicy
CertVerifyCertificateChainPolicy.argtypes = (
c_ulong,
PCERT_CHAIN_CONTEXT,
PCERT_CHAIN_POLICY_PARA,
PCERT_CHAIN_POLICY_STATUS,
)
CertVerifyCertificateChainPolicy.restype = BOOL
CertCloseStore = wincrypt.CertCloseStore
CertCloseStore.argtypes = (HCERTSTORE, DWORD)
CertCloseStore.restype = BOOL
CertCloseStore.errcheck = _handle_win_error
CertFreeCertificateChain = wincrypt.CertFreeCertificateChain
CertFreeCertificateChain.argtypes = (PCERT_CHAIN_CONTEXT,)
CertFreeCertificateContext = wincrypt.CertFreeCertificateContext
CertFreeCertificateContext.argtypes = (PCERT_CONTEXT,)
CertFreeCertificateChainEngine = wincrypt.CertFreeCertificateChainEngine
CertFreeCertificateChainEngine.argtypes = (HCERTCHAINENGINE,)
FormatMessageW = kernel32.FormatMessageW
FormatMessageW.argtypes = (
DWORD,
LPCVOID,
DWORD,
DWORD,
LPWSTR,
DWORD,
c_void_p,
)
FormatMessageW.restype = DWORD
def _verify_peercerts_impl(
ssl_context: ssl.SSLContext,
cert_chain: list[bytes],
server_hostname: str | None = None,
) -> None:
"""Verify the cert_chain from the server using Windows APIs."""
# If the peer didn't send any certificates then
# we can't do verification. Raise an error.
if not cert_chain:
raise ssl.SSLCertVerificationError("Peer sent no certificates to verify")
pCertContext = None
hIntermediateCertStore = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, None, 0, None)
try:
# Add intermediate certs to an in-memory cert store
for cert_bytes in cert_chain[1:]:
CertAddEncodedCertificateToStore(
hIntermediateCertStore,
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
cert_bytes,
len(cert_bytes),
CERT_STORE_ADD_USE_EXISTING,
None,
)
# Cert context for leaf cert
leaf_cert = cert_chain[0]
pCertContext = CertCreateCertificateContext(
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, leaf_cert, len(leaf_cert)
)
# Chain params to match certs for serverAuth extended usage
cert_enhkey_usage = CERT_ENHKEY_USAGE()
cert_enhkey_usage.cUsageIdentifier = 1
cert_enhkey_usage.rgpszUsageIdentifier = (c_char_p * 1)(OID_PKIX_KP_SERVER_AUTH)
cert_usage_match = CERT_USAGE_MATCH()
cert_usage_match.Usage = cert_enhkey_usage
chain_params = CERT_CHAIN_PARA()
chain_params.RequestedUsage = cert_usage_match
chain_params.cbSize = sizeof(chain_params)
pChainPara = pointer(chain_params)
if ssl_context.verify_flags & ssl.VERIFY_CRL_CHECK_CHAIN:
chain_flags = CERT_CHAIN_REVOCATION_CHECK_CHAIN
elif ssl_context.verify_flags & ssl.VERIFY_CRL_CHECK_LEAF:
chain_flags = CERT_CHAIN_REVOCATION_CHECK_END_CERT
else:
chain_flags = 0
try:
# First attempt to verify using the default Windows system trust roots
# (default chain engine).
_get_and_verify_cert_chain(
ssl_context,
None,
hIntermediateCertStore,
pCertContext,
pChainPara,
server_hostname,
chain_flags=chain_flags,
)
except ssl.SSLCertVerificationError as e:
# If that fails but custom CA certs have been added
# to the SSLContext using load_verify_locations,
# try verifying using a custom chain engine
# that trusts the custom CA certs.
custom_ca_certs: list[bytes] | None = ssl_context.get_ca_certs(
binary_form=True
)
if custom_ca_certs:
try:
_verify_using_custom_ca_certs(
ssl_context,
custom_ca_certs,
hIntermediateCertStore,
pCertContext,
pChainPara,
server_hostname,
chain_flags=chain_flags,
)
# Raise the original error, not the new error.
except ssl.SSLCertVerificationError:
raise e from None
else:
raise
finally:
CertCloseStore(hIntermediateCertStore, 0)
if pCertContext:
CertFreeCertificateContext(pCertContext)
def _get_and_verify_cert_chain(
ssl_context: ssl.SSLContext,
hChainEngine: HCERTCHAINENGINE | None,
hIntermediateCertStore: HCERTSTORE,
pPeerCertContext: c_void_p,
pChainPara: PCERT_CHAIN_PARA, # type: ignore[valid-type]
server_hostname: str | None,
chain_flags: int,
) -> None:
ppChainContext = None
try:
# Get cert chain
ppChainContext = pointer(PCERT_CHAIN_CONTEXT())
CertGetCertificateChain(
hChainEngine, # chain engine
pPeerCertContext, # leaf cert context
None, # current system time
hIntermediateCertStore, # additional in-memory cert store
pChainPara, # chain-building parameters
chain_flags,
None, # reserved
ppChainContext, # the resulting chain context
)
pChainContext = ppChainContext.contents
# Verify cert chain
ssl_extra_cert_chain_policy_para = SSL_EXTRA_CERT_CHAIN_POLICY_PARA()
ssl_extra_cert_chain_policy_para.cbSize = sizeof(
ssl_extra_cert_chain_policy_para
)
ssl_extra_cert_chain_policy_para.dwAuthType = AUTHTYPE_SERVER
ssl_extra_cert_chain_policy_para.fdwChecks = 0
if ssl_context.check_hostname is False:
ssl_extra_cert_chain_policy_para.fdwChecks = (
SECURITY_FLAG_IGNORE_CERT_CN_INVALID
)
if server_hostname:
ssl_extra_cert_chain_policy_para.pwszServerName = c_wchar_p(server_hostname)
chain_policy = CERT_CHAIN_POLICY_PARA()
chain_policy.pvExtraPolicyPara = cast(
pointer(ssl_extra_cert_chain_policy_para), c_void_p
)
if ssl_context.verify_mode == ssl.CERT_NONE:
chain_policy.dwFlags |= CERT_CHAIN_POLICY_VERIFY_MODE_NONE_FLAGS
chain_policy.cbSize = sizeof(chain_policy)
pPolicyPara = pointer(chain_policy)
policy_status = CERT_CHAIN_POLICY_STATUS()
policy_status.cbSize = sizeof(policy_status)
pPolicyStatus = pointer(policy_status)
CertVerifyCertificateChainPolicy(
CERT_CHAIN_POLICY_SSL,
pChainContext,
pPolicyPara,
pPolicyStatus,
)
# Check status
error_code = policy_status.dwError
if error_code:
# Try getting a human readable message for an error code.
error_message_buf = create_unicode_buffer(1024)
error_message_chars = FormatMessageW(
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
None,
error_code,
0,
error_message_buf,
sizeof(error_message_buf),
None,
)
# See if we received a message for the error,
# otherwise we use a generic error with the
# error code and hope that it's search-able.
if error_message_chars <= 0:
error_message = f"Certificate chain policy error {error_code:#x} [{policy_status.lElementIndex}]"
else:
error_message = error_message_buf.value.strip()
err = ssl.SSLCertVerificationError(error_message)
err.verify_message = error_message
err.verify_code = error_code
raise err from None
finally:
if ppChainContext:
CertFreeCertificateChain(ppChainContext.contents)
def _verify_using_custom_ca_certs(
ssl_context: ssl.SSLContext,
custom_ca_certs: list[bytes],
hIntermediateCertStore: HCERTSTORE,
pPeerCertContext: c_void_p,
pChainPara: PCERT_CHAIN_PARA, # type: ignore[valid-type]
server_hostname: str | None,
chain_flags: int,
) -> None:
hChainEngine = None
hRootCertStore = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, None, 0, None)
try:
# Add custom CA certs to an in-memory cert store
for cert_bytes in custom_ca_certs:
CertAddEncodedCertificateToStore(
hRootCertStore,
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
cert_bytes,
len(cert_bytes),
CERT_STORE_ADD_USE_EXISTING,
None,
)
# Create a custom cert chain engine which exclusively trusts
# certs from our hRootCertStore
cert_chain_engine_config = CERT_CHAIN_ENGINE_CONFIG()
cert_chain_engine_config.cbSize = sizeof(cert_chain_engine_config)
cert_chain_engine_config.hExclusiveRoot = hRootCertStore
pConfig = pointer(cert_chain_engine_config)
phChainEngine = pointer(HCERTCHAINENGINE())
CertCreateCertificateChainEngine(
pConfig,
phChainEngine,
)
hChainEngine = phChainEngine.contents
# Get and verify a cert chain using the custom chain engine
_get_and_verify_cert_chain(
ssl_context,
hChainEngine,
hIntermediateCertStore,
pPeerCertContext,
pChainPara,
server_hostname,
chain_flags,
)
finally:
if hChainEngine:
CertFreeCertificateChainEngine(hChainEngine)
CertCloseStore(hRootCertStore, 0)
@contextlib.contextmanager
def _configure_context(ctx: ssl.SSLContext) -> typing.Iterator[None]:
check_hostname = ctx.check_hostname
verify_mode = ctx.verify_mode
ctx.check_hostname = False
_set_ssl_context_verify_mode(ctx, ssl.CERT_NONE)
try:
yield
finally:
ctx.check_hostname = check_hostname
_set_ssl_context_verify_mode(ctx, verify_mode)
| CERT_CHAIN_ENGINE_CONFIG |
python | pandas-dev__pandas | setup.py | {
"start": 8869,
"end": 21977
} | class ____(Command):
"""numpy's build_src command interferes with Cython's build_ext."""
user_options = []
def initialize_options(self) -> None:
self.py_modules_dict = {}
def finalize_options(self) -> None:
pass
def run(self) -> None:
pass
cmdclass["clean"] = CleanCommand
cmdclass["build_ext"] = CheckingBuildExt
if _CYTHON_INSTALLED:
suffix = ".pyx"
cmdclass["cython"] = CythonCommand
else:
suffix = ".c"
cmdclass["build_src"] = DummyBuildSrc
# ----------------------------------------------------------------------
# Preparation of compiler arguments
debugging_symbols_requested = "--with-debugging-symbols" in sys.argv
if debugging_symbols_requested:
sys.argv.remove("--with-debugging-symbols")
if sys.byteorder == "big":
endian_macro = [("__BIG_ENDIAN__", "1")]
else:
endian_macro = [("__LITTLE_ENDIAN__", "1")]
extra_compile_args = []
extra_link_args = []
if is_platform_windows():
if debugging_symbols_requested:
extra_compile_args.append("/Z7")
extra_link_args.append("/DEBUG")
else:
# PANDAS_CI=1 is set in CI
if os.environ.get("PANDAS_CI", "0") == "1":
extra_compile_args.append("-Werror")
if debugging_symbols_requested:
extra_compile_args.append("-g3")
extra_compile_args.append("-UNDEBUG")
extra_compile_args.append("-O0")
# Build for at least macOS 10.9 when compiling on a 10.9 system or above,
# overriding CPython distuitls behaviour which is to target the version that
# python was built for. This may be overridden by setting
# MACOSX_DEPLOYMENT_TARGET before calling setup.py
if is_platform_mac():
if "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
current_system = platform.mac_ver()[0]
python_target = get_config_vars().get(
"MACOSX_DEPLOYMENT_TARGET", current_system
)
target_macos_version = "10.9"
parsed_macos_version = parse_version(target_macos_version)
if (
parse_version(str(python_target))
< parsed_macos_version
<= parse_version(current_system)
):
os.environ["MACOSX_DEPLOYMENT_TARGET"] = target_macos_version
if sys.version_info[:2] == (3, 8): # GH 33239
extra_compile_args.append("-Wno-error=deprecated-declarations")
# https://github.com/pandas-dev/pandas/issues/35559
extra_compile_args.append("-Wno-error=unreachable-code")
# enable coverage by building cython files by setting the environment variable
# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext
# with `--with-cython-coverage`enabled
linetrace = os.environ.get("PANDAS_CYTHON_COVERAGE", False) # noqa: PLW1508
if "--with-cython-coverage" in sys.argv:
linetrace = True
sys.argv.remove("--with-cython-coverage")
# Note: if not using `cythonize`, coverage can be enabled by
# pinning `ext.cython_directives = directives` to each ext in extensions.
# github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy
directives = {"linetrace": False, "language_level": 3, "always_allow_keywords": True}
macros = []
if linetrace:
# https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
directives["linetrace"] = True
macros = [("CYTHON_TRACE", "1"), ("CYTHON_TRACE_NOGIL", "1")]
# silence build warnings about deprecated API usage
# we can't do anything about these warnings because they stem from
# cython+numpy version mismatches.
macros.append(("NPY_NO_DEPRECATED_API", "0"))
# ----------------------------------------------------------------------
# Specification of Dependencies
# TODO(cython#4518): Need to check to see if e.g. `linetrace` has changed and
# possibly re-compile.
def maybe_cythonize(extensions, *args, **kwargs):
"""
Render tempita templates before calling cythonize. This is skipped for
* clean
* sdist
"""
if "clean" in sys.argv or "sdist" in sys.argv:
# See https://github.com/cython/cython/issues/1495
return extensions
elif not _CYTHON_INSTALLED:
# GH#28836 raise a helfpul error message
if _CYTHON_VERSION:
raise RuntimeError(
f"Cannot cythonize with old Cython version ({_CYTHON_VERSION} "
f"installed, needs {min_cython_ver})"
)
raise RuntimeError("Cannot cythonize without Cython installed.")
# reuse any parallel arguments provided for compilation to cythonize
parser = argparse.ArgumentParser()
parser.add_argument("--parallel", "-j", type=int, default=1)
parsed, _ = parser.parse_known_args()
kwargs["nthreads"] = parsed.parallel
build_ext.render_templates(_pxifiles)
if debugging_symbols_requested:
kwargs["gdb_debug"] = True
return cythonize(extensions, *args, **kwargs)
def srcpath(name=None, suffix=".pyx", subdir="src"):
return pjoin("pandas", subdir, name + suffix)
lib_depends = ["pandas/_libs/include/pandas/parse_helper.h"]
tseries_depends = [
"pandas/_libs/include/pandas/datetime/pd_datetime.h",
]
ext_data = {
"_libs.algos": {
"pyxfile": "_libs/algos",
"depends": _pxi_dep["algos"],
},
"_libs.arrays": {"pyxfile": "_libs/arrays"},
"_libs.groupby": {"pyxfile": "_libs/groupby"},
"_libs.hashing": {"pyxfile": "_libs/hashing", "depends": []},
"_libs.hashtable": {
"pyxfile": "_libs/hashtable",
"depends": (
[
"pandas/_libs/include/pandas/vendored/klib/khash_python.h",
"pandas/_libs/include/pandas/vendored/klib/khash.h",
]
+ _pxi_dep["hashtable"]
),
},
"_libs.index": {
"pyxfile": "_libs/index",
"depends": _pxi_dep["index"],
},
"_libs.indexing": {"pyxfile": "_libs/indexing"},
"_libs.internals": {"pyxfile": "_libs/internals"},
"_libs.interval": {
"pyxfile": "_libs/interval",
"depends": _pxi_dep["interval"],
},
"_libs.join": {"pyxfile": "_libs/join"},
"_libs.lib": {
"pyxfile": "_libs/lib",
"depends": lib_depends + tseries_depends,
},
"_libs.missing": {"pyxfile": "_libs/missing", "depends": tseries_depends},
"_libs.parsers": {
"pyxfile": "_libs/parsers",
"depends": [
"pandas/_libs/src/parser/tokenizer.h",
"pandas/_libs/src/parser/io.h",
"pandas/_libs/src/pd_parser.h",
],
},
"_libs.ops": {"pyxfile": "_libs/ops"},
"_libs.ops_dispatch": {"pyxfile": "_libs/ops_dispatch"},
"_libs.properties": {"pyxfile": "_libs/properties"},
"_libs.reshape": {"pyxfile": "_libs/reshape", "depends": []},
"_libs.sparse": {"pyxfile": "_libs/sparse", "depends": _pxi_dep["sparse"]},
"_libs.tslib": {
"pyxfile": "_libs/tslib",
"depends": tseries_depends,
},
"_libs.tslibs.base": {"pyxfile": "_libs/tslibs/base"},
"_libs.tslibs.ccalendar": {"pyxfile": "_libs/tslibs/ccalendar"},
"_libs.tslibs.dtypes": {"pyxfile": "_libs/tslibs/dtypes"},
"_libs.tslibs.conversion": {
"pyxfile": "_libs/tslibs/conversion",
"depends": tseries_depends,
},
"_libs.tslibs.fields": {
"pyxfile": "_libs/tslibs/fields",
"depends": tseries_depends,
},
"_libs.tslibs.nattype": {"pyxfile": "_libs/tslibs/nattype"},
"_libs.tslibs.np_datetime": {
"pyxfile": "_libs/tslibs/np_datetime",
"depends": tseries_depends,
},
"_libs.tslibs.offsets": {
"pyxfile": "_libs/tslibs/offsets",
"depends": tseries_depends,
},
"_libs.tslibs.parsing": {
"pyxfile": "_libs/tslibs/parsing",
"sources": ["pandas/_libs/src/parser/tokenizer.c"],
},
"_libs.tslibs.period": {
"pyxfile": "_libs/tslibs/period",
"depends": tseries_depends,
},
"_libs.tslibs.strptime": {
"pyxfile": "_libs/tslibs/strptime",
"depends": tseries_depends,
},
"_libs.tslibs.timedeltas": {
"pyxfile": "_libs/tslibs/timedeltas",
"depends": tseries_depends,
},
"_libs.tslibs.timestamps": {
"pyxfile": "_libs/tslibs/timestamps",
"depends": tseries_depends,
},
"_libs.tslibs.timezones": {"pyxfile": "_libs/tslibs/timezones"},
"_libs.tslibs.tzconversion": {
"pyxfile": "_libs/tslibs/tzconversion",
"depends": tseries_depends,
},
"_libs.tslibs.vectorized": {
"pyxfile": "_libs/tslibs/vectorized",
"depends": tseries_depends,
},
"_libs.testing": {"pyxfile": "_libs/testing"},
"_libs.window.aggregations": {
"pyxfile": "_libs/window/aggregations",
"language": "c++",
"suffix": ".cpp",
"depends": ["pandas/_libs/include/pandas/skiplist.h"],
},
"_libs.window.indexers": {"pyxfile": "_libs/window/indexers"},
"_libs.writers": {"pyxfile": "_libs/writers"},
"_libs.sas": {"pyxfile": "_libs/sas"},
"_libs.byteswap": {"pyxfile": "_libs/byteswap"},
}
extensions = []
for name, data in ext_data.items():
source_suffix = suffix if suffix == ".pyx" else data.get("suffix", ".c")
sources = [srcpath(data["pyxfile"], suffix=source_suffix, subdir="")]
sources.extend(data.get("sources", []))
include = ["pandas/_libs/include", numpy.get_include()]
undef_macros = []
if (
sys.platform == "zos"
and data.get("language") == "c++"
and os.path.basename(os.environ.get("CXX", "/bin/xlc++")) in ("xlc", "xlc++")
):
data.get("macros", macros).append(("__s390__", "1"))
extra_compile_args.append("-qlanglvl=extended0x:nolibext")
undef_macros.append("_POSIX_THREADS")
obj = Extension(
f"pandas.{name}",
sources=sources,
depends=data.get("depends", []),
include_dirs=include,
language=data.get("language", "c"),
define_macros=data.get("macros", macros),
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
undef_macros=undef_macros,
)
extensions.append(obj)
# ----------------------------------------------------------------------
# ujson
if suffix == ".pyx":
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith((".c", ".cpp")):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension(
"pandas._libs.json",
depends=[
"pandas/_libs/include/pandas/vendored/ujson/lib/ultrajson.h",
"pandas/_libs/include/pandas/datetime/pd_datetime.h",
],
sources=(
[
"pandas/_libs/src/vendored/ujson/python/ujson.c",
"pandas/_libs/src/vendored/ujson/python/objToJSON.c",
"pandas/_libs/src/vendored/ujson/python/JSONtoObj.c",
"pandas/_libs/src/vendored/ujson/lib/ultrajsonenc.c",
"pandas/_libs/src/vendored/ujson/lib/ultrajsondec.c",
]
),
include_dirs=[
"pandas/_libs/include",
numpy.get_include(),
],
extra_compile_args=(extra_compile_args),
extra_link_args=extra_link_args,
define_macros=macros,
)
extensions.append(ujson_ext)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# pd_datetime
pd_dt_ext = Extension(
"pandas._libs.pandas_datetime",
depends=["pandas/_libs/tslibs/datetime/pd_datetime.h"],
sources=(
[
"pandas/_libs/src/vendored/numpy/datetime/np_datetime.c",
"pandas/_libs/src/vendored/numpy/datetime/np_datetime_strings.c",
"pandas/_libs/src/datetime/date_conversions.c",
"pandas/_libs/src/datetime/pd_datetime.c",
]
),
include_dirs=[
"pandas/_libs/include",
numpy.get_include(),
],
extra_compile_args=(extra_compile_args),
extra_link_args=extra_link_args,
define_macros=macros,
)
extensions.append(pd_dt_ext)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# pd_datetime
pd_parser_ext = Extension(
"pandas._libs.pandas_parser",
depends=["pandas/_libs/include/pandas/parser/pd_parser.h"],
sources=(
[
"pandas/_libs/src/parser/tokenizer.c",
"pandas/_libs/src/parser/io.c",
"pandas/_libs/src/parser/pd_parser.c",
]
),
include_dirs=[
"pandas/_libs/include",
],
extra_compile_args=(extra_compile_args),
extra_link_args=extra_link_args,
define_macros=macros,
)
extensions.append(pd_parser_ext)
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Freeze to support parallel compilation when using spawn instead of fork
multiprocessing.freeze_support()
setup(
version=versioneer.get_version(),
ext_modules=maybe_cythonize(extensions, compiler_directives=directives),
cmdclass=cmdclass,
)
| DummyBuildSrc |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-structured-data/llama_index/readers/structured_data/base.py | {
"start": 236,
"end": 5208
} | class ____(BaseReader):
"""
Updated BaseReader parser to support JSON, JSONL, CSV and Excel (.xlsx) files.
...
Args:
col_joiner (str): The string to join the columns with. Defaults to ', '.
col_index (str, int, or list): The list of columns to be used as index.
col_metadata (None, str, int, or list): The list of columns to be used as metadata.
...
"""
def __init__(
self,
*args: Any,
col_joiner: str = ", ",
pandas_config: dict = {},
col_index: Union[str, int, List],
col_metadata: Optional[Union[str, int, List]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._col_joiner = col_joiner
self._pandas_config = pandas_config
self._col_index = col_index
self._col_metadata = col_metadata
def _load_dataframe(
self, file: Path, fs: Optional[AbstractFileSystem] = None
) -> pd.DataFrame:
file_extension = file.suffix.lower()
read_funcs = {
".csv": lambda f: pd.read_csv(f),
".xlsx": lambda f: pd.read_excel(f),
".json": lambda f: pd.read_json(f, encoding="utf-8"),
".jsonl": lambda f: pd.read_json(f, encoding="utf-8", lines=True),
}
if file_extension not in read_funcs:
raise ValueError(
f"Unsupported file extension '{file_extension}'. Supported extensions are 'json', 'csv', 'xlsx', and 'jsonl'."
)
if fs:
with fs.open(file) as f:
df = read_funcs[file_extension](f, **self._pandas_config)
else:
df = read_funcs[file_extension](file, **self._pandas_config)
return df
def _validate_column(self, index_name, column_index, df):
if isinstance(column_index, int):
assert -len(df.columns) < column_index < len(df.columns), (
f"The {index_name} {column_index} exceeds the range of columns in the dataframe: ({len(df.columns)})"
)
elif isinstance(column_index, str):
assert column_index in df.columns, (
f"The {index_name} must be in the dataframe"
)
else:
if all(isinstance(item, int) for item in column_index):
assert all(
-len(df.columns) < item < len(df.columns) for item in column_index
), (
f"Some items in {index_name} exceed the range of columns in the dataframe: ({len(df.columns)})"
)
elif all(isinstance(item, str) for item in column_index):
assert set(column_index).issubset(df.columns), (
f"All columns in {index_name} must be in the dataframe"
)
else:
raise ValueError(
"Not support int and str columns both in column configs."
)
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
df = self._load_dataframe(file, fs)
assert self._col_index, f"The col_index must be specified"
self._validate_column("col_index", self._col_index, df)
if isinstance(self._col_index, int) or (
isinstance(self._col_index, list)
and all(isinstance(item, int) for item in self._col_index)
):
df_text = df.iloc[:, self._col_index]
else:
df_text = df[self._col_index]
if isinstance(df_text, pd.DataFrame):
text_list = df_text.apply(
lambda row: self._col_joiner.join(row.astype(str).tolist()), axis=1
).tolist()
elif isinstance(df_text, pd.Series):
text_list = df_text.tolist()
if not self._col_metadata:
return [
Document(text=text_tuple, metadata=(extra_info or {}))
for text_tuple in text_list
]
else:
self._validate_column("col_metadata", self._col_metadata, df)
if isinstance(self._col_metadata, int) or (
isinstance(self._col_metadata, list)
and all(isinstance(item, int) for item in self._col_metadata)
):
df_metadata = df.iloc[:, self._col_metadata]
else:
df_metadata = df[self._col_metadata]
if isinstance(df_metadata, pd.Series):
df_metadata = pd.DataFrame(df_metadata)
metadata_list = df_metadata.to_dict(orient="records")
return [
Document(
text=text_tuple, metadata={**(metadata_tuple), **(extra_info or {})}
)
for text_tuple, metadata_tuple in zip(text_list, metadata_list)
]
| StructuredDataReader |
python | google__jax | tests/typing_test.py | {
"start": 1592,
"end": 1805
} | class ____:
dtype: np.dtype
def __init__(self, dt):
self.dtype = np.dtype(dt)
float32_dtype = np.dtype("float32")
# Avoid test parameterization because we want to statically check these annotations.
| HasDType |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-convex/source_convex/source.py | {
"start": 2953,
"end": 9349
} | class ____(HttpStream, IncrementalMixin):
def __init__(
self,
deployment_url: str,
access_key: str,
fmt: str,
table_name: str,
json_schema: Dict[str, Any],
):
self.deployment_url = deployment_url
self.fmt = fmt
self.table_name = table_name
if json_schema:
json_schema["additionalProperties"] = True
json_schema["properties"]["_ab_cdc_lsn"] = {"type": "number"}
json_schema["properties"]["_ab_cdc_updated_at"] = {"type": "string"}
json_schema["properties"]["_ab_cdc_deleted_at"] = {"anyOf": [{"type": "string"}, {"type": "null"}]}
else:
json_schema = {}
self.json_schema = json_schema
self._snapshot_cursor_value: Optional[str] = None
self._snapshot_has_more = True
self._delta_cursor_value: Optional[int] = None
self._delta_has_more = True
super().__init__(TokenAuthenticator(access_key, "Convex"))
@property
def name(self) -> str:
return self.table_name
@property
def url_base(self) -> str:
return self.deployment_url
def get_json_schema(self) -> Mapping[str, Any]: # type: ignore[override]
return self.json_schema
primary_key = "_id"
cursor_field = "_ts"
# Checkpoint stream reads after this many records. This prevents re-reading of data if the stream fails for any reason.
state_checkpoint_interval = 128
@property
def state(self) -> MutableMapping[str, Any]:
value: ConvexState = {
"snapshot_cursor": self._snapshot_cursor_value,
"snapshot_has_more": self._snapshot_has_more,
"delta_cursor": self._delta_cursor_value,
}
return cast(MutableMapping[str, Any], value)
@state.setter
def state(self, value: MutableMapping[str, Any]) -> None:
state = cast(ConvexState, value)
self._snapshot_cursor_value = state["snapshot_cursor"]
self._snapshot_has_more = state["snapshot_has_more"]
self._delta_cursor_value = state["delta_cursor"]
def next_page_token(self, response: requests.Response) -> Optional[ConvexState]:
if response.status_code != 200:
raise Exception(format_http_error("Failed request", response))
resp_json = response.json()
if self._snapshot_has_more:
self._snapshot_cursor_value = resp_json["cursor"]
self._snapshot_has_more = resp_json["hasMore"]
self._delta_cursor_value = resp_json["snapshot"]
else:
self._delta_cursor_value = resp_json["cursor"]
self._delta_has_more = resp_json["hasMore"]
has_more = self._snapshot_has_more or self._delta_has_more
return cast(ConvexState, self.state) if has_more else None
def path(
self,
stream_state: Optional[Mapping[str, Any]] = None,
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> str:
# https://docs.convex.dev/http-api/#sync
if self._snapshot_has_more:
return "/api/list_snapshot"
else:
return "/api/document_deltas"
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Iterable[Mapping[str, Any]]:
if response.status_code != 200:
raise Exception(format_http_error("Failed request", response))
resp_json = response.json()
return list(resp_json["values"])
def request_params(
self,
stream_state: Optional[Mapping[str, Any]],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
params: Dict[str, Any] = {"tableName": self.table_name, "format": self.fmt}
if self._snapshot_has_more:
if self._snapshot_cursor_value:
params["cursor"] = self._snapshot_cursor_value
if self._delta_cursor_value:
params["snapshot"] = self._delta_cursor_value
else:
if self._delta_cursor_value:
params["cursor"] = self._delta_cursor_value
return params
def request_headers(
self,
stream_state: Optional[Mapping[str, Any]],
stream_slice: Optional[Mapping[str, Any]] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Dict[str, str]:
"""
Custom headers for each HTTP request, not including Authorization.
"""
return {
"Convex-Client": f"airbyte-export-{CONVEX_CLIENT_VERSION}",
}
def get_updated_state(self, current_stream_state: ConvexState, latest_record: Mapping[str, Any]) -> ConvexState:
"""
This (deprecated) method is still used by AbstractSource to update state between calls to `read_records`.
"""
return cast(ConvexState, self.state)
def read_records(self, sync_mode: SyncMode, *args: Any, **kwargs: Any) -> Iterator[Any]:
self._delta_has_more = sync_mode == SyncMode.incremental
for read_record in super().read_records(sync_mode, *args, **kwargs):
record = dict(read_record)
ts_ns = record["_ts"]
ts_seconds = ts_ns / 1e9 # convert from nanoseconds.
# equivalent of java's `new Timestamp(transactionMillis).toInstant().toString()`
ts_datetime = datetime.utcfromtimestamp(ts_seconds)
ts = ts_datetime.isoformat()
# DebeziumEventUtils.CDC_LSN
record["_ab_cdc_lsn"] = ts_ns
# DebeziumEventUtils.CDC_DELETED_AT
record["_ab_cdc_updated_at"] = ts
record["_deleted"] = "_deleted" in record and record["_deleted"]
# DebeziumEventUtils.CDC_DELETED_AT
record["_ab_cdc_deleted_at"] = ts if record["_deleted"] else None
yield record
def format_http_error(context: str, resp: requests.Response) -> str:
try:
err = resp.json()
return f"{context}: {resp.status_code}: {err['code']}: {err['message']}"
except (JSONDecodeError, KeyError):
return f"{context}: {resp.text}"
| ConvexStream |
python | doocs__leetcode | solution/0800-0899/0821.Shortest Distance to a Character/Solution.py | {
"start": 0,
"end": 428
} | class ____:
def shortestToChar(self, s: str, c: str) -> List[int]:
n = len(s)
ans = [n] * n
pre = -inf
for i, ch in enumerate(s):
if ch == c:
pre = i
ans[i] = min(ans[i], i - pre)
suf = inf
for i in range(n - 1, -1, -1):
if s[i] == c:
suf = i
ans[i] = min(ans[i], suf - i)
return ans
| Solution |
python | django__django | tests/view_tests/tests/test_debug.py | {
"start": 2382,
"end": 18537
} | class ____(SimpleTestCase):
def test_files(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises/")
self.assertEqual(response.status_code, 500)
data = {
"file_data.txt": SimpleUploadedFile("file_data.txt", b"haha"),
}
with self.assertLogs("django.request", "ERROR"):
response = self.client.post("/raises/", data)
self.assertContains(response, "file_data.txt", status_code=500)
self.assertNotContains(response, "haha", status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs("django.security", "WARNING"):
response = self.client.get("/raises400/")
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_400_bad_request(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs("django.request", "WARNING") as cm:
response = self.client.get("/raises400_bad_request/")
self.assertContains(response, '<div class="context" id="', status_code=400)
self.assertEqual(
cm.records[0].getMessage(),
"Malformed request syntax: /raises400_bad_request/",
)
# Ensure no 403.html template exists to test the default case.
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
}
]
)
def test_403(self):
response = self.client.get("/raises403/")
self.assertContains(response, "<h1>403 Forbidden</h1>", status_code=403)
# Set up a test 403.html template.
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"loaders": [
(
"django.template.loaders.locmem.Loader",
{
"403.html": (
"This is a test template for a 403 error "
"({{ exception }})."
),
},
),
],
},
}
]
)
def test_403_template(self):
response = self.client.get("/raises403/")
self.assertContains(response, "test template", status_code=403)
self.assertContains(response, "(Insufficient Permissions).", status_code=403)
def test_404(self):
response = self.client.get("/raises404/")
self.assertNotContains(
response,
'<pre class="exception_value">',
status_code=404,
)
self.assertContains(
response,
"<p>The current path, <code>not-in-urls</code>, didn’t match any "
"of these.</p>",
status_code=404,
html=True,
)
def test_404_not_in_urls(self):
response = self.client.get("/not-in-urls")
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertNotContains(
response,
'<pre class="exception_value">',
status_code=404,
)
self.assertContains(
response, "Django tried these URL patterns", status_code=404
)
self.assertContains(
response,
"<code>technical404/ [name='my404']</code>",
status_code=404,
html=True,
)
self.assertContains(
response,
"<p>The current path, <code>not-in-urls</code>, didn’t match any "
"of these.</p>",
status_code=404,
html=True,
)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(
response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404
)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get("/")
self.assertContains(
response,
"<p>The empty path didn’t match any of these.</p>",
status_code=404,
html=True,
)
def test_technical_404(self):
response = self.client.get("/technical404/")
self.assertContains(response, '<header id="summary">', status_code=404)
self.assertContains(response, '<main id="info">', status_code=404)
self.assertContains(response, '<footer id="explanation">', status_code=404)
self.assertContains(
response,
'<pre class="exception_value">Testing technical 404.</pre>',
status_code=404,
html=True,
)
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(
response,
"<td>view_tests.views.technical404</td>",
status_code=404,
)
self.assertContains(
response,
"<p>The current path, <code>technical404/</code>, matched the "
"last one.</p>",
status_code=404,
html=True,
)
def test_classbased_technical_404(self):
response = self.client.get("/classbased404/")
self.assertContains(
response,
'<th scope="row">Raised by:</th><td>view_tests.views.Http404View</td>',
status_code=404,
html=True,
)
def test_technical_500(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
self.assertContains(response, '<header id="summary">', status_code=500)
self.assertContains(response, '<main id="info">', status_code=500)
self.assertContains(response, '<footer id="explanation">', status_code=500)
self.assertContains(
response,
'<th scope="row">Raised during:</th><td>view_tests.views.raises500</td>',
status_code=500,
html=True,
)
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/", headers={"accept": "text/plain"})
self.assertContains(
response,
"Raised during: view_tests.views.raises500",
status_code=500,
)
def test_technical_500_content_type_negotiation(self):
for accepts, content_type in [
("text/plain", "text/plain; charset=utf-8"),
("text/html", "text/html"),
("text/html,text/plain;q=0.9", "text/html"),
("text/plain,text/html;q=0.9", "text/plain; charset=utf-8"),
("text/*", "text/html"),
]:
with self.subTest(accepts=accepts):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get(
"/raises500/", headers={"accept": accepts}
)
self.assertEqual(response.status_code, 500)
self.assertEqual(response["Content-Type"], content_type)
def test_classbased_technical_500(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/classbased500/")
self.assertContains(
response,
'<th scope="row">Raised during:</th>'
"<td>view_tests.views.Raises500View</td>",
status_code=500,
html=True,
)
with self.assertLogs("django.request", "ERROR"):
response = self.client.get(
"/classbased500/", headers={"accept": "text/plain"}
)
self.assertContains(
response,
"Raised during: view_tests.views.Raises500View",
status_code=500,
)
def test_non_l10ned_numeric_ids(self):
"""
Numeric IDs and fancy traceback context blocks line numbers shouldn't
be localized.
"""
with self.settings(DEBUG=True):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">',
# not '<div class="context" id="c38,123,208"'.
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(
b'<div class="context" id="(?P<id>[^"]+)">', response.content
)
self.assertIsNotNone(match)
id_repr = match["id"]
self.assertFalse(
re.search(b"[^c0-9]", id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized "
"(value: %s)." % id_repr.decode(),
)
def test_template_exceptions(self):
with self.assertLogs("django.request", "ERROR"):
try:
self.client.get(reverse("template_exception"))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find('raise Exception("boom")'),
-1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc,
)
@skipIf(
sys.platform == "win32",
"Raises OSError instead of TemplateDoesNotExist on Windows.",
)
def test_safestring_in_exception(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/safestring_exception/")
self.assertNotContains(
response,
"<script>alert(1);</script>",
status_code=500,
html=True,
)
self.assertContains(
response,
"<script>alert(1);</script>",
count=3,
status_code=500,
html=True,
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with (
override_settings(
TEMPLATES=[
{
"BACKEND": (
"django.template.backends.django.DjangoTemplates"
),
"DIRS": [tempdir],
}
]
),
self.assertLogs("django.request", "ERROR"),
):
response = self.client.get(
reverse(
"raises_template_does_not_exist", kwargs={"path": template_name}
)
)
self.assertContains(
response,
"%s (Source does not exist)" % template_path,
status_code=500,
count=2,
)
# Assert as HTML.
self.assertContains(
response,
"<li><code>django.template.loaders.filesystem.Loader</code>: "
"%s (Source does not exist)</li>"
% os.path.join(tempdir, "notfound.html"),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow
up.
"""
with self.assertLogs("django.request", "ERROR"):
with self.assertRaises(TemplateDoesNotExist):
self.client.get("/render_no_template/")
@override_settings(ROOT_URLCONF="view_tests.default_urls")
def test_default_urlconf_template(self):
"""
Make sure that the default URLconf template is shown instead of the
technical 404 page, if the user has not altered their URLconf yet.
"""
response = self.client.get("/")
self.assertContains(
response, "<h1>The install worked successfully! Congratulations!</h1>"
)
@override_settings(
ROOT_URLCONF="view_tests.default_urls", FORCE_SCRIPT_NAME="/FORCED_PREFIX"
)
def test_default_urlconf_script_name(self):
response = self.client.request(**{"path": "/FORCED_PREFIX/"})
self.assertContains(
response, "<h1>The install worked successfully! Congratulations!</h1>"
)
@override_settings(ROOT_URLCONF="view_tests.default_urls")
def test_default_urlconf_technical_404(self):
response = self.client.get("/favicon.ico")
self.assertContains(
response,
"<code>\nadmin/\n[namespace='admin']\n</code>",
status_code=404,
html=True,
)
@override_settings(ROOT_URLCONF="view_tests.regression_21530_urls")
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get("/")
self.assertContains(
response, "Page not found <small>(404)</small>", status_code=404
)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
with mock.patch.object(DebugPath, "open") as m:
default_urlconf(None)
m.assert_called_once_with(encoding="utf-8")
m.reset_mock()
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding="utf-8")
def test_technical_404_converter_raise_404(self):
with mock.patch.object(IntConverter, "to_python", side_effect=Http404):
response = self.client.get("/path-post/1/")
self.assertContains(response, "Page not found", status_code=404)
def test_exception_reporter_from_request(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/custom_reporter_class_view/")
self.assertContains(response, "custom traceback text", status_code=500)
@override_settings(
DEFAULT_EXCEPTION_REPORTER="view_tests.views.CustomExceptionReporter"
)
def test_exception_reporter_from_settings(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
self.assertContains(response, "custom traceback text", status_code=500)
@override_settings(
DEFAULT_EXCEPTION_REPORTER="view_tests.views.TemplateOverrideExceptionReporter"
)
def test_template_override_exception_reporter(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
self.assertContains(
response,
"<h1>Oh no, an error occurred!</h1>",
status_code=500,
html=True,
)
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/", headers={"accept": "text/plain"})
self.assertContains(response, "Oh dear, an error occurred!", status_code=500)
| DebugViewTests |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py | {
"start": 16220,
"end": 23426
} | class ____:
def test_remove_single_leaf_node_pruned(self, tree: PrefixTree) -> None:
"""Test _remove_tenant_single_node for a leaf node; node should be pruned."""
tree.add_tenants(["tenant_1"], 0)
tree.insert("hello", "tenant_1", 1)
hello_node = tree.root.edge_label_to_child["h"]
assert hello_node.tenant_to_last_access_time == {"tenant_1": 1}
assert tree.tenant_to_char_count == {"tenant_1": 5}
assert tree.root.edge_label_to_child == {"h": hello_node}
removed_chars = tree._remove_tenant_single_node("tenant_1", hello_node)
assert removed_chars == 5
assert hello_node.tenant_to_last_access_time == {}
assert tree.tenant_to_char_count == {"tenant_1": 0}
assert tree.root.edge_label_to_child == {} # Node pruned
def test_remove_single_leaf_node_not_pruned(self, tree: PrefixTree) -> None:
"""Test _remove_tenant_single_node for a leaf node; node should not be pruned."""
tree.add_tenants(["tenant_1", "tenant_2"], 0)
tree.insert("hello", "tenant_1", 1)
tree.insert("hello", "tenant_2", 2)
hello_node = tree.root.edge_label_to_child["h"]
assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
assert tree.tenant_to_char_count == {"tenant_1": 5, "tenant_2": 5}
assert tree.root.edge_label_to_child == {"h": hello_node}
removed_chars = tree._remove_tenant_single_node("tenant_1", hello_node)
assert removed_chars == 5
assert hello_node.tenant_to_last_access_time == {"tenant_2": 2}
assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 5}
assert tree.root.edge_label_to_child == {"h": hello_node} # Node not pruned
def test_remove_single_node_with_non_existent_tenant(
self, tree: PrefixTree
) -> None:
"""Test _remove_tenant_single_node for a non-existent tenant is a no-op."""
tree.add_tenants(["tenant_1"], 0)
tree.insert("hello", "tenant_1", 1)
hello_node = tree.root.edge_label_to_child["h"]
removed_chars = tree._remove_tenant_single_node(
"non_existent_tenant", hello_node
)
assert removed_chars == 0
def test_remove_single_node_with_non_matching_tenant(
self, tree: PrefixTree
) -> None:
"""Test _remove_tenant_single_node if node doesn't belong to specified tenant is a no-op."""
tree.add_tenants(["tenant_1", "tenant_2"], 0)
tree.insert("hello", "tenant_1", 1)
tree.insert("world", "tenant_2", 2) # Node for tenant_2
hello_node = tree.root.edge_label_to_child["h"] # Belongs to tenant_1
removed_chars = tree._remove_tenant_single_node(
"tenant_2", hello_node
) # Try removing tenant_2 from tenant_1's node
assert removed_chars == 0
def test_remove_tenant(self, tree: PrefixTree) -> None:
"""Test remove_tenant for a tree with multiple tenants only removes the specified tenant."""
tree.add_tenants(["tenant_1", "tenant_2"], 0)
tree.insert("hello", "tenant_1", 1)
tree.insert("foobar", "tenant_1", 2)
tree.insert("helloworld", "tenant_2", 3)
removed_chars = tree.remove_tenants(["tenant_1"])
assert removed_chars == {"tenant_1": 11}
hello_node = tree.root.edge_label_to_child["h"]
assert hello_node.tenant_to_last_access_time == {"tenant_2": 3}
assert tree.tenant_to_char_count == {"tenant_2": 10}
assert set(tree.tenant_to_lru_tail.keys()) == {"tenant_2"}
tenant_2_lru_texts = get_lru_texts_from_tree(tree, "tenant_2")
assert tenant_2_lru_texts == ["", "world", "hello"]
def test_remove_non_existent_tenant(self, tree: PrefixTree) -> None:
"""Test remove_tenant for a non-existent tenant returns 0."""
tree.add_tenants(["tenant_1"], 0)
tree.insert("hello", "tenant_1", 1)
removed_chars = tree.remove_tenants(["non_existent_tenant"])
assert removed_chars == {"non_existent_tenant": 0}
def test_remove_tenant_prunes_nodes(self, tree: PrefixTree) -> None:
"""Test remove_tenant prunes nodes that become tenant-less and childless."""
tree.add_tenants(["tenant_1", "tenant_2"], 0)
tree.insert("helloworld", "tenant_1", 1) # Creates "helloworld"
tree.insert(
"hellothere", "tenant_2", 2
) # Splits into "hello" -> "world" and "hello" -> "there"
tree.remove_tenants(["tenant_1"])
# "world" node should be pruned. "hello" and "there" remain for tenant_2.
hello_node = tree.root.edge_label_to_child["h"]
assert set(hello_node.edge_label_to_child.keys()) == {"t"}
assert hello_node.edge_label_to_child["t"].text == "there"
assert hello_node.edge_label_to_child["t"].tenant_to_last_access_time == {
"tenant_2": 2
}
def test_remove_tenants(self, tree: PrefixTree) -> None:
"""Test remove_tenants for multiple tenants with different structures."""
tree.add_tenants(["tenant_1", "tenant_2", "tenant_3"], 0)
tree.insert("hello", "tenant_1", 1) # 5 chars
tree.insert("foobar", "tenant_1", 2) # 6 chars
tree.insert("helloworld", "tenant_2", 3) # 10 chars
tree.insert("test", "tenant_3", 4) # 4 chars
removed_chars = tree.remove_tenants(["tenant_1", "tenant_3"])
# Check return value contains correct char counts
assert removed_chars == {"tenant_1": 11, "tenant_3": 4}
# Check tree state is correct
assert "tenant_1" not in tree.tenant_to_char_count
assert "tenant_3" not in tree.tenant_to_char_count
assert "tenant_2" in tree.tenant_to_char_count
assert tree.tenant_to_char_count == {"tenant_2": 10}
# Check nodes are correctly maintained
assert (
"h" in tree.root.edge_label_to_child
) # hello node still exists for tenant_2
assert "t" not in tree.root.edge_label_to_child # test node removed
assert "f" not in tree.root.edge_label_to_child # foobar node removed
# Check LRU structure
assert set(tree.tenant_to_lru_tail.keys()) == {"tenant_2"}
tenant_2_lru_texts = get_lru_texts_from_tree(tree, "tenant_2")
assert tenant_2_lru_texts == ["", "world", "hello"]
def test_remove_tenants_with_nonexistent(self, tree: PrefixTree) -> None:
"""Test remove_tenants with a mix of existing and non-existent tenants."""
tree.add_tenants(["tenant_1", "tenant_2"], 0)
tree.insert("hello", "tenant_1", 1)
tree.insert("world", "tenant_2", 2)
removed_chars = tree.remove_tenants(["tenant_1", "nonexistent", "alsonotfound"])
# Check return value
assert removed_chars == {"tenant_1": 5, "nonexistent": 0, "alsonotfound": 0}
# Check tree state
assert "tenant_1" not in tree.tenant_to_char_count
assert tree.tenant_to_char_count == {"tenant_2": 5}
assert "h" not in tree.root.edge_label_to_child # hello node removed
assert "w" in tree.root.edge_label_to_child # world node still exists
| TestPrefixTreeRemove |
python | zarr-developers__zarr-python | src/zarr/codecs/bytes.py | {
"start": 682,
"end": 4051
} | class ____(ArrayBytesCodec):
"""bytes codec"""
is_fixed_size = True
endian: Endian | None
def __init__(self, *, endian: Endian | str | None = default_system_endian) -> None:
endian_parsed = None if endian is None else parse_enum(endian, Endian)
object.__setattr__(self, "endian", endian_parsed)
@classmethod
def from_dict(cls, data: dict[str, JSON]) -> Self:
_, configuration_parsed = parse_named_configuration(
data, "bytes", require_configuration=False
)
configuration_parsed = configuration_parsed or {}
return cls(**configuration_parsed) # type: ignore[arg-type]
def to_dict(self) -> dict[str, JSON]:
if self.endian is None:
return {"name": "bytes"}
else:
return {"name": "bytes", "configuration": {"endian": self.endian.value}}
def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self:
if not isinstance(array_spec.dtype, HasEndianness):
if self.endian is not None:
return replace(self, endian=None)
elif self.endian is None:
raise ValueError(
"The `endian` configuration needs to be specified for multi-byte data types."
)
return self
async def _decode_single(
self,
chunk_bytes: Buffer,
chunk_spec: ArraySpec,
) -> NDBuffer:
assert isinstance(chunk_bytes, Buffer)
# TODO: remove endianness enum in favor of literal union
endian_str = self.endian.value if self.endian is not None else None
if isinstance(chunk_spec.dtype, HasEndianness):
dtype = replace(chunk_spec.dtype, endianness=endian_str).to_native_dtype() # type: ignore[call-arg]
else:
dtype = chunk_spec.dtype.to_native_dtype()
as_array_like = chunk_bytes.as_array_like()
if isinstance(as_array_like, NDArrayLike):
as_nd_array_like = as_array_like
else:
as_nd_array_like = np.asanyarray(as_array_like)
chunk_array = chunk_spec.prototype.nd_buffer.from_ndarray_like(
as_nd_array_like.view(dtype=dtype)
)
# ensure correct chunk shape
if chunk_array.shape != chunk_spec.shape:
chunk_array = chunk_array.reshape(
chunk_spec.shape,
)
return chunk_array
async def _encode_single(
self,
chunk_array: NDBuffer,
chunk_spec: ArraySpec,
) -> Buffer | None:
assert isinstance(chunk_array, NDBuffer)
if (
chunk_array.dtype.itemsize > 1
and self.endian is not None
and self.endian != chunk_array.byteorder
):
# type-ignore is a numpy bug
# see https://github.com/numpy/numpy/issues/26473
new_dtype = chunk_array.dtype.newbyteorder(self.endian.name) # type: ignore[arg-type]
chunk_array = chunk_array.astype(new_dtype)
nd_array = chunk_array.as_ndarray_like()
# Flatten the nd-array (only copy if needed) and reinterpret as bytes
nd_array = nd_array.ravel().view(dtype="B")
return chunk_spec.prototype.buffer.from_array_like(nd_array)
def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int:
return input_byte_length
| BytesCodec |
python | huggingface__transformers | src/transformers/models/cohere/modular_cohere.py | {
"start": 5753,
"end": 8659
} | class ____(LlamaAttention):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
super().__init__(config, layer_idx)
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
# When sharding the model using Tensor Parallelism, need to be careful to use n_local_heads
self.q_norm = CohereLayerNorm(
hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps
)
self.k_norm = CohereLayerNorm(
hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape)
key_states = self.k_proj(hidden_states).view(hidden_shape)
value_states = self.v_proj(hidden_states).view(hidden_shape)
if self.use_qk_norm: # main diff from Llama
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; position_ids needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| CohereAttention |
python | sympy__sympy | sympy/solvers/diophantine/diophantine.py | {
"start": 29759,
"end": 30323
} | class ____(DiophantineEquationType):
"""
Representation of an inhomogeneous general quadratic.
No solver is currently implemented for this equation type.
"""
name = 'inhomogeneous_general_quadratic'
def matches(self):
if not (self.total_degree == 2 and self.dimension >= 3):
return False
if not self.homogeneous_order:
return True
# there may be Pow keys like x**2 or Mul keys like x*y
return any(k.is_Mul for k in self.coeff) and not self.homogeneous
| InhomogeneousGeneralQuadratic |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.