language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PyCQA__pyflakes | pyflakes/checker.py | {
"start": 16972,
"end": 17098
} | class ____(Scope):
"""Scope for a module."""
_futures_allowed = True
_annotations_future_enabled = False
| ModuleScope |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowing5.py | {
"start": 532,
"end": 753
} | class ____(Generic[T]): ...
def func3(val: A[Any]):
x: A[int] = val
reveal_type(x, expected_text="A[int]")
def func4(val: A[list[Any]]):
x: A[list[int]] = val
reveal_type(x, expected_text="A[list[int]]")
| A |
python | coleifer__peewee | tests/mysql_ext.py | {
"start": 634,
"end": 763
} | class ____(TestModel):
first = CharField()
last = CharField()
dob = DateField(default=datetime.date(2000, 1, 1))
| Person |
python | getsentry__sentry | src/sentry/relay/config/measurements.py | {
"start": 525,
"end": 2600
} | class ____(TypedDict):
name: str
unit: MeasurementUnit
#: List used to distinguish between user-defined and built-in measurements.
#: NOTE: This is redundant with `ALL_MEASUREMENT_METRICS`, which can be removed
#: once all Relay instances understand the new format.
BUILTIN_MEASUREMENTS: Sequence[BuiltinMeasurementKey] = [
{"name": "app_start_cold", "unit": "millisecond"},
{"name": "app_start_warm", "unit": "millisecond"},
{"name": "cls", "unit": "none"},
{"name": "connection.rtt", "unit": "millisecond"},
{"name": "fcp", "unit": "millisecond"},
{"name": "fid", "unit": "millisecond"},
{"name": "fp", "unit": "millisecond"},
{"name": "frames_frozen_rate", "unit": "ratio"},
{"name": "frames_frozen", "unit": "none"},
{"name": "frames_slow_rate", "unit": "ratio"},
{"name": "frames_slow", "unit": "none"},
{"name": "frames_total", "unit": "none"},
{"name": "inp", "unit": "millisecond"},
{"name": "lcp", "unit": "millisecond"},
{"name": "stall_count", "unit": "none"},
{"name": "stall_longest_time", "unit": "millisecond"},
{"name": "stall_percentage", "unit": "ratio"},
{"name": "stall_total_time", "unit": "millisecond"},
{"name": "ttfb.requesttime", "unit": "millisecond"},
{"name": "ttfb", "unit": "millisecond"},
{"name": "time_to_full_display", "unit": "millisecond"},
{"name": "time_to_initial_display", "unit": "millisecond"},
{"name": "score.cls", "unit": "ratio"},
{"name": "score.fcp", "unit": "ratio"},
{"name": "score.fid", "unit": "ratio"},
{"name": "score.lcp", "unit": "ratio"},
{"name": "score.ttfb", "unit": "ratio"},
{"name": "score.inp", "unit": "ratio"},
{"name": "score.total", "unit": "ratio"},
{"name": "score.weight.cls", "unit": "ratio"},
{"name": "score.weight.fcp", "unit": "ratio"},
{"name": "score.weight.fid", "unit": "ratio"},
{"name": "score.weight.lcp", "unit": "ratio"},
{"name": "score.weight.ttfb", "unit": "ratio"},
{"name": "score.weight.inp", "unit": "ratio"},
]
| BuiltinMeasurementKey |
python | fastai__fastai | fastai/callback/tensorboard.py | {
"start": 665,
"end": 1685
} | class ____(Callback):
order = Recorder.order+1
"Base class for tensorboard callbacks"
def __init__(self): self.run_projector = False
def after_pred(self):
if self.run_projector: self.feat = _add_projector_features(self.learn, self.h, self.feat)
def after_validate(self):
if not self.run_projector: return
self.run_projector = False
self._remove()
_write_projector_embedding(self.learn, self.writer, self.feat)
def after_fit(self):
if self.run: self.writer.close()
def _setup_projector(self):
self.run_projector = True
self.h = hook_output(self.learn.model[1][1] if not self.layer else self.layer)
self.feat = {}
def _setup_writer(self): self.writer = SummaryWriter(log_dir=self.log_dir)
def __del__(self): self._remove()
def _remove(self):
if getattr(self, 'h', None): self.h.remove()
# %% ../../nbs/70a_callback.tensorboard.ipynb 21
| TensorBoardBaseCallback |
python | sqlalchemy__sqlalchemy | test/orm/test_froms.py | {
"start": 11278,
"end": 13793
} | class ____(QueryTest, AssertsCompiledSQL):
"""compare a bunch of select() tests with the equivalent Query using
straight table/columns.
Results should be the same as Query should act as a select() pass-
thru for ClauseElement entities.
"""
__dialect__ = "default"
def test_select(self):
addresses, users = self.tables.addresses, self.tables.users
sess = fixture_session()
self.assert_compile(
sess.query(users)
.select_from(users.select().subquery())
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM "
"(SELECT users.id AS id, users.name AS name FROM users) "
"AS anon_1, users",
)
self.assert_compile(
sess.query(users, exists(text("1")).select_from(addresses))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement,
"SELECT users.id AS users_id, users.name AS users_name, EXISTS "
"(SELECT 1 FROM addresses) AS anon_1 FROM users",
)
# a little tedious here, adding labels to work around Query's
# auto-labelling.
s = (
sess.query(
addresses.c.id.label("id"),
addresses.c.email_address.label("email"),
)
.filter(addresses.c.user_id == users.c.id)
.correlate(users)
.statement.alias()
)
self.assert_compile(
sess.query(users, s.c.email)
.select_from(users.join(s, s.c.id == users.c.id))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement,
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email AS anon_1_email "
"FROM users JOIN (SELECT addresses.id AS id, "
"addresses.email_address AS email FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 "
"ON anon_1.id = users.id",
)
x = func.lala(users.c.id).label("foo")
self.assert_compile(
sess.query(x).filter(x == 5).statement,
"SELECT lala(users.id) AS foo FROM users WHERE "
"lala(users.id) = :param_1",
)
self.assert_compile(
sess.query(func.sum(x).label("bar")).statement,
"SELECT sum(lala(users.id)) AS bar FROM users",
)
| RawSelectTest |
python | weaviate__weaviate-python-client | weaviate/client_executor.py | {
"start": 781,
"end": 10944
} | class ____(Generic[ConnectionType]):
_connection_type: Type[ConnectionType]
def __init__(
self,
connection_params: Optional[ConnectionParams] = None,
embedded_options: Optional[EmbeddedOptions] = None,
auth_client_secret: Optional[AuthCredentials] = None,
additional_headers: Optional[dict] = None,
additional_config: Optional[AdditionalConfig] = None,
skip_init_checks: bool = False,
) -> None:
"""Initialise a WeaviateClient/WeaviateClientAsync class instance to use when interacting with Weaviate.
Use this specific initializer when you want to create a custom Client specific to your Weaviate setup.
To simplify connections to Weaviate Cloud or local instances, use the weaviate.connect_to_weaviate_cloud
or weaviate.connect_to_local helper functions.
Args:
connection_params: The connection parameters to use for the underlying HTTP requests.
embedded_options: The options to use when provisioning an embedded Weaviate instance.
auth_client_secret: Authenticate to weaviate by using one of the given authentication modes:
- `weaviate.auth.AuthBearerToken` to use existing access and (optionally, but recommended) refresh tokens
- `weaviate.auth.AuthClientPassword` to use username and password for oidc Resource Owner Password flow
- `weaviate.auth.AuthClientCredentials` to use a client secret for oidc client credential flow
additional_headers: Additional headers to include in the requests. Can be used to set OpenAI/HuggingFace/Cohere etc. keys.
[Here](https://weaviate.io/developers/weaviate/modules/reader-generator-modules/generative-openai#providing-the-key-to-weaviate) is an
example of how to set API keys within this parameter.
additional_config: Additional and advanced configuration options for Weaviate.
skip_init_checks: If set to `True` then the client will not perform any checks including ensuring that weaviate has started.
This is useful for air-gapped environments and high-performance setups.
"""
connection_params, embedded_db = self.__parse_connection_params_and_embedded_db(
connection_params, embedded_options
)
config = additional_config or AdditionalConfig()
self._connection = self._connection_type( # pyright: ignore reportIncompatibleVariableOverride
connection_params=connection_params,
auth_client_secret=auth_client_secret,
timeout_config=config.timeout,
additional_headers=additional_headers,
embedded_db=embedded_db,
connection_config=config.connection,
proxies=config.proxies,
trust_env=config.trust_env,
skip_init_checks=skip_init_checks,
)
self.integrations = _Integrations(self._connection)
def __parse_connection_params_and_embedded_db(
self,
connection_params: Optional[ConnectionParams],
embedded_options: Optional[EmbeddedOptions],
) -> Tuple[ConnectionParams, Optional[EmbeddedV4]]:
if connection_params is None and embedded_options is None:
raise TypeError("Either connection_params or embedded_options must be present.")
elif connection_params is not None and embedded_options is not None:
raise TypeError(
f"connection_params is not expected to be set when using embedded_options but connection_params was {connection_params}"
)
if embedded_options is not None:
_validate_input(
_ValidateArgument([EmbeddedOptions], "embedded_options", embedded_options)
)
embedded_db = EmbeddedV4(options=embedded_options)
embedded_db.start()
return (
ConnectionParams(
http=ProtocolParams(
host="localhost", port=embedded_db.options.port, secure=False
),
grpc=ProtocolParams(
host="localhost", port=embedded_options.grpc_port, secure=False
),
),
embedded_db,
)
if not isinstance(connection_params, ConnectionParams):
raise TypeError(
f"connection_params is expected to be a ConnectionParams object but is {type(connection_params)}"
)
return connection_params, None
async def __close_async(self) -> None:
await executor.aresult(self._connection.close("async"))
def close(self) -> executor.Result[None]:
"""In order to clean up any resources used by the client, call this method when you are done with it.
If you do not do this, memory leaks may occur due to stale connections.
This method also closes the embedded database if one was started.
"""
if isinstance(self._connection, ConnectionAsync):
return self.__close_async()
return executor.result(self._connection.close("sync"))
def connect(self) -> executor.Result[None]:
"""Connect to the Weaviate instance performing all the necessary checks.
If you have specified `skip_init_checks` in the constructor then this method will not perform any runtime checks
to ensure that Weaviate is running and ready to accept requests. This is useful for air-gapped environments and high-performance setups.
This method is idempotent and will only perform the checks once. Any subsequent calls do nothing while `client.is_connected() == True`.
Raises:
weaviate.exceptions.WeaviateConnectionError: If the network connection to weaviate fails.
weaviate.exceptions.UnexpectedStatusCodeError: If weaviate reports a none OK status.
"""
return executor.execute(
response_callback=lambda _: None,
method=self._connection.connect,
)
def is_live(self) -> executor.Result[bool]:
def resp(res: Response) -> bool:
return res.status_code == 200
def exc(e: Exception) -> bool:
print(e)
return False
return executor.execute(
response_callback=resp,
exception_callback=exc,
method=self._connection.get,
path="/.well-known/live",
)
def is_ready(self) -> executor.Result[bool]:
def resp(res: Response) -> bool:
return res.status_code == 200
def exc(e: Exception) -> bool:
print(e)
return False
return executor.execute(
response_callback=resp,
exception_callback=exc,
method=self._connection.get,
path="/.well-known/ready",
)
def graphql_raw_query(self, gql_query: str) -> executor.Result[_RawGQLReturn]:
"""Allows to send graphQL string queries, this should only be used for weaviate-features that are not yet supported.
Be cautious of injection risks when generating query strings.
Args:
gql_query: GraphQL query as a string.
Returns:
A dict with the response from the GraphQL query.
Raises:
TypeError: If `gql_query` is not of type str.
weaviate.exceptions.WeaviateConnectionError: If the network connection to weaviate fails.
weaviate.exceptions.UnexpectedStatusCodeError: If weaviate reports a none OK status.
"""
_validate_input(_ValidateArgument([str], "gql_query", gql_query))
json_query = {"query": gql_query}
def resp(response: Response) -> _RawGQLReturn:
res = _decode_json_response_dict(response, "GQL query")
assert res is not None
errors: Optional[Dict[str, Any]] = res.get("errors")
data_raw: Optional[Dict[str, _GQLEntryReturnType]] = res.get("data")
if data_raw is not None:
return _RawGQLReturn(
aggregate=data_raw.get("Aggregate", {}),
explore=data_raw.get("Explore", {}),
get=data_raw.get("Get", {}),
errors=errors,
)
return _RawGQLReturn(aggregate={}, explore={}, get={}, errors=errors)
def exc(e: Exception) -> _RawGQLReturn:
raise e
return executor.execute(
response_callback=resp,
exception_callback=exc,
method=self._connection.post,
path="/graphql",
weaviate_object=json_query,
error_msg="Raw GQL query failed",
status_codes=_ExpectedStatusCodes(ok_in=[200], error="GQL query"),
is_gql_query=True,
)
def get_meta(self) -> executor.Result[dict]:
"""Get the meta endpoint description of weaviate.
Returns:
The `dict` describing the weaviate configuration.
Raises:
weaviate.exceptions.UnexpectedStatusCodeError: If Weaviate reports a none OK status.
"""
return executor.execute(
response_callback=executor.do_nothing,
method=self._connection.get_meta,
)
def get_open_id_configuration(
self,
) -> executor.Result[Optional[Dict[str, Any]]]:
"""Get the openid-configuration.
Returns:
The configuration or `None` if not configured.
Raises:
weaviate.exceptions.UnexpectedStatusCodeError: If Weaviate reports a none OK status.
"""
return executor.execute(
response_callback=executor.do_nothing,
method=self._connection.get_open_id_configuration,
)
@executor.no_wrapping
def is_connected(self) -> bool:
"""Check if the client is connected to Weaviate.
Returns:
`True` if the client is connected to Weaviate with an open connection pool, `False` otherwise.
"""
return self._connection.is_connected()
| _WeaviateClientExecutor |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 20162,
"end": 21530
} | class ____(NextRedirectMixin, AjaxCapableProcessFormViewMixin, FormView):
template_name = "account/password_reset." + app_settings.TEMPLATE_EXTENSION
form_class = ResetPasswordForm
success_url = reverse_lazy("account_reset_password_done")
def get_success_url(self):
if not app_settings.PASSWORD_RESET_BY_CODE_ENABLED:
return super().get_success_url()
return self.passthrough_next_url(reverse("account_confirm_password_reset_code"))
def get_form_class(self):
return get_form_class(app_settings.FORMS, "reset_password", self.form_class)
def form_valid(self, form):
r429 = ratelimit.consume_or_429(
self.request,
action="reset_password",
key=form.cleaned_data["email"].lower(),
)
if r429:
return r429
form.save(self.request)
return super().form_valid(form)
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
login_url = self.passthrough_next_url(reverse("account_login"))
# NOTE: For backwards compatibility
ret["password_reset_form"] = ret.get("form")
# (end NOTE)
ret.update({"login_url": login_url})
return ret
password_reset = PasswordResetView.as_view()
@method_decorator(login_not_required, name="dispatch")
| PasswordResetView |
python | tensorflow__tensorflow | tensorflow/python/distribute/parallel_device/parallel_device_test.py | {
"start": 4538,
"end": 19610
} | class ____(_VirtualDeviceTestCase, parameterized.TestCase):
def test_register_parallel_device(self):
with self.device:
c = constant_op.constant(1.)
d = constant_op.constant(2.)
e = c + d
outputs = self.device.unpack(e)
self.assertAllClose([3., 3.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
def test_no_implicit_copyon(self):
a1 = constant_op.constant(1.)
a2 = constant_op.constant(2.)
with self.device:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"First pack non-parallel tensors for each device"):
a1 + a2 # pylint:disable=pointless-statement
def test_error_message_length(self):
x = array_ops.ones([3, 3, 3, 3, 3, 3])
with self.device:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"TensorHandle\((.|\n){1,150}\[...\], shape="):
array_ops.identity(x)
def test_one_replica_eager_control_flow(self):
device = parallel_device.ParallelDevice(components=[
"/job:localhost/device:{}:0".format(self.device_type),
])
x = constant_op.constant([2, 3, 4])
with device:
x = device.pack([x])
if math_ops.reduce_any(math_ops.equal(x, constant_op.constant(4))):
y = constant_op.constant(1)
else:
y = constant_op.constant(2)
self.assertAllEqual([1], device.unpack(y))
@parameterized.named_parameters(
("variable", variables.Variable),
("tensor", lambda x: x))
def test_string_representation(self, transform):
x = self.device.pack(
[constant_op.constant([5., 6.]),
constant_op.constant([6., 7.])])
with self.device:
x = transform(x)
parallel_str = str(x)
self.assertIn("5", parallel_str)
self.assertIn("7", parallel_str)
self.assertIn(self.device_type + ":0", parallel_str)
self.assertIn(self.device_type + ":1", parallel_str)
parallel_repr = repr(x)
self.assertIn("5", parallel_repr)
self.assertIn("7", parallel_repr)
self.assertIn(self.device_type + ":0", parallel_repr)
self.assertIn(self.device_type + ":1", parallel_repr)
def test_device_id(self):
device_ids = self.device.unpack(self.device.device_ids)
self.assertAllClose([0, 1], device_ids)
# TODO(allenl): Should device IDs be int64 so they can be placed on GPUs?
# Currently backing_device is CPU.
self.assertIn(self.device.components[0], device_ids[0].device)
self.assertIn(self.device.components[1], device_ids[1].device)
def test_zeros(self):
with self.device:
x = array_ops.zeros([array_ops.identity(constant_op.constant(10))])
for component in self.device.unpack(x):
self.assertAllClose([0.] * 10, component)
def test_generator(self):
with self.device:
g_same = stateful_random_ops.Generator.from_seed(0)
g_different = stateful_random_ops.Generator.from_seed(
self.device.device_ids)
same = g_same.normal([10])
different = g_different.normal([10])
same_unpacked = self.device.unpack(same)
different_unpacked = self.device.unpack(different)
for same_component, different_component in zip(same_unpacked[1:],
different_unpacked[1:]):
self.assertAllClose(same_component, same_unpacked[0])
self.assertNotAllClose(different_component, different_unpacked[0])
def test_collective_reduce(self):
x = self.device.pack(
[constant_op.constant(-1.5),
constant_op.constant(3.5)])
with self.device:
reduced = _collective_sum(x, num_replicas=2)
outputs = self.device.unpack(reduced)
self.assertAllClose([2., 2.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
def test_collective_reduce_in_function(self):
x = self.device.pack(
[constant_op.constant(-1.5),
constant_op.constant(3.5)])
with self.device:
@def_function.function
def reduce(t):
return _collective_sum(t, num_replicas=2)
reduced = reduce(x)
outputs = self.device.unpack(reduced)
self.assertAllClose([2., 2.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
def test_collective_reduce_async_scope(self):
# Note that ops on the parallel device currently don't execute
# asynchronously. The test is just that we don't get deadlocks.
x = self.device.pack(
[constant_op.constant(-1.5),
constant_op.constant(3.5)])
with context.async_scope(), self.device:
reduced = _collective_sum(x, num_replicas=2)
outputs = self.device.unpack(reduced)
self.assertAllClose([2., 2.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
def test_collective_reduce_async_context(self):
previous = config.get_synchronous_execution()
try:
context._reset_context()
config.set_synchronous_execution(False)
self.setUp()
# Note that ops on the parallel device currently don't execute
# asynchronously. The test is just that we don't get deadlocks.
x = self.device.pack(
[constant_op.constant(-1.5),
constant_op.constant(3.5)])
with self.device:
reduced = _collective_sum(x, num_replicas=2)
outputs = self.device.unpack(reduced)
self.assertAllClose([2., 2.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
finally:
context._reset_context()
config.set_synchronous_execution(previous)
def test_collective_broadcast_in_function(self):
if self.device_type == "TPU":
self.skipTest("ParallelDevice broadcast collectives on TPUs need work")
@def_function.function
def broadcast_send_recv(device_id):
c = constant_op.constant([2])
@def_function.function
def send():
s0 = collective_ops.broadcast_send(
c * 3, c.shape, c.dtype, group_size=2, group_key=1, instance_key=1)
with ops.control_dependencies([s0.op]):
return array_ops.identity(c)
@def_function.function
def recv():
r0 = collective_ops.broadcast_recv(
c.shape, c.dtype, group_size=2, group_key=1, instance_key=1)
return r0
return control_flow_switch_case.switch_case(
device_id, branch_fns={
0: send,
1: recv
})
with self.device:
result = broadcast_send_recv(self.device.device_ids)
self.assertAllClose([[2], [6]], self.device.unpack(result))
def test_use_in_graph_error_is_informative(self):
@def_function.function
def uses_parallel():
with self.device:
return self.device.unpack(array_ops.ones([]))
with self.assertRaisesRegex(NotImplementedError, "inside `tf.function`"):
uses_parallel()
def test_checkpointing(self):
self.skipTest("b/216201668: revisit parallel device and checkpointing.")
prefix = os.path.join(self.get_temp_dir(), "ckpt")
different_values = self.device.pack(
[constant_op.constant(-1.),
constant_op.constant(3.)])
with self.device:
v = variables.Variable(different_values)
checkpoint = tracking.Checkpoint(v=v)
save_path = checkpoint.save(prefix)
with self.device:
v.assign(constant_op.constant(0.))
checkpoint.restore(save_path).assert_consumed()
with self.device:
outputs = self.device.unpack(v)
self.assertAllClose([-1., 3.], outputs)
with self.device:
restore_on_create = tracking.Checkpoint()
restore_on_create.restore(save_path)
restore_on_create.v = variables.Variable(0.)
outputs = self.device.unpack(restore_on_create.v)
self.assertAllClose([-1., 3.], outputs)
# Changing the number of devices / restoring into a single-device copy is OK
single_device = tracking.Checkpoint(v=variables.Variable(0.))
status = single_device.restore(save_path)
status.assert_existing_objects_matched()
self.assertAllClose(-1., single_device.v)
with self.assertRaisesRegex(AssertionError, "parallel_component_1"):
# There are parts of the variable that aren't restored into a
# single-device copy.
status.assert_consumed()
def test_pack_composite(self):
if self.device_type != "CPU":
self.skipTest("Iterator GetNext doesn't work on accelerators.")
datasets = [
dataset_ops.Dataset.from_tensor_slices(
[i + 1, (i + 1) * 2, (i + 1) * 3])
for i in range(len(self.device.components))]
parallel_dataset = self.device.pack(datasets)
with self.device:
iterator = iter(parallel_dataset)
parallel_sample = next(iterator)
component_iterators = self.device.unpack(iterator)
self.assertEqual(2, next(component_iterators[0]).numpy())
self.assertEqual(1, self.device.unpack(parallel_sample)[0].numpy())
self.assertEqual(4, next(component_iterators[1]).numpy())
self.assertEqual(2, self.device.unpack(parallel_sample)[1].numpy())
def test_pack_structure(self):
x_parts = [{"a": constant_op.constant(float(i))}
for i in range(len(self.device.components))]
x = self.device.pack(x_parts)
self.assertAllClose([{"a": 0.}, {"a": 1.}], self.device.unpack(x))
def test_pack_variable_value(self):
x_parts = [variables.Variable(i)
for i in range(len(self.device.components))]
x = self.device.pack(x_parts)
with self.device:
x1 = self.device.pack(x_parts)
for v in x_parts:
v.assign(-10) # Mutating the variable does not affect previous reads.
self.assertAllClose([0, 1], self.device.unpack(x))
self.assertAllClose([0, 1], self.device.unpack(x1))
def test_unpack_variable_value(self):
x_parts = [constant_op.constant(i)
for i in range(len(self.device.components))]
x = self.device.pack(x_parts)
with self.device:
v = variables.Variable(x)
v_unpacked = self.device.unpack(v)
v.assign(-10) # Mutating the variable does not affect previous reads.
self.assertAllClose([0, 1], v_unpacked)
def test_saved_model(self):
self.skipTest("b/216201668: revisit parallel device and saved model")
different_values = self.device.pack(
[constant_op.constant(-1.),
constant_op.constant(3.)])
with self.device:
m = module.Module()
m.v = variables.Variable(different_values)
m.f = def_function.function(lambda: m.v * 2.)
self.assertAllClose([-2., 6.], self.device.unpack(m.f()))
saved_model_path = os.path.join(self.get_temp_dir(), "saved_model")
save.save(m, saved_model_path)
context._reset_context()
self.setUp()
single_device_loaded = load.load(saved_model_path)
self.assertAllClose(-2., single_device_loaded.f())
assign_value = self.device.pack(
[constant_op.constant(.1), constant_op.constant(.2)])
with self.device:
parallel_loaded = load.load(saved_model_path)
self.assertAllClose([-2., 6.], self.device.unpack(parallel_loaded.f()))
self.assertAllClose([-1., 3.], self.device.unpack(parallel_loaded.v))
parallel_loaded.v.assign(assign_value)
self.assertAllClose([.2, .4], self.device.unpack(parallel_loaded.f()))
def _assert_close_to_non_parallel(self, computation):
"""Asserts that replication of `computation` works and is equivalent."""
with self.device:
parallel_result = computation()
non_parallel_result = computation()
# The computations should have the same number and structure of Tensor
# objects, even though the tensors themselves will be on different devices
# and represent different numbers of values.
nest.assert_same_structure(parallel_result, non_parallel_result)
non_parallel_flat = nest.flatten(non_parallel_result)
parallel_flat = nest.flatten(parallel_result)
self.assertGreater(len(parallel_flat), 0)
for non_parallel, parallel in zip(non_parallel_flat, parallel_flat):
self.assertEqual(self.device._name, parallel.device)
self.assertNotEqual(self.device._name, non_parallel.device)
for parallel_component in self.device.unpack(parallel):
self.assertAllClose(non_parallel, parallel_component)
def test_capturing(self):
with self.device:
x = constant_op.constant([1., 2.])
x = array_ops.identity(x)
@def_function.function
def f(y):
return x + y
y = array_ops.ones([2])
parallel_result = f(y)
self.assertAllClose([[2., 3.]] * 2, self.device.unpack(parallel_result))
def test_euclidean_norm(self):
def _test_fn():
with backprop.GradientTape() as tape:
x = array_ops.ones([5, 5])
tape.watch(x)
y = math_ops.reduce_euclidean_norm(x, axis=constant_op.constant(1))
return y, tape.gradient(y, x)
self._assert_close_to_non_parallel(_test_fn)
def test_reduce_sum(self):
def _test_fn():
with backprop.GradientTape() as tape:
x = array_ops.ones([5, 5])
tape.watch(x)
y = math_ops.reduce_sum(x, axis=constant_op.constant(1))
return y, tape.gradient(y, x)
self._assert_close_to_non_parallel(_test_fn)
def test_variable_created_in_function(self):
captured_value = constant_op.constant(2.)
class M(module.Module):
def __init__(self):
self.v = None
self.w = None
self.x = None
self.z = None
@def_function.function(autograph=False)
def __call__(self, x):
if self.v is None:
with ops.init_scope():
initial_value = constant_op.constant(2.)
self.z = variables.Variable(initial_value)
self.x = variables.Variable(captured_value)
self.w = variables.Variable(lambda: constant_op.constant(2.))
self.v = variables.Variable(constant_op.constant(2.))
return x * self.v * self.w * self.x * self.z
with self.device:
m = M()
packed_outputs = m(array_ops.ones([]))
outputs = self.device.unpack(packed_outputs)
self.assertAllClose([16., 16.], outputs)
def test_different_shapes(self):
x = self.device.pack(
[constant_op.constant([1., 2.]),
constant_op.constant([5.])])
with self.device:
y = x * 2.
self.assertEqual([None], y.shape.as_list())
self.assertAllClose([[2., 4.], [10.]], self.device.unpack(y))
different_axes = self.device.pack(
[constant_op.constant([1., 2.]),
constant_op.constant([[5.]])])
with self.assertRaisesRegex(Exception,
"components do not all have the same rank"):
different_axes.shape # pylint: disable=pointless-statement
| ParallelDeviceTests |
python | django__django | django/http/request.py | {
"start": 1266,
"end": 17360
} | class ____:
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ""
self.path_info = ""
self.method = None
self.resolver_match = None
self.content_type = None
self.content_params = None
def __repr__(self):
if self.method is None or not self.get_full_path():
return "<%s>" % self.__class__.__name__
return "<%s: %s %r>" % (
self.__class__.__name__,
self.method,
self.get_full_path(),
)
@cached_property
def headers(self):
return HttpHeaders(self.META)
@cached_property
def accepted_types(self):
"""
Return a list of MediaType instances, in order of preference (quality).
"""
header_value = self.headers.get("Accept", "*/*")
return sorted(
(
media_type
for token in header_value.split(",")
if token.strip() and (media_type := MediaType(token)).quality != 0
),
key=operator.attrgetter("quality", "specificity"),
reverse=True,
)
@cached_property
def accepted_types_by_precedence(self):
"""
Return a list of MediaType instances, in order of precedence
(specificity).
"""
return sorted(
self.accepted_types,
key=operator.attrgetter("specificity", "quality"),
reverse=True,
)
def accepted_type(self, media_type):
"""
Return the MediaType instance which best matches the given media type.
"""
media_type = MediaType(media_type)
return next(
(
accepted_type
for accepted_type in self.accepted_types_by_precedence
if media_type.match(accepted_type)
),
None,
)
def get_preferred_type(self, media_types):
"""Select the preferred media type from the provided options."""
if not media_types or not self.accepted_types:
return None
desired_types = [
(accepted_type, media_type)
for media_type in media_types
if (accepted_type := self.accepted_type(media_type)) is not None
]
if not desired_types:
return None
# Of the desired media types, select the one which is preferred.
return min(desired_types, key=lambda t: self.accepted_types.index(t[0]))[1]
def accepts(self, media_type):
"""Does the client accept a response in the given media type?"""
return self.accepted_type(media_type) is not None
def _set_content_type_params(self, meta):
"""Set content_type, content_params, and encoding."""
self.content_type, self.content_params = parse_header_parameters(
meta.get("CONTENT_TYPE", "")
)
if "charset" in self.content_params:
try:
codecs.lookup(self.content_params["charset"])
except LookupError:
pass
else:
self.encoding = self.content_params["charset"]
def _get_raw_host(self):
"""
Return the HTTP host using the environment or request headers. Skip
allowed hosts protection, so may return an insecure host.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and ("HTTP_X_FORWARDED_HOST" in self.META):
host = self.META["HTTP_X_FORWARDED_HOST"]
elif "HTTP_HOST" in self.META:
host = self.META["HTTP_HOST"]
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META["SERVER_NAME"]
server_port = self.get_port()
if server_port != ("443" if self.is_secure() else "80"):
host = "%s:%s" % (host, server_port)
return host
def get_host(self):
"""Return the HTTP host using the environment or request headers."""
host = self._get_raw_host()
# Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
allowed_hosts = settings.ALLOWED_HOSTS
if settings.DEBUG and not allowed_hosts:
allowed_hosts = [".localhost", "127.0.0.1", "[::1]"]
domain, port = split_domain_port(host)
if domain and validate_host(domain, allowed_hosts):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += (
" The domain name provided is not valid according to RFC 1034/1035."
)
raise DisallowedHost(msg)
def get_port(self):
"""Return the port number for the request as a string."""
if settings.USE_X_FORWARDED_PORT and "HTTP_X_FORWARDED_PORT" in self.META:
port = self.META["HTTP_X_FORWARDED_PORT"]
else:
port = self.META["SERVER_PORT"]
return str(port)
def get_full_path(self, force_append_slash=False):
return self._get_full_path(self.path, force_append_slash)
def get_full_path_info(self, force_append_slash=False):
return self._get_full_path(self.path_info, force_append_slash)
def _get_full_path(self, path, force_append_slash):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return "%s%s%s" % (
escape_uri_path(path),
"/" if force_append_slash and not path.endswith("/") else "",
(
("?" + iri_to_uri(self.META.get("QUERY_STRING", "")))
if self.META.get("QUERY_STRING", "")
else ""
),
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt="", max_age=None):
"""
Attempt to return a signed cookie. If the signature fails or the
cookie has expired, raise an exception, unless the `default` argument
is provided, in which case return that value.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age
)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def build_absolute_uri(self, location=None):
"""
Build an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, build the absolute URI
using request.get_full_path(). If the location is absolute, convert it
to an RFC 3987 compliant URI and return it. If location is relative or
is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
URL constructed from the request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = "//%s" % self.get_full_path()
else:
# Coerce lazy locations.
location = str(location)
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
# Handle the simple, most common case. If the location is absolute
# and a scheme or host (netloc) isn't provided, skip an expensive
# urljoin() as long as no path segments are '.' or '..'.
if (
bits.path.startswith("/")
and not bits.scheme
and not bits.netloc
and "/./" not in bits.path
and "/../" not in bits.path
):
# If location starts with '//' but has no netloc, reuse the
# schema and netloc from the current request. Strip the double
# slashes and continue as if it wasn't specified.
location = self._current_scheme_host + location.removeprefix("//")
else:
# Join the constructed URL with the provided location, which
# allows the provided location to apply query strings to the
# base path.
location = urljoin(self._current_scheme_host + self.path, location)
return iri_to_uri(location)
@cached_property
def _current_scheme_host(self):
return "{}://{}".format(self.scheme, self.get_host())
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Return 'http' by
default.
"""
return "http"
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, secure_value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
"The SECURE_PROXY_SSL_HEADER setting must be a tuple containing "
"two values."
)
header_value = self.META.get(header)
if header_value is not None:
header_value, *_ = header_value.split(",", 1)
return "https" if header_value.strip() == secure_value else "http"
return self._get_scheme()
def is_secure(self):
return self.scheme == "https"
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, "GET"):
del self.GET
if hasattr(self, "_post"):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [
uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS
]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from
# settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, "_files"):
raise AttributeError(
"You cannot set the upload handlers after the upload has been "
"processed."
)
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning=(
"You cannot alter upload handlers after the upload has been "
"processed."
),
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, "_body"):
if self._read_started:
raise RawPostDataException(
"You cannot access body after reading from request's data stream"
)
# Limit the maximum request data size that will be handled
# in-memory.
if (
settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
and int(self.META.get("CONTENT_LENGTH") or 0)
> settings.DATA_UPLOAD_MAX_MEMORY_SIZE
):
raise RequestDataTooBig(
"Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
)
try:
self._body = self.read()
except OSError as e:
raise UnreadablePostError(*e.args) from e
finally:
self._stream.close()
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict()
self._files = MultiValueDict()
def _load_post_and_files(self):
"""
Populate self._post and self._files if the content-type is a form type
"""
if self.method != "POST":
self._post, self._files = (
QueryDict(encoding=self._encoding),
MultiValueDict(),
)
return
if self._read_started and not hasattr(self, "_body"):
self._mark_post_parse_error()
return
if self.content_type == "multipart/form-data":
if hasattr(self, "_body"):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except (MultiPartParserError, TooManyFilesSent):
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
self._mark_post_parse_error()
raise
elif self.content_type == "application/x-www-form-urlencoded":
# According to RFC 1866, the "application/x-www-form-urlencoded"
# content type does not have a charset and should be always treated
# as UTF-8.
if self._encoding is not None and self._encoding.lower() != "utf-8":
raise BadRequest(
"HTTP requests with the 'application/x-www-form-urlencoded' "
"content type must be UTF-8 encoded."
)
self._post = QueryDict(self.body, encoding="utf-8")
self._files = MultiValueDict()
else:
self._post, self._files = (
QueryDict(encoding=self._encoding),
MultiValueDict(),
)
def close(self):
if hasattr(self, "_files"):
for f in chain.from_iterable(list_[1] for list_ in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def __iter__(self):
return iter(self.readline, b"")
def readlines(self):
return list(self)
| HttpRequest |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 86360,
"end": 86804
} | class ____(Tail, Blockwise):
"""Take the last `n` rows of every partition
Typically used after `Partitions(..., [-1])` to take
the last `n` rows of an entire collection.
"""
_preserves_partitioning_information = True
def _divisions(self):
return self.frame.divisions
def _task(self, name: Key, index: int) -> Task:
return Task(name, M.tail, TaskRef((self.frame._name, index)), self.n)
| BlockwiseTail |
python | scipy__scipy | scipy/linalg/_matfuncs_inv_ssq.py | {
"start": 630,
"end": 681
} | class ____(np.linalg.LinAlgError):
pass
| LogmError |
python | gevent__gevent | src/greentest/3.10/test_subprocess.py | {
"start": 157687,
"end": 160085
} | class ____(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
| ContextManagerTests |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 38595,
"end": 38908
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.mlp = OmDetTurboMLPWithDropout(config)
self.res1 = OmDetTurboResidualLayer(config)
def forward(self, x):
mlp_out = self.mlp(x)
x = self.res1(x, mlp_out)
return x
| OmDetTurboTaskEncoder |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/tests/test_observer.py | {
"start": 10299,
"end": 17488
} | class ____:
"""Tests for the logging configuration logic in start_observer()"""
@pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
def test_json_formatter_configures_kopf_logger(
self, monkeypatch: pytest.MonkeyPatch
):
"""
Test that when Prefect uses JSON formatting, kopf logger gets its own
handler with KopfObjectJsonFormatter and propagation is disabled.
"""
# Stop any existing observer first
stop_observer()
# Set up Prefect to use JSON formatting
monkeypatch.setenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER", "json")
# Import and setup logging fresh to pick up env var
from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
PROCESS_LOGGING_CONFIG.clear()
setup_logging(incremental=False)
# Clear any existing kopf logger configuration
kopf_logger = logging.getLogger("kopf")
kopf_logger.handlers.clear()
kopf_logger.propagate = True
# Start the observer which should configure kopf logging
try:
start_observer()
sleep(0.5) # Give it time to configure
# Verify kopf logger has its own handler
assert len(kopf_logger.handlers) > 0, "kopf logger should have a handler"
# Verify the handler has the correct formatter
handler = kopf_logger.handlers[0]
assert isinstance(handler.formatter, KopfObjectJsonFormatter), (
f"Expected KopfObjectJsonFormatter, got {type(handler.formatter)}"
)
# Verify propagation is disabled
assert kopf_logger.propagate is False, (
"kopf logger propagation should be disabled"
)
finally:
stop_observer()
monkeypatch.delenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER")
@pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
def test_standard_formatter_uses_default_behavior(
self, monkeypatch: pytest.MonkeyPatch
):
"""
Test that when Prefect uses standard formatting (default),
kopf logger uses default propagation behavior.
"""
# Stop any existing observer first
stop_observer()
# Use default logging configuration (standard formatter)
from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
PROCESS_LOGGING_CONFIG.clear()
setup_logging(incremental=False)
# Clear any existing kopf logger configuration
kopf_logger = logging.getLogger("kopf")
kopf_logger.handlers.clear()
kopf_logger.propagate = True
# Start the observer
try:
start_observer()
sleep(0.5)
# Verify kopf logger doesn't have a dedicated handler added by start_observer
# (it should propagate to root logger since we're using standard formatting)
assert len(kopf_logger.handlers) == 0, (
"kopf logger should not have handlers with standard formatting"
)
# Verify propagation is still enabled (default behavior)
assert kopf_logger.propagate is True, (
"kopf logger propagation should remain enabled with standard formatting"
)
finally:
stop_observer()
@pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
def test_no_duplicate_logs_with_json_formatting(
self, monkeypatch: pytest.MonkeyPatch
):
"""
Test that kopf logs don't appear duplicated when JSON formatting is enabled.
"""
# Stop any existing observer first
stop_observer()
# Set up JSON formatting
monkeypatch.setenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER", "json")
from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
PROCESS_LOGGING_CONFIG.clear()
setup_logging(incremental=False)
# Clear kopf logger
kopf_logger = logging.getLogger("kopf.test")
kopf_logger.handlers.clear()
kopf_logger.propagate = True
try:
start_observer()
sleep(0.5)
# Create a custom handler to capture logs
# (caplog won't work since propagation is disabled)
captured_logs: list[logging.LogRecord] = []
class CaptureHandler(logging.Handler):
def emit(self, record: logging.LogRecord):
captured_logs.append(record)
capture_handler = CaptureHandler()
kopf_logger.addHandler(capture_handler)
# Emit a test message
kopf_logger.warning("Test message for duplicate check")
# Count how many times the message appears
matching_records = [
r
for r in captured_logs
if "Test message for duplicate check" in r.message
]
assert len(matching_records) == 1, (
f"Expected 1 log message, got {len(matching_records)}"
)
finally:
stop_observer()
monkeypatch.delenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER")
@pytest.mark.usefixtures("mock_events_client", "mock_orchestration_client")
def test_kopf_logs_visible_with_json_formatting(
self, monkeypatch: pytest.MonkeyPatch
):
"""
Test that kopf logs are actually emitted and visible when JSON formatting is enabled.
"""
# Stop any existing observer first
stop_observer()
# Set up JSON formatting
monkeypatch.setenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER", "json")
from prefect.logging.configuration import PROCESS_LOGGING_CONFIG, setup_logging
PROCESS_LOGGING_CONFIG.clear()
setup_logging(incremental=False)
# Clear kopf logger
kopf_logger = logging.getLogger("kopf.test")
kopf_logger.handlers.clear()
kopf_logger.propagate = True
try:
start_observer()
sleep(0.5)
# Create a string buffer to capture output
log_capture = StringIO()
test_handler = logging.StreamHandler(log_capture)
test_handler.setFormatter(KopfObjectJsonFormatter())
kopf_logger.addHandler(test_handler)
# Emit a test log message
kopf_logger.warning("Test message for visibility check")
# Get the captured output
log_output = log_capture.getvalue()
# Verify the message was emitted
assert "Test message for visibility check" in log_output, (
"kopf log message should be visible in output"
)
# Verify it's JSON formatted
assert '"message"' in log_output or '"msg"' in log_output, (
"Log output should be JSON formatted"
)
finally:
stop_observer()
monkeypatch.delenv("PREFECT_LOGGING_HANDLERS_CONSOLE_FORMATTER")
| TestLoggingConfiguration |
python | optuna__optuna | optuna/samplers/nsgaii/_crossovers/_sbx.py | {
"start": 295,
"end": 6524
} | class ____(BaseCrossover):
"""Simulated Binary Crossover operation used by :class:`~optuna.samplers.NSGAIISampler`.
Generates a child from two parent individuals
according to the polynomial probability distribution.
In the paper, SBX has only one argument, ``eta``,
and generate two child individuals.
However, Optuna can only return one child individual in one crossover operation,
so it uses the ``uniform_crossover_prob`` and ``use_child_gene_prob`` arguments
to make two individuals into one.
- `Deb, K. and R. Agrawal.
“Simulated Binary Crossover for Continuous Search Space.”
Complex Syst. 9 (1995): n. pag.
<https://www.complex-systems.com/abstracts/v09_i02_a02/>`__
Args:
eta:
Distribution index. A small value of ``eta`` allows distant solutions
to be selected as children solutions. If not specified, takes default
value of ``2`` for single objective functions and ``20`` for multi objective.
uniform_crossover_prob:
``uniform_crossover_prob`` is the probability of uniform crossover
between two individuals selected as candidate child individuals.
This argument is whether or not two individuals are
crossover to make one child individual.
If the ``uniform_crossover_prob`` exceeds 0.5,
the result is equivalent to ``1-uniform_crossover_prob``,
because it returns one of the two individuals of the crossover result.
If not specified, takes default value of ``0.5``.
The range of values is ``[0.0, 1.0]``.
use_child_gene_prob:
``use_child_gene_prob`` is the probability of using the value of the generated
child variable rather than the value of the parent.
This probability is applied to each variable individually.
where ``1-use_chile_gene_prob`` is the probability of
using the parent's values as it is.
If not specified, takes default value of ``0.5``.
The range of values is ``(0.0, 1.0]``.
"""
n_parents = 2
def __init__(
self,
eta: float | None = None,
uniform_crossover_prob: float = 0.5,
use_child_gene_prob: float = 0.5,
) -> None:
if (eta is not None) and (eta < 0.0):
raise ValueError("The value of `eta` must be greater than or equal to 0.0.")
self._eta = eta
if uniform_crossover_prob < 0.0 or uniform_crossover_prob > 1.0:
raise ValueError(
"The value of `uniform_crossover_prob` must be in the range [0.0, 1.0]."
)
if use_child_gene_prob <= 0.0 or use_child_gene_prob > 1.0:
raise ValueError("The value of `use_child_gene_prob` must be in the range (0.0, 1.0].")
self._uniform_crossover_prob = uniform_crossover_prob
self._use_child_gene_prob = use_child_gene_prob
def crossover(
self,
parents_params: np.ndarray,
rng: np.random.RandomState,
study: Study,
search_space_bounds: np.ndarray,
) -> np.ndarray:
# https://www.researchgate.net/profile/M-M-Raghuwanshi/publication/267198495_Simulated_Binary_Crossover_with_Lognormal_Distribution/links/5576c78408ae7536375205d7/Simulated-Binary-Crossover-with-Lognormal-Distribution.pdf
# Section 2 Simulated Binary Crossover (SBX)
# To avoid generating solutions that violate the box constraints,
# alpha1, alpha2, xls and xus are introduced, unlike the reference.
xls = search_space_bounds[..., 0]
xus = search_space_bounds[..., 1]
xs_min = np.min(parents_params, axis=0)
xs_max = np.max(parents_params, axis=0)
if self._eta is None:
eta = 20.0 if study._is_multi_objective() else 2.0
else:
eta = self._eta
xs_diff = np.clip(xs_max - xs_min, 1e-10, None)
beta1 = 1 + 2 * (xs_min - xls) / xs_diff
beta2 = 1 + 2 * (xus - xs_max) / xs_diff
alpha1 = 2 - np.power(beta1, -(eta + 1))
alpha2 = 2 - np.power(beta2, -(eta + 1))
us = rng.rand(len(search_space_bounds))
mask1 = us > 1 / alpha1 # Equation (3).
betaq1 = np.power(us * alpha1, 1 / (eta + 1)) # Equation (3).
betaq1[mask1] = np.power((1 / (2 - us * alpha1)), 1 / (eta + 1))[mask1] # Equation (3).
mask2 = us > 1 / alpha2 # Equation (3).
betaq2 = np.power(us * alpha2, 1 / (eta + 1)) # Equation (3)
betaq2[mask2] = np.power((1 / (2 - us * alpha2)), 1 / (eta + 1))[mask2] # Equation (3).
c1 = 0.5 * ((xs_min + xs_max) - betaq1 * xs_diff) # Equation (4).
c2 = 0.5 * ((xs_min + xs_max) + betaq2 * xs_diff) # Equation (5).
# SBX applies crossover with use_child_gene_prob and uniform_crossover_prob.
# the gene of the parent individual is the gene of the child individual.
# The original SBX creates two child individuals,
# but optuna's implementation creates only one child individual.
# Therefore, when there is no crossover,
# the gene is selected with equal probability from the parent individuals x1 and x2.
child1_params_list = []
child2_params_list = []
for c1_i, c2_i, x1_i, x2_i in zip(c1, c2, parents_params[0], parents_params[1]):
if rng.rand() < self._use_child_gene_prob:
if rng.rand() >= self._uniform_crossover_prob:
child1_params_list.append(c1_i)
child2_params_list.append(c2_i)
else:
child1_params_list.append(c2_i)
child2_params_list.append(c1_i)
else:
if rng.rand() >= self._uniform_crossover_prob:
child1_params_list.append(x1_i)
child2_params_list.append(x2_i)
else:
child1_params_list.append(x2_i)
child2_params_list.append(x1_i)
child_params_list = child1_params_list if rng.rand() < 0.5 else child2_params_list
child_params = np.array(child_params_list)
return child_params
| SBXCrossover |
python | jupyterlab__jupyterlab | jupyterlab/browser_check.py | {
"start": 1202,
"end": 6108
} | class ____(logging.StreamHandler):
"""A handler that exits with 1 on a logged error."""
def __init__(self):
super().__init__(stream=sys.stderr)
self.setLevel(logging.ERROR)
self.errored = False
def filter(self, record):
# Handle known StreamClosedError from Tornado
# These occur when we forcibly close Websockets or
# browser connections during the test.
# https://github.com/tornadoweb/tornado/issues/2834
if (
hasattr(record, "exc_info")
and record.exc_info is not None
and isinstance(record.exc_info[1], (StreamClosedError, WebSocketClosedError))
):
return False
return super().filter(record)
def emit(self, record):
self.errored = True
super().emit(record)
def run_test(app, func):
"""Synchronous entry point to run a test function.
func is a function that accepts an app url as a parameter and returns a result.
func can be synchronous or asynchronous. If it is synchronous, it will be run
in a thread, so asynchronous is preferred.
"""
IOLoop.current().spawn_callback(run_test_async, app, func)
async def run_test_async(app, func):
"""Run a test against the application.
func is a function that accepts an app url as a parameter and returns a result.
func can be synchronous or asynchronous. If it is synchronous, it will be run
in a thread, so asynchronous is preferred.
"""
handler = LogErrorHandler()
app.log.addHandler(handler)
env_patch = TestEnv()
env_patch.start()
app.log.info("Running async test")
# The entry URL for browser tests is different in notebook >= 6.0,
# since that uses a local HTML file to point the user at the app.
if hasattr(app, "browser_open_file"):
url = urljoin("file:", pathname2url(app.browser_open_file))
else:
url = app.display_url
# Allow a synchronous function to be passed in.
if inspect.iscoroutinefunction(func):
test = func(url)
else:
app.log.info("Using thread pool executor to run test")
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor()
task = loop.run_in_executor(executor, func, url)
test = asyncio.wait([task])
try:
await test
except Exception as e:
app.log.critical("Caught exception during the test:")
app.log.error(str(e))
app.log.info("Test Complete")
result = 0
if handler.errored:
result = 1
app.log.critical("Exiting with 1 due to errors")
else:
app.log.info("Exiting normally")
app.log.info("Stopping server...")
try:
app.http_server.stop()
app.io_loop.stop()
env_patch.stop()
except Exception as e:
app.log.error(str(e))
result = 1
finally:
time.sleep(2)
os._exit(result)
async def run_async_process(cmd, **kwargs):
"""Run an asynchronous command"""
proc = await asyncio.create_subprocess_exec(*cmd, **kwargs)
stdout, stderr = await proc.communicate()
if proc.returncode != 0:
raise RuntimeError(str(cmd) + " exited with " + str(proc.returncode))
return stdout, stderr
async def run_browser(url):
"""Run the browser test and return an exit code."""
browser = os.environ.get("JLAB_BROWSER_TYPE", "chromium")
if browser not in {"chromium", "firefox", "webkit"}:
browser = "chromium"
target = osp.join(get_app_dir(), "browser_test")
if not osp.exists(osp.join(target, "node_modules")):
if not osp.exists(target):
os.makedirs(osp.join(target))
await run_async_process(["npm", "init", "-y"], cwd=target)
await run_async_process(["npm", "install", "playwright@^1.9.2"], cwd=target)
await run_async_process(["npx", "playwright", "install", browser], cwd=target)
shutil.copy(osp.join(here, "browser-test.js"), osp.join(target, "browser-test.js"))
await run_async_process(["node", "browser-test.js", url], cwd=target)
def run_browser_sync(url):
"""Run the browser test and return an exit code."""
browser = os.environ.get("JLAB_BROWSER_TYPE", "chromium")
if browser not in {"chromium", "firefox", "webkit"}:
browser = "chromium"
target = osp.join(get_app_dir(), "browser_test")
if not osp.exists(osp.join(target, "node_modules")):
os.makedirs(target)
subprocess.call(["npm", "init", "-y"], cwd=target) # noqa S603 S607
subprocess.call(["npm", "install", "playwright@^1.9.2"], cwd=target) # noqa S603 S607
subprocess.call(["npx", "playwright", "install", browser], cwd=target) # noqa S603 S607
shutil.copy(osp.join(here, "browser-test.js"), osp.join(target, "browser-test.js"))
return subprocess.check_call(["node", "browser-test.js", url], cwd=target) # noqa S603 S607
| LogErrorHandler |
python | mlflow__mlflow | tests/tensorflow/test_tensorflow2_core_model_export.py | {
"start": 206,
"end": 445
} | class ____(tf.Module):
def __init__(self, w, b):
super().__init__()
self.w = w
self.b = b
@tf.function
def __call__(self, x):
return tf.reshape(tf.add(tf.matmul(x, self.w), self.b), [-1])
| ToyModel |
python | pytorch__pytorch | torch/_guards.py | {
"start": 7647,
"end": 7792
} | class ____(NamedTuple):
expr: sympy.logic.boolalg.Boolean
sloc: SLoc
size_oblivious: bool
@dataclasses.dataclass(slots=True)
| ShapeGuard |
python | doocs__leetcode | solution/1800-1899/1828.Queries on Number of Points Inside a Circle/Solution.py | {
"start": 0,
"end": 357
} | class ____:
def countPoints(
self, points: List[List[int]], queries: List[List[int]]
) -> List[int]:
ans = []
for x, y, r in queries:
cnt = 0
for i, j in points:
dx, dy = i - x, j - y
cnt += dx * dx + dy * dy <= r * r
ans.append(cnt)
return ans
| Solution |
python | pyparsing__pyparsing | pyparsing/unicode.py | {
"start": 3795,
"end": 10458
} | class ____(unicode_set):
"""
A namespace class for defining common language unicode_sets.
"""
# fmt: off
# define ranges in language character sets
_ranges: UnicodeRangeList = [
(0x0020, sys.maxunicode),
]
class BasicMultilingualPlane(unicode_set):
"""Unicode set for the Basic Multilingual Plane"""
_ranges: UnicodeRangeList = [
(0x0020, 0xFFFF),
]
class Latin1(unicode_set):
"""Unicode set for Latin-1 Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0020, 0x007E),
(0x00A0, 0x00FF),
]
class LatinA(unicode_set):
"""Unicode set for Latin-A Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0100, 0x017F),
]
class LatinB(unicode_set):
"""Unicode set for Latin-B Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0180, 0x024F),
]
class Greek(unicode_set):
"""Unicode set for Greek Unicode Character Ranges"""
_ranges: UnicodeRangeList = [
(0x0342, 0x0345),
(0x0370, 0x0377),
(0x037A, 0x037F),
(0x0384, 0x038A),
(0x038C,),
(0x038E, 0x03A1),
(0x03A3, 0x03E1),
(0x03F0, 0x03FF),
(0x1D26, 0x1D2A),
(0x1D5E,),
(0x1D60,),
(0x1D66, 0x1D6A),
(0x1F00, 0x1F15),
(0x1F18, 0x1F1D),
(0x1F20, 0x1F45),
(0x1F48, 0x1F4D),
(0x1F50, 0x1F57),
(0x1F59,),
(0x1F5B,),
(0x1F5D,),
(0x1F5F, 0x1F7D),
(0x1F80, 0x1FB4),
(0x1FB6, 0x1FC4),
(0x1FC6, 0x1FD3),
(0x1FD6, 0x1FDB),
(0x1FDD, 0x1FEF),
(0x1FF2, 0x1FF4),
(0x1FF6, 0x1FFE),
(0x2129,),
(0x2719, 0x271A),
(0xAB65,),
(0x10140, 0x1018D),
(0x101A0,),
(0x1D200, 0x1D245),
(0x1F7A1, 0x1F7A7),
]
class Cyrillic(unicode_set):
"""Unicode set for Cyrillic Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0400, 0x052F),
(0x1C80, 0x1C88),
(0x1D2B,),
(0x1D78,),
(0x2DE0, 0x2DFF),
(0xA640, 0xA672),
(0xA674, 0xA69F),
(0xFE2E, 0xFE2F),
]
class Chinese(unicode_set):
"""Unicode set for Chinese Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x2E80, 0x2E99),
(0x2E9B, 0x2EF3),
(0x31C0, 0x31E3),
(0x3400, 0x4DB5),
(0x4E00, 0x9FEF),
(0xA700, 0xA707),
(0xF900, 0xFA6D),
(0xFA70, 0xFAD9),
(0x16FE2, 0x16FE3),
(0x1F210, 0x1F212),
(0x1F214, 0x1F23B),
(0x1F240, 0x1F248),
(0x20000, 0x2A6D6),
(0x2A700, 0x2B734),
(0x2B740, 0x2B81D),
(0x2B820, 0x2CEA1),
(0x2CEB0, 0x2EBE0),
(0x2F800, 0x2FA1D),
]
class Japanese(unicode_set):
"""Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"""
class Kanji(unicode_set):
"Unicode set for Kanji Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x4E00, 0x9FBF),
(0x3000, 0x303F),
]
class Hiragana(unicode_set):
"""Unicode set for Hiragana Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x3041, 0x3096),
(0x3099, 0x30A0),
(0x30FC,),
(0xFF70,),
(0x1B001,),
(0x1B150, 0x1B152),
(0x1F200,),
]
class Katakana(unicode_set):
"""Unicode set for Katakana Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x3099, 0x309C),
(0x30A0, 0x30FF),
(0x31F0, 0x31FF),
(0x32D0, 0x32FE),
(0xFF65, 0xFF9F),
(0x1B000,),
(0x1B164, 0x1B167),
(0x1F201, 0x1F202),
(0x1F213,),
]
漢字 = Kanji
カタカナ = Katakana
ひらがな = Hiragana
_ranges = (
Kanji._ranges
+ Hiragana._ranges
+ Katakana._ranges
)
class Hangul(unicode_set):
"""Unicode set for Hangul (Korean) Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x1100, 0x11FF),
(0x302E, 0x302F),
(0x3131, 0x318E),
(0x3200, 0x321C),
(0x3260, 0x327B),
(0x327E,),
(0xA960, 0xA97C),
(0xAC00, 0xD7A3),
(0xD7B0, 0xD7C6),
(0xD7CB, 0xD7FB),
(0xFFA0, 0xFFBE),
(0xFFC2, 0xFFC7),
(0xFFCA, 0xFFCF),
(0xFFD2, 0xFFD7),
(0xFFDA, 0xFFDC),
]
Korean = Hangul
class CJK(Chinese, Japanese, Hangul):
"""Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"""
class Thai(unicode_set):
"""Unicode set for Thai Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0E01, 0x0E3A),
(0x0E3F, 0x0E5B)
]
class Arabic(unicode_set):
"""Unicode set for Arabic Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0600, 0x061B),
(0x061E, 0x06FF),
(0x0700, 0x077F),
]
class Hebrew(unicode_set):
"""Unicode set for Hebrew Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0591, 0x05C7),
(0x05D0, 0x05EA),
(0x05EF, 0x05F4),
(0xFB1D, 0xFB36),
(0xFB38, 0xFB3C),
(0xFB3E,),
(0xFB40, 0xFB41),
(0xFB43, 0xFB44),
(0xFB46, 0xFB4F),
]
class Devanagari(unicode_set):
"""Unicode set for Devanagari Unicode Character Range"""
_ranges: UnicodeRangeList = [
(0x0900, 0x097F),
(0xA8E0, 0xA8FF)
]
BMP = BasicMultilingualPlane
# add language identifiers using language Unicode
العربية = Arabic
中文 = Chinese
кириллица = Cyrillic
Ελληνικά = Greek
עִברִית = Hebrew
日本語 = Japanese
한국어 = Korean
ไทย = Thai
देवनागरी = Devanagari
# fmt: on
| pyparsing_unicode |
python | plotly__plotly.py | plotly/graph_objs/_deprecations.py | {
"start": 16885,
"end": 17597
} | class ____(dict):
"""
plotly.graph_objs.ZAxis is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.layout.scene.ZAxis
"""
def __init__(self, *args, **kwargs):
"""
plotly.graph_objs.ZAxis is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.layout.scene.ZAxis
"""
warnings.warn(
"""plotly.graph_objs.ZAxis is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.layout.scene.ZAxis
""",
DeprecationWarning,
)
super().__init__(*args, **kwargs)
| ZAxis |
python | has2k1__plotnine | plotnine/_mpl/layout_manager/_layout_tree.py | {
"start": 723,
"end": 17553
} | class ____:
"""
A Tree representation of the composition
The purpose of this class (and its subclasses) is to align and
and resize plots in a composition. For example,
This composition:
(p1 | p2) | (p3 / p4)
where p1, p2, p3 & p4 are ggplot objects would look like this;
-----------------------------
| | | |
| | | |
| | | |
| | | |
| | |---------|
| | | |
| | | |
| | | |
| | | |
-----------------------------
and the tree would have this structure;
LayoutTree (.nrow=1, .ncol=3)
|
----------------------------
| | |
LayoutSpaces LayoutSpaces LayoutTree (.nrow=2, .ncol=1)
|
-------------
| |
LayoutSpaces LayoutSpaces
This composition:
(p1 + p2 + p4 + p5 + p6) + plot_layout(ncol=3)
would look like this:
-----------------------------
| | | |
| | | |
| p1 | p2 | p3 |
| | | |
|---------|---------|---------|
| | | |
| p4 | p5 | |
| | | |
| | | |
-----------------------------
and have this structure
LayoutTree (.nrow=3, .ncol=2)
|
-------------------------------------------------------
| | | | |
LayoutSpaces LayoutSpaces LayoutSpaces LayoutSpaces LayoutSpaces
Each composition is a tree or subtree
"""
cmp: Compose
"""
Composition that this tree represents
"""
nodes: list[LayoutSpaces | LayoutTree]
"""
The spaces or tree of spaces in the composition that the tree
represents.
"""
gridspec: p9GridSpec = field(init=False, repr=False)
"""
Gridspec of the composition
Originally this gridspec occupies all the space available to it so the
subplots are of equal sizes. As each subplot contains full ggplot,
differences in texts and legend sizes may make the panels (panel area)
have unequal sizes. We can resize the panels, by changing the height
and width ratios of this (composition) gridspec.
The information about the size (width & height) of the panels is in the
LayoutSpaces.
"""
def __post_init__(self):
self.gridspec = self.cmp.gridspec
self.grid = Grid["Node"](
self.nrow,
self.ncol,
self.nodes,
order="row_major" if self.cmp.layout.byrow else "col_major",
)
@property
def ncol(self) -> int:
"""
Number of columns
"""
return cast("int", self.cmp.layout.ncol)
@property
def nrow(self) -> int:
"""
Number of rows
"""
return cast("int", self.cmp.layout.nrow)
@staticmethod
def create(
cmp: Compose,
lookup_spaces: dict[ggplot, LayoutSpaces],
) -> LayoutTree:
"""
Create a LayoutTree for this composition
Parameters
----------
cmp :
Composition
lookup_spaces :
A table to lookup the LayoutSpaces for each plot.
Notes
-----
LayoutTree works by modifying the `.gridspec` of the compositions,
and the `LayoutSpaces` of the plots.
"""
from plotnine import ggplot
# Create subtree
nodes: list[LayoutSpaces | LayoutTree] = []
for item in cmp:
if isinstance(item, ggplot):
nodes.append(lookup_spaces[item])
else:
nodes.append(LayoutTree.create(item, lookup_spaces))
return LayoutTree(cmp, nodes)
@cached_property
def sub_compositions(self) -> list[LayoutTree]:
"""
LayoutTrees of the direct sub compositions of this one
"""
return [item for item in self.nodes if isinstance(item, LayoutTree)]
def harmonise(self):
"""
Align and resize plots in composition to look good
"""
self.align_axis_titles()
self.align()
self.resize()
def align(self):
"""
Align all the edges in this composition & contained compositions
This function mutates the layout spaces, specifically the
margin_alignments along the sides of the plot.
"""
self.align_tags()
self.align_panels()
self.align_sub_compositions()
def resize(self):
"""
Resize panels and the entire plots
This function mutates the composition gridspecs; specifically the
width_ratios and height_ratios.
"""
self.resize_widths()
self.resize_heights()
self.resize_sub_compositions()
def align_sub_compositions(self):
"""
Align the compositions contained in this one
"""
# Recurse into the contained compositions
for tree in self.sub_compositions:
tree.align()
def resize_sub_compositions(self):
"""
Resize panels in the compositions contained in this one
"""
for tree in self.sub_compositions:
tree.resize()
@cached_property
def bottom_most_spaces(self) -> list[bottom_spaces]:
"""
Bottom spaces of items in the last row
"""
return [s for s in self.bottom_spaces_in_row(self.nrow - 1)]
@cached_property
def top_most_spaces(self) -> list[top_spaces]:
"""
Top spaces of items in the top row
"""
return [s for s in self.top_spaces_in_row(0)]
@cached_property
def left_most_spaces(self) -> list[left_spaces]:
"""
Left spaces of items in the last column
"""
return [s for s in self.left_spaces_in_col(0)]
@cached_property
def right_most_spaces(self) -> list[right_spaces]:
"""
Right spaces of items the last column
"""
return [s for s in self.right_spaces_in_col(self.ncol - 1)]
@property
def panel_width(self) -> float:
"""
A width of all panels in this composition
"""
return sum(self.panel_widths)
@property
def panel_height(self) -> float:
"""
A height of all panels in this composition
"""
return sum(self.panel_heights)
@property
def plot_width(self) -> float:
"""
A width of all plots in this tree/composition
"""
return self.gridspec.width
@property
def plot_height(self) -> float:
"""
A height of all plots in this tree/composition
"""
return self.gridspec.height
@property
def panel_widths(self) -> Sequence[float]:
"""
Widths [figure space] of the panels along horizontal dimension
"""
# This method is used after aligning the panels. Therefore, the
# wides panel_width (i.e. max()) is the good representative width
# of the column.
w = self.plot_width / self.ncol
return [
max(node.panel_width for node in col if node) if any(col) else w
for col in self.grid.iter_cols()
]
@property
def panel_heights(self) -> Sequence[float]:
"""
Heights [figure space] of the panels along vertical dimension
"""
h = self.plot_height / self.nrow
return [
max([node.panel_height for node in row if node]) if any(row) else h
for row in self.grid.iter_rows()
]
@property
def plot_widths(self) -> Sequence[float]:
"""
Widths [figure space] of the plots along horizontal dimension
For each column, the representative width is that of the widest plot.
"""
w = self.gridspec.width / self.ncol
return [
max([node.plot_width if node else w for node in col])
for col in self.grid.iter_cols()
]
@property
def plot_heights(self) -> Sequence[float]:
"""
Heights [figure space] of the plots along vertical dimension
For each row, the representative height is that of the tallest plot.
"""
h = self.gridspec.height / self.nrow
return [
max([node.plot_height if node else h for node in row])
for row in self.grid.iter_rows()
]
@property
def panel_width_ratios(self) -> Sequence[float]:
"""
The relative widths of the panels in the composition
These are normalised to have a mean = 1.
"""
return cast("Sequence[float]", self.cmp._layout.widths)
@property
def panel_height_ratios(self) -> Sequence[float]:
"""
The relative heights of the panels in the composition
These are normalised to have a mean = 1.
"""
return cast("Sequence[float]", self.cmp._layout.heights)
def bottom_spaces_in_row(self, r: int) -> list[bottom_spaces]:
spaces: list[bottom_spaces] = []
for node in self.grid[r, :]:
if isinstance(node, LayoutSpaces):
spaces.append(node.b)
elif isinstance(node, LayoutTree):
spaces.extend(node.bottom_most_spaces)
return spaces
def top_spaces_in_row(self, r: int) -> list[top_spaces]:
spaces: list[top_spaces] = []
for node in self.grid[r, :]:
if isinstance(node, LayoutSpaces):
spaces.append(node.t)
elif isinstance(node, LayoutTree):
spaces.extend(node.top_most_spaces)
return spaces
def left_spaces_in_col(self, c: int) -> list[left_spaces]:
spaces: list[left_spaces] = []
for node in self.grid[:, c]:
if isinstance(node, LayoutSpaces):
spaces.append(node.l)
elif isinstance(node, LayoutTree):
spaces.extend(node.left_most_spaces)
return spaces
def right_spaces_in_col(self, c: int) -> list[right_spaces]:
spaces: list[right_spaces] = []
for node in self.grid[:, c]:
if isinstance(node, LayoutSpaces):
spaces.append(node.r)
elif isinstance(node, LayoutTree):
spaces.extend(node.right_most_spaces)
return spaces
def iter_left_spaces(self) -> Iterator[list[left_spaces]]:
"""
Left spaces for each non-empty column
Will not return an empty list.
"""
for c in range(self.ncol):
spaces = self.left_spaces_in_col(c)
if spaces:
yield spaces
def iter_right_spaces(self) -> Iterator[list[right_spaces]]:
"""
Right spaces for each non-empty column
Will not return an empty list.
"""
for c in range(self.ncol):
spaces = self.right_spaces_in_col(c)
if spaces:
yield spaces
def iter_bottom_spaces(self) -> Iterator[list[bottom_spaces]]:
"""
Bottom spaces for each non-empty row
"""
for r in range(self.nrow):
spaces = self.bottom_spaces_in_row(r)
if spaces:
yield spaces
def iter_top_spaces(self) -> Iterator[list[top_spaces]]:
"""
Top spaces for each non-empty row
"""
for r in range(self.nrow):
spaces = self.top_spaces_in_row(r)
if spaces:
yield spaces
def align_panels(self):
for spaces in self.iter_bottom_spaces():
bottoms = [space.panel_bottom for space in spaces]
high = max(bottoms)
diffs = [high - b for b in bottoms]
for space, diff in zip(spaces, diffs):
space.margin_alignment += diff
for spaces in self.iter_top_spaces():
tops = [space.panel_top for space in spaces]
low = min(tops)
diffs = [b - low for b in tops]
for space, diff in zip(spaces, diffs):
space.margin_alignment += diff
for spaces in self.iter_left_spaces():
lefts = [space.panel_left for space in spaces]
high = max(lefts)
diffs = [high - l for l in lefts]
for space, diff in zip(spaces, diffs):
space.margin_alignment += diff
for spaces in self.iter_right_spaces():
rights = [space.panel_right for space in spaces]
low = min(rights)
diffs = [r - low for r in rights]
for space, diff in zip(spaces, diffs):
space.margin_alignment += diff
def align_tags(self):
for spaces in self.iter_bottom_spaces():
heights = [
space.tag_height + space.tag_alignment for space in spaces
]
high = max(heights)
diffs = [high - h for h in heights]
for space, diff in zip(spaces, diffs):
space.tag_alignment += diff
for spaces in self.iter_top_spaces():
heights = [
space.tag_height + space.tag_alignment for space in spaces
]
high = max(heights)
diffs = [high - h for h in heights]
for space, diff in zip(spaces, diffs):
space.tag_alignment += diff
for spaces in self.iter_left_spaces():
widths = [
space.tag_width + space.tag_alignment for space in spaces
]
high = max(widths)
diffs = [high - w for w in widths]
for space, diff in zip(spaces, diffs):
space.tag_alignment += diff
for spaces in self.iter_right_spaces():
widths = [
space.tag_width + space.tag_alignment for space in spaces
]
high = max(widths)
diffs = [high - w for w in widths]
for space, diff in zip(spaces, diffs):
space.tag_alignment += diff
def align_axis_titles(self):
"""
Align the axis titles along the composing dimension
Since the alignment value used to for this purpose is one of
the fields in the _side_space, it affects the space created
for the panel.
We could align the titles within self.align but we would have
to store the value outside the _side_space and pick it up when
setting the position of the texts!
"""
for spaces in self.iter_bottom_spaces():
clearances = [space.axis_title_clearance for space in spaces]
high = max(clearances)
diffs = [high - b for b in clearances]
for space, diff in zip(spaces, diffs):
space.axis_title_alignment += diff
for spaces in self.iter_left_spaces():
clearances = [space.axis_title_clearance for space in spaces]
high = max(clearances)
diffs = [high - l for l in clearances]
for space, diff in zip(spaces, diffs):
space.axis_title_alignment += diff
for tree in self.sub_compositions:
tree.align_axis_titles()
def resize_widths(self):
# The scaling calcuation to get the new panel width is
# straight-forward because the ratios have a mean of 1.
# So the multiplication preserves the total panel width.
new_panel_widths = np.mean(self.panel_widths) * np.array(
self.panel_width_ratios
)
non_panel_space = np.array(self.plot_widths) - self.panel_widths
new_plot_widths = new_panel_widths + non_panel_space
width_ratios = new_plot_widths / new_plot_widths.max()
self.gridspec.set_width_ratios(width_ratios)
def resize_heights(self):
new_panel_heights = np.mean(self.panel_heights) * np.array(
self.panel_height_ratios
)
non_panel_space = np.array(self.plot_heights) - self.panel_heights
new_plot_heights = new_panel_heights + non_panel_space
height_ratios = new_plot_heights / new_plot_heights.max()
self.gridspec.set_height_ratios(height_ratios)
| LayoutTree |
python | sympy__sympy | sympy/tensor/array/expressions/array_expressions.py | {
"start": 62284,
"end": 63314
} | class ____:
"""
The ``_ArgE`` object contains references to the array expression
(``.element``) and a list containing the information about index
contractions (``.indices``).
Index contractions are numbered and contracted indices show the number of
the contraction. Uncontracted indices have ``None`` value.
For example:
``_ArgE(M, [None, 3])``
This object means that expression ``M`` is part of an array contraction
and has two indices, the first is not contracted (value ``None``),
the second index is contracted to the 4th (i.e. number ``3``) group of the
array contraction object.
"""
indices: list[int | None]
def __init__(self, element, indices: list[int | None] | None = None):
self.element = element
if indices is None:
self.indices = [None for i in range(get_rank(element))]
else:
self.indices = indices
def __str__(self):
return f"_ArgE({self.element}, {self.indices})"
__repr__ = __str__
| _ArgE |
python | huggingface__transformers | tests/models/oneformer/test_image_processing_oneformer.py | {
"start": 6369,
"end": 19856
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
fast_image_processing_class = OneFormerImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = OneFormerImageProcessorTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_proc_properties(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "image_mean"))
self.assertTrue(hasattr(image_processor, "image_std"))
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "ignore_index"))
self.assertTrue(hasattr(image_processor, "class_info_file"))
self.assertTrue(hasattr(image_processor, "num_text"))
self.assertTrue(hasattr(image_processor, "repo_path"))
self.assertTrue(hasattr(image_processor, "metadata"))
self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def comm_get_image_processor_inputs(
self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np", image_processing_class=None
):
image_processor = image_processing_class(**self.image_processor_dict)
# prepare image and target
num_labels = self.image_processor_tester.num_labels
annotations = None
instance_id_to_semantic_id = None
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
if with_segmentation_maps:
high = num_labels
if is_instance_map:
labels_expanded = list(range(num_labels)) * 2
instance_id_to_semantic_id = dict(enumerate(labels_expanded))
annotations = [
np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
]
if segmentation_type == "pil":
annotations = [Image.fromarray(annotation) for annotation in annotations]
inputs = image_processor(
image_inputs,
["semantic"] * len(image_inputs),
annotations,
return_tensors="pt",
instance_id_to_semantic_id=instance_id_to_semantic_id,
)
return inputs
@unittest.skip
def test_init_without_params(self):
pass
def test_call_with_segmentation_maps(self):
def common(is_instance_map=False, segmentation_type=None):
for image_processing_class in self.image_processor_list:
inputs = self.comm_get_image_processor_inputs(
with_segmentation_maps=True,
is_instance_map=is_instance_map,
segmentation_type=segmentation_type,
image_processing_class=image_processing_class,
)
mask_labels = inputs["mask_labels"]
class_labels = inputs["class_labels"]
pixel_values = inputs["pixel_values"]
text_inputs = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
self.assertEqual(len(text_input), self.image_processor_tester.num_text)
common()
common(is_instance_map=True)
common(is_instance_map=False, segmentation_type="pil")
common(is_instance_map=True, segmentation_type="pil")
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask)
self.assertEqual(len(rle), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
def test_post_process_semantic_segmentation(self):
for image_processing_class in self.image_processor_list:
feature_extractor = image_processing_class(
num_labels=self.image_processor_tester.num_classes,
max_seq_length=77,
task_seq_length=77,
class_info_file="ade20k_panoptic.json",
num_text=self.image_processor_tester.num_text,
repo_path="shi-labs/oneformer_demo",
)
outputs = self.image_processor_tester.get_fake_oneformer_outputs()
segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
self.assertEqual(len(segmentation), self.image_processor_tester.batch_size)
self.assertEqual(
segmentation[0].shape,
(
self.image_processor_tester.height,
self.image_processor_tester.width,
),
)
target_sizes = [(1, 4) for i in range(self.image_processor_tester.batch_size)]
segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape, target_sizes[0])
def test_post_process_instance_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(
num_labels=self.image_processor_tester.num_classes,
max_seq_length=77,
task_seq_length=77,
class_info_file="ade20k_panoptic.json",
num_text=self.image_processor_tester.num_text,
repo_path="shi-labs/oneformer_demo",
)
outputs = self.image_processor_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(
el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width)
)
segmentation_with_opts = image_processor.post_process_instance_segmentation(
outputs,
threshold=0,
target_sizes=[(1, 4) for _ in range(self.image_processor_tester.batch_size)],
task_type="panoptic",
)
self.assertTrue(len(segmentation_with_opts) == self.image_processor_tester.batch_size)
for el in segmentation_with_opts:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (1, 4))
def test_post_process_panoptic_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(
num_labels=self.image_processor_tester.num_classes,
max_seq_length=77,
task_seq_length=77,
class_info_file="ade20k_panoptic.json",
num_text=self.image_processor_tester.num_text,
repo_path="shi-labs/oneformer_demo",
)
outputs = self.image_processor_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(
el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width)
)
def test_can_load_with_local_metadata(self):
# Create a temporary json file
class_info = {
"0": {"isthing": 0, "name": "foo"},
"1": {"isthing": 0, "name": "bar"},
"2": {"isthing": 1, "name": "baz"},
}
metadata = prepare_metadata(class_info)
for image_processing_class in self.image_processor_list:
with tempfile.TemporaryDirectory() as tmpdirname:
metadata_path = os.path.join(tmpdirname, "metadata.json")
with open(metadata_path, "w") as f:
json.dump(class_info, f)
config_dict = self.image_processor_dict
config_dict["class_info_file"] = metadata_path
config_dict["repo_path"] = tmpdirname
image_processor = image_processing_class(**config_dict)
self.assertEqual(image_processor.metadata, metadata)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(image_encoding_slow.mask_labels, image_encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(
image_encoding_slow.class_labels, image_encoding_fast.class_labels
):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
self.assertEqual(image_encoding_slow.text_inputs, image_encoding_fast.text_inputs)
self.assertEqual(image_encoding_slow.task_inputs, image_encoding_fast.task_inputs)
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(
dummy_images,
segmentation_maps=dummy_maps,
task_inputs=["instance"] + ["semantic"] * (len(dummy_images) - 1),
return_tensors="pt",
)
encoding_fast = image_processor_fast(
dummy_images,
segmentation_maps=dummy_maps,
task_inputs=["instance"] + ["semantic"] * (len(dummy_images) - 1),
return_tensors="pt",
)
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(encoding_slow.mask_labels, encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(encoding_slow.class_labels, encoding_fast.class_labels):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
self.assertEqual(encoding_slow.text_inputs, encoding_fast.text_inputs)
self.assertEqual(encoding_slow.task_inputs, encoding_fast.task_inputs)
| OneFormerImageProcessingTest |
python | milvus-io__pymilvus | pymilvus/client/singleton_utils.py | {
"start": 53,
"end": 405
} | class ____(type):
_ins: ClassVar[Dict] = {}
_lock = threading.Lock()
def __call__(cls, *args, **kwargs):
if cls not in cls._ins:
with cls._lock:
if cls not in cls._ins:
ins = super().__call__(*args, **kwargs)
cls._ins[cls] = ins
return cls._ins[cls]
| Singleton |
python | ray-project__ray | python/ray/experimental/channel/common.py | {
"start": 15460,
"end": 18386
} | class ____(ReaderInterface):
"""
Asyncio-compatible channel reader.
The reader is constructed with an async queue of futures whose values it
will fulfill. It uses a threadpool to execute the blocking calls to read
from the input channel(s).
"""
def __init__(
self,
input_channels: List[ChannelInterface],
fut_queue: asyncio.Queue,
):
super().__init__(input_channels)
self._fut_queue = fut_queue
self._background_task = None
self._background_task_executor = concurrent.futures.ThreadPoolExecutor(
max_workers=1, thread_name_prefix="channel.AwaitableBackgroundReader"
)
def start(self):
self._background_task = asyncio.ensure_future(self.run())
def _run(self):
# Give it a default timeout 60 seconds to release the buffers
# of the channels that were not read in the last `read` call.
self._consume_leftover_channels_if_needed(60)
results = [None for _ in range(len(self._input_channels))]
from ray.dag import DAGContext
ctx = DAGContext.get_current()
iteration_timeout = ctx.read_iteration_timeout
done_channels = set()
while len(done_channels) < len(self._input_channels):
for i, c in enumerate(self._input_channels):
if c in done_channels:
continue
try:
result = c.read(iteration_timeout)
results[i] = result
done_channels.add(c)
if isinstance(result, ray.exceptions.RayTaskError):
self._leftover_channels = [
c for c in self._input_channels if c not in done_channels
]
return [result for _ in range(len(self._input_channels))]
except ray.exceptions.RayChannelTimeoutError:
pass
if sys.is_finalizing():
return results
return results
async def run(self):
loop = asyncio.get_running_loop()
while not self._closed:
res, fut = await asyncio.gather(
loop.run_in_executor(self._background_task_executor, self._run),
self._fut_queue.get(),
return_exceptions=True,
)
# Set the result on the main thread.
fut.set_result(res)
# NOTE(swang): If the object is zero-copy deserialized, then it
# will stay in scope as long as ret and the future are in scope.
# Therefore, we must delete both here after fulfilling the future.
del res
del fut
def close(self):
super().close()
self._background_task_executor.shutdown(cancel_futures=True)
self._background_task.cancel()
@DeveloperAPI
| AwaitableBackgroundReader |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/unit_tests/integration/pagination.py | {
"start": 211,
"end": 452
} | class ____(PaginationStrategy):
def update(self, response: Dict[str, Any]) -> None:
response["payload"]["pagination"] = {}
response["payload"]["pagination"]["nextToken"] = NEXT_TOKEN_STRING
| VendorFulfillmentPaginationStrategy |
python | django__django | tests/transactions/tests.py | {
"start": 11261,
"end": 13917
} | class ____(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ["transactions"]
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
# The third insert couldn't be roll back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second insert couldn't be roll back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertSequenceEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
reporter = Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
# The third insert couldn't be roll back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertSequenceEqual(Reporter.objects.all(), [reporter])
@skipUnlessDBFeature("uses_savepoints")
| AtomicMergeTests |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_tagkey_values.py | {
"start": 34076,
"end": 38379
} | class ____(OrganizationTagKeyTestCase, OccurrenceTestMixin):
def setUp(self) -> None:
super().setUp()
def run_dataset_test(self, key, expected, dataset: Dataset, **kwargs):
# all tests here require that we search in transactions so make that the default here
qs_params = kwargs.get("qs_params", {})
qs_params["dataset"] = dataset.value
kwargs["qs_params"] = qs_params
super().run_test(key, expected, **kwargs)
def test_dataset_events(self) -> None:
self.store_event(
data={
"event_id": "a" * 32,
"tags": {"berry": "raspberry"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"tags": {"berry": "blueberry"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "c" * 32,
"tags": {"berry": "banana"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "d" * 32,
"tags": {"berry": "banana"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
# Should appear in Events and Discover datasets, but not IssuePlatform
self.run_dataset_test(
"berry",
expected=[("raspberry", 1), ("blueberry", 1), ("banana", 2)],
dataset=Dataset.Events,
)
self.run_dataset_test(
"berry",
expected=[("raspberry", 1), ("blueberry", 1), ("banana", 2)],
dataset=Dataset.Discover,
)
self.run_dataset_test(
"berry",
expected=[],
dataset=Dataset.IssuePlatform,
)
def test_dataset_issue_platform(self) -> None:
self.store_event(
data={
"event_id": "a" * 32,
"tags": {"stone_fruit": "peach"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.process_occurrence(
event_id=uuid.uuid4().hex,
project_id=self.project.id,
event_data={
"title": "some problem",
"platform": "python",
"tags": {"stone_fruit": "cherry"},
"timestamp": self.min_ago.isoformat(),
"received": self.min_ago.isoformat(),
},
)
# (stone_fruit: cherry) should appear in IssuePlatform dataset,
# but (sonte_fruit: peach) should not
self.run_dataset_test(
"stone_fruit",
expected=[("cherry", 1)],
dataset=Dataset.IssuePlatform,
)
self.run_dataset_test(
"stone_fruit",
expected=[("peach", 1)],
dataset=Dataset.Events,
)
self.run_dataset_test(
"stone_fruit",
expected=[("peach", 1)],
dataset=Dataset.Discover,
)
def test_dataset_discover(self) -> None:
event = load_data("transaction")
event["tags"].extend([["fake_fruit", "tomato"]])
event.update(
{
"transaction": "example_transaction",
"event_id": uuid.uuid4().hex,
"start_timestamp": self.min_ago.isoformat(),
"timestamp": self.min_ago.isoformat(),
}
)
event["measurements"]["lcp"]["value"] = 5000
self.store_event(data=event, project_id=self.project.id)
self.run_dataset_test(
"fake_fruit",
expected=[],
dataset=Dataset.IssuePlatform,
)
self.run_dataset_test(
"fake_fruit",
expected=[],
dataset=Dataset.Events,
)
self.run_dataset_test(
"fake_fruit",
expected=[("tomato", 1)],
dataset=Dataset.Discover,
)
| DatasetParamOrganizationTagKeyValuesTest |
python | lepture__authlib | tests/flask/test_oauth2/test_authorization_code_grant.py | {
"start": 921,
"end": 10568
} | class ____(CodeGrantMixin, _AuthorizationCodeGrant):
TOKEN_ENDPOINT_AUTH_METHODS = ["client_secret_basic", "client_secret_post", "none"]
def save_authorization_code(self, code, request):
return save_authorization_code(code, request)
@pytest.fixture(autouse=True)
def server(server):
server.register_grant(AuthorizationCodeGrant)
return server
def test_get_authorize(test_client):
rv = test_client.get(authorize_url)
assert rv.data == b"ok"
def test_invalid_client_id(test_client):
url = "/oauth/authorize?response_type=code"
rv = test_client.get(url)
assert b"invalid_client" in rv.data
url = "/oauth/authorize?response_type=code&client_id=invalid"
rv = test_client.get(url)
assert b"invalid_client" in rv.data
def test_invalid_authorize(test_client, server):
rv = test_client.post(authorize_url)
assert "error=access_denied" in rv.location
server.scopes_supported = ["profile"]
rv = test_client.post(authorize_url + "&scope=invalid&state=foo")
assert "error=invalid_scope" in rv.location
assert "state=foo" in rv.location
def test_unauthorized_client(test_client, client, db):
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "client_secret_basic",
"response_types": ["token"],
"grant_types": ["authorization_code"],
}
)
db.session.add(client)
db.session.commit()
rv = test_client.get(authorize_url)
assert "unauthorized_client" in rv.location
def test_invalid_client(test_client):
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": "invalid",
"client_id": "invalid-id",
},
)
resp = json.loads(rv.data)
assert resp["error"] == "invalid_client"
headers = create_basic_header("code-client", "invalid-secret")
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": "invalid",
},
headers=headers,
)
resp = json.loads(rv.data)
assert resp["error"] == "invalid_client"
assert resp["error_uri"] == "https://client.test/error#invalid_client"
def test_invalid_code(test_client):
headers = create_basic_header("client-id", "client-secret")
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
},
headers=headers,
)
resp = json.loads(rv.data)
assert resp["error"] == "invalid_request"
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": "invalid",
},
headers=headers,
)
resp = json.loads(rv.data)
assert resp["error"] == "invalid_grant"
code = AuthorizationCode(code="no-user", client_id="code-client", user_id=0)
db.session.add(code)
db.session.commit()
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": "no-user",
},
headers=headers,
)
resp = json.loads(rv.data)
assert resp["error"] == "invalid_grant"
def test_invalid_redirect_uri(test_client):
uri = authorize_url + "&redirect_uri=https%3A%2F%2Fa.c"
rv = test_client.post(uri, data={"user_id": "1"})
resp = json.loads(rv.data)
assert resp["error"] == "invalid_request"
uri = authorize_url + "&redirect_uri=https%3A%2F%2Fclient.test"
rv = test_client.post(uri, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
headers = create_basic_header("client-id", "client-secret")
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
},
headers=headers,
)
resp = json.loads(rv.data)
assert resp["error"] == "invalid_grant"
def test_invalid_grant_type(test_client, client, db):
client.client_secret = ""
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "none",
"response_types": ["code"],
"grant_types": ["invalid"],
}
)
db.session.add(client)
db.session.commit()
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"client_id": "client-id",
"code": "a",
},
)
resp = json.loads(rv.data)
assert resp["error"] == "unauthorized_client"
def test_authorize_token_no_refresh_token(app, test_client, client, db, server):
app.config.update({"OAUTH2_REFRESH_TOKEN_GENERATOR": True})
server.load_config(app.config)
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "none",
"response_types": ["code"],
"grant_types": ["authorization_code"],
}
)
db.session.add(client)
db.session.commit()
rv = test_client.post(authorize_url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"client_id": "client-id",
},
)
resp = json.loads(rv.data)
assert "access_token" in resp
assert "refresh_token" not in resp
def test_authorize_token_has_refresh_token(app, test_client, client, db, server):
app.config.update({"OAUTH2_REFRESH_TOKEN_GENERATOR": True})
server.load_config(app.config)
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "client_secret_basic",
"response_types": ["code"],
"grant_types": ["authorization_code", "refresh_token"],
}
)
db.session.add(client)
db.session.commit()
url = authorize_url + "&state=bar"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
assert params["state"] == "bar"
code = params["code"]
headers = create_basic_header("client-id", "client-secret")
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
},
headers=headers,
)
resp = json.loads(rv.data)
assert "access_token" in resp
assert "refresh_token" in resp
def test_invalid_multiple_request_parameters(test_client):
url = (
authorize_url
+ "&scope=profile&state=bar&redirect_uri=https%3A%2F%2Fclient.test&response_type=code"
)
rv = test_client.get(url)
resp = json.loads(rv.data)
assert resp["error"] == "invalid_request"
assert resp["error_description"] == "Multiple 'response_type' in request."
def test_client_secret_post(app, test_client, client, db, server):
app.config.update({"OAUTH2_REFRESH_TOKEN_GENERATOR": True})
server.load_config(app.config)
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "client_secret_post",
"response_types": ["code"],
"grant_types": ["authorization_code", "refresh_token"],
}
)
db.session.add(client)
db.session.commit()
url = authorize_url + "&state=bar"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
assert params["state"] == "bar"
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"client_id": "client-id",
"client_secret": "client-secret",
"code": code,
},
)
resp = json.loads(rv.data)
assert "access_token" in resp
assert "refresh_token" in resp
def test_token_generator(app, test_client, client, server):
m = "tests.flask.test_oauth2.oauth2_server:token_generator"
app.config.update({"OAUTH2_ACCESS_TOKEN_GENERATOR": m})
server.load_config(app.config)
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "none",
"response_types": ["code"],
"grant_types": ["authorization_code"],
}
)
db.session.add(client)
db.session.commit()
rv = test_client.post(authorize_url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"client_id": "client-id",
},
)
resp = json.loads(rv.data)
assert "access_token" in resp
assert "c-authorization_code.1." in resp["access_token"]
| AuthorizationCodeGrant |
python | facebook__pyre-check | source/command/test/integration/fake_repository/commit_007_T30944862/a.py | {
"start": 266,
"end": 329
} | class ____:
def foo(self, x: str) -> None:
pass
| Derived |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 186834,
"end": 188694
} | class ____(rv_continuous):
r"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is
.. math::
f(x) = \frac{1}{2} \exp(-|x|)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.laplace(0, 1, size=size)
def _pdf(self, x):
# laplace.pdf(x) = 1/2 * exp(-abs(x))
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
with np.errstate(over='ignore'):
return np.where(x > 0, 1.0 - 0.5*np.exp(-x), 0.5*np.exp(x))
def _sf(self, x):
# By symmetry...
return self._cdf(-x)
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _isf(self, q):
# By symmetry...
return -self._ppf(q)
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
@_call_super_mom
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the Laplace distribution parameters, so the keyword
arguments `loc`, `scale`, and `optimizer` are ignored.\n\n""")
def fit(self, data, *args, **kwds):
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
# and Peacock (2000), Page 124
if floc is None:
floc = np.median(data)
if fscale is None:
fscale = (np.sum(np.abs(data - floc))) / len(data)
return floc, fscale
laplace = laplace_gen(name='laplace')
| laplace_gen |
python | getsentry__sentry | src/sentry/types/ratelimit.py | {
"start": 278,
"end": 730
} | class ____:
"""Dataclass for defining a rate limit
Attributes:
limit (int): Max number of hits allowed within the window
window (int): Period of time in seconds that the rate limit applies for
concurrent_limit Optional(int): concurrent request limit (irrespective of window)
"""
limit: int
window: int
concurrent_limit: int | None = field(default=settings.SENTRY_CONCURRENT_RATE_LIMIT_DEFAULT)
| RateLimit |
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py | {
"start": 12931,
"end": 12991
} | class ____(Dinov2Encoder):
pass
| Dinov2WithRegistersEncoder |
python | keras-team__keras | keras/src/layers/rnn/lstm.py | {
"start": 505,
"end": 13173
} | class ____(Layer, DropoutRNNCell):
"""Cell class for the LSTM layer.
This class processes one step within the whole time sequence input, whereas
`keras.layer.LSTM` processes the whole sequence.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer
should use a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation
of the recurrent state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
unit_forget_bias: Boolean (default `True`). If `True`,
add 1 to the bias of the forget gate at initialization.
Setting it to `True` will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](
https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
seed: Random seed for dropout.
Call arguments:
inputs: A 2D tensor, with shape `(batch, features)`.
states: A 2D tensor with shape `(batch, units)`, which is the state
from the previous time step.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
Example:
>>> inputs = np.random.random((32, 10, 8))
>>> rnn = keras.layers.RNN(keras.layers.LSTMCell(4))
>>> output = rnn(inputs)
>>> output.shape
(32, 4)
>>> rnn = keras.layers.RNN(
... keras.layers.LSTMCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_sequence_output, final_state = rnn(inputs)
>>> whole_sequence_output.shape
(32, 10, 4)
>>> final_state.shape
(32, 4)
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
seed=None,
**kwargs,
):
if units <= 0:
raise ValueError(
"Received an invalid value for argument `units`, "
f"expected a positive integer, got {units}."
)
implementation = kwargs.pop("implementation", 2)
super().__init__(**kwargs)
self.implementation = implementation
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1.0, max(0.0, dropout))
self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
if self.recurrent_dropout != 0.0:
self.implementation = 1
if self.implementation == 1:
self.dropout_mask_count = 4
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed=seed)
self.unit_forget_bias = unit_forget_bias
self.state_size = [self.units, self.units]
self.output_size = self.units
def build(self, input_shape):
super().build(input_shape)
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return ops.concatenate(
[
self.bias_initializer(
(self.units,), *args, **kwargs
),
initializers.get("ones")(
(self.units,), *args, **kwargs
),
self.bias_initializer(
(self.units * 2,), *args, **kwargs
),
]
)
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name="bias",
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + ops.matmul(h_tm1_i, self.recurrent_kernel[:, : self.units])
)
f = self.recurrent_activation(
x_f
+ ops.matmul(
h_tm1_f, self.recurrent_kernel[:, self.units : self.units * 2]
)
)
c = f * c_tm1 + i * self.activation(
x_c
+ ops.matmul(
h_tm1_c,
self.recurrent_kernel[:, self.units * 2 : self.units * 3],
)
)
o = self.recurrent_activation(
x_o
+ ops.matmul(h_tm1_o, self.recurrent_kernel[:, self.units * 3 :])
)
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states, training=False):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if self.implementation == 1:
if training and 0.0 < self.dropout < 1.0:
dp_mask = self.get_dropout_mask(inputs)
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
k_i, k_f, k_c, k_o = ops.split(self.kernel, 4, axis=1)
x_i = ops.matmul(inputs_i, k_i)
x_f = ops.matmul(inputs_f, k_f)
x_c = ops.matmul(inputs_c, k_c)
x_o = ops.matmul(inputs_o, k_o)
if self.use_bias:
b_i, b_f, b_c, b_o = ops.split(self.bias, 4, axis=0)
x_i += b_i
x_f += b_f
x_c += b_c
x_o += b_o
if training and 0.0 < self.recurrent_dropout < 1.0:
rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1)
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x = (x_i, x_f, x_c, x_o)
h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
else:
if training and 0.0 < self.dropout < 1.0:
dp_mask = self.get_dropout_mask(inputs)
inputs = inputs * dp_mask
z = ops.matmul(inputs, self.kernel)
z = ops.add(z, ops.matmul(h_tm1, self.recurrent_kernel))
if self.use_bias:
z = ops.add(z, self.bias)
z = ops.split(z, 4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"unit_forget_bias": self.unit_forget_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
def get_initial_state(self, batch_size=None):
return [
ops.zeros((batch_size, d), dtype=self.compute_dtype)
for d in self.state_size
]
@keras_export("keras.layers.LSTM")
| LSTMCell |
python | sqlalchemy__sqlalchemy | test/perf/compiled_extensions/misc.py | {
"start": 243,
"end": 2733
} | class ____(Case):
@staticmethod
def python():
from sqlalchemy.engine import _processors_cy
py_processors = load_uncompiled_module(_processors_cy)
assert not py_processors._is_compiled()
return py_processors
@staticmethod
def cython():
from sqlalchemy.engine import _processors_cy
assert _processors_cy._is_compiled()
return _processors_cy
IMPLEMENTATIONS = {
"python": python.__func__,
"cython": cython.__func__,
}
NUMBER = 500_000
def init_objects(self):
self.to_dec = self.impl.to_decimal_processor_factory(Decimal, 3)
@classmethod
def update_results(cls, results):
cls._divide_results(results, "c", "python", "c / py")
cls._divide_results(results, "cython", "python", "cy / py")
cls._divide_results(results, "cython", "c", "cy / c")
@test_case
def int_to_boolean(self):
self.impl.int_to_boolean(None)
self.impl.int_to_boolean(10)
self.impl.int_to_boolean(1)
self.impl.int_to_boolean(-10)
self.impl.int_to_boolean(0)
@test_case
def to_str(self):
self.impl.to_str(None)
self.impl.to_str(123)
self.impl.to_str(True)
self.impl.to_str(self)
self.impl.to_str("self")
@test_case
def to_float(self):
self.impl.to_float(None)
self.impl.to_float(123)
self.impl.to_float(True)
self.impl.to_float(42)
self.impl.to_float(0)
self.impl.to_float(42.0)
self.impl.to_float("nan")
self.impl.to_float("42")
self.impl.to_float("42.0")
@test_case
def str_to_datetime(self):
self.impl.str_to_datetime(None)
self.impl.str_to_datetime("2020-01-01 20:10:34")
self.impl.str_to_datetime("2030-11-21 01:04:34.123456")
@test_case
def str_to_time(self):
self.impl.str_to_time(None)
self.impl.str_to_time("20:10:34")
self.impl.str_to_time("01:04:34.123456")
@test_case
def str_to_date(self):
self.impl.str_to_date(None)
self.impl.str_to_date("2020-01-01")
@test_case
def to_decimal_call(self):
assert self.to_dec(None) is None
self.to_dec(123.44)
self.to_dec(99)
self.to_dec(1 / 3)
@test_case
def to_decimal_pf_make(self):
self.impl.to_decimal_processor_factory(Decimal, 3)
self.impl.to_decimal_processor_factory(Decimal, 7)
| Processors |
python | streamlit__streamlit | lib/tests/streamlit/elements/doc_string_test.py | {
"start": 1730,
"end": 4606
} | class ____(DeltaGeneratorTestCase):
"""Test Public Streamlit Public APIs."""
def test_st_help(self):
"""Test st.help."""
with patch_varname_getter():
st.help(os.chdir)
el = self.get_delta_from_queue().new_element.doc_string
assert el.name == "os.chdir"
assert el.type == "builtin_function_or_method"
assert el.doc_string.startswith("Change the current working directory")
assert el.value in ["posix.chdir(path)", "nt.chdir(path)"]
def test_st_help_with_available_conditional_members(self):
"""Test st.help with conditional members available"""
st.help(ConditionalHello(True))
el = self.get_delta_from_queue().new_element.doc_string
assert el.type == "ConditionalHello"
member_names = [member.name for member in el.members]
assert "say_hello" in member_names
def test_st_help_with_unavailable_conditional_members(self):
"""Test st.help with conditional members not available
via AttributeError"""
st.help(ConditionalHello(False))
el = self.get_delta_from_queue().new_element.doc_string
assert el.type == "ConditionalHello"
member_names = [member.name for member in el.members]
assert "say_hello" not in member_names
def test_st_help_with_erroneous_members(self):
"""Test st.help with conditional members not available
via some non-AttributeError exception"""
with pytest.raises(
ValueError, match="say_hello is not accessible when x is even"
):
st.help(ConditionalHello(False, ValueError))
def test_help_width(self):
"""Test that help() correctly handles width parameter."""
st.help(st, width="stretch")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
st.help(st, width=500)
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert c.width_config.pixel_width == 500
st.help(st)
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
@parameterized.expand(
["invalid", -100, 0, 100.5, None],
)
def test_help_invalid_width(self, width):
"""Test that help() raises an error for invalid width values."""
with pytest.raises(StreamlitInvalidWidthError) as exc_info:
st.help(st, width=width)
assert "Invalid width" in str(exc_info.value)
| StHelpAPITest |
python | walkccc__LeetCode | solutions/1046. Last Stone Weight/1046.py | {
"start": 0,
"end": 310
} | class ____:
def lastStoneWeight(self, stones: list[int]) -> int:
pq = [-stone for stone in stones]
heapq.heapify(pq)
while len(pq) >= 2:
n1 = -heapq.heappop(pq)
n2 = -heapq.heappop(pq)
if n1 != n2:
heapq.heappush(pq, -(n1 - n2))
return 0 if not pq else -pq[0]
| Solution |
python | automl__auto-sklearn | autosklearn/pipeline/implementations/SparseOneHotEncoder.py | {
"start": 141,
"end": 4492
} | class ____(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a sparse matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix were each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
Attributes
----------
`feature_indices_` : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
`n_values_` : array of shape (n_features,)
Maximum number of values per feature.
"""
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _check_X(self, X):
if not sparse.issparse(X):
raise TypeError("SparseOneHotEncoder requires X to be sparse")
X = check_array(X, accept_sparse="csc", force_all_finite=False, dtype=np.int32)
if X.min() < 0:
raise ValueError("X needs to contain only non-negative integers.")
return X
def fit_transform(self, X, y=None):
X = self._check_X(X)
n_samples, n_features = X.shape
n_values = X.max(axis=0).toarray().flatten() + 2
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
row_indices = X.indices
column_indices = []
for i in range(len(X.indptr) - 1):
nbr = X.indptr[i + 1] - X.indptr[i]
column_indices_ = [indices[i]] * nbr
column_indices_ += X.data[X.indptr[i] : X.indptr[i + 1]]
column_indices.extend(column_indices_)
data = np.ones(X.data.size)
out = sparse.coo_matrix(
(data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=np.int32,
).tocsc()
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out.tocsr()
def transform(self, X):
X = self._check_X(X)
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError(
"X has different shape than during fitting."
" Expected %d, got %d." % (indices.shape[0] - 1, n_features)
)
n_values_check = X.max(axis=0).toarray().flatten() + 1
# Ignore all indicators which are out of bounds (i.e. assign index 0)
# This strategy is analogous with using handle_unkwon='ignore' on a sklearn's
# one hot encoder.
if (n_values_check > self.n_values_).any():
# raise ValueError("Feature out of bounds. Try setting n_values.")
for i, n_value_check in enumerate(n_values_check):
if (n_value_check - 1) >= self.n_values_[i]:
indptr_start = X.indptr[i]
indptr_end = X.indptr[i + 1]
zeros_mask = X.data[indptr_start:indptr_end] >= self.n_values_[i]
X.data[indptr_start:indptr_end][zeros_mask] = 0
row_indices = X.indices
column_indices = []
for i in range(len(X.indptr) - 1):
nbr = X.indptr[i + 1] - X.indptr[i]
column_indices_ = [indices[i]] * nbr
column_indices_ += X.data[X.indptr[i] : X.indptr[i + 1]]
column_indices.extend(column_indices_)
data = np.ones(X.data.size)
out = sparse.coo_matrix(
(data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=np.int32,
).tocsc()
out = out[:, self.active_features_]
return out.tocsr()
| SparseOneHotEncoder |
python | pallets__jinja | tests/test_security.py | {
"start": 563,
"end": 714
} | class ____:
def bar(self):
return 23
def _foo(self):
return 42
def __repr__(self):
return "PublicStuff"
| PublicStuff |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_setup_teardown.py | {
"start": 803,
"end": 1486
} | class ____:
@given(integers())
@settings(suppress_health_check=[HealthCheck.differing_executors])
def give_me_an_int(self, x):
pass
@given(text())
def give_me_a_string(self, x):
pass
@given(integers())
@settings(max_examples=1000)
def give_me_a_positive_int(self, x):
assert x >= 0
@given(integers().map(lambda x: x.nope))
def fail_in_reify(self, x):
pass
@given(integers())
@settings(suppress_health_check=[HealthCheck.filter_too_much])
def assume_some_stuff(self, x):
assume(x > 0)
@given(integers().filter(lambda x: x > 0))
def assume_in_reify(self, x):
pass
| SomeGivens |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_checks.py | {
"start": 9273,
"end": 9593
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneAssetChecks,
GrapheneAssetCheckNeedsMigrationError,
GrapheneAssetCheckNeedsUserCodeUpgrade,
GrapheneAssetCheckNeedsAgentUpgradeError,
)
name = "AssetChecksOrError"
| GrapheneAssetChecksOrError |
python | huggingface__transformers | src/transformers/models/qwen3_next/modeling_qwen3_next.py | {
"start": 2865,
"end": 3596
class ____(nn.Module):
    """RMSNorm with an optional SiLU gate applied *after* normalization.

    The norm is computed over the last dimension in float32 for numerical
    stability, scaled by a learned per-channel weight, then — when a gate
    tensor is supplied — multiplied by ``silu(gate)`` before being cast back
    to the input dtype.
    """

    def __init__(self, hidden_size, eps=1e-6, **kwargs):
        super().__init__()
        # Learned per-channel scale; initialized to ones (identity).
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states, gate=None):
        input_dtype = hidden_states.dtype
        # Upcast so the variance / rsqrt math runs in float32.
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        # Norm before gate
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight * hidden_states.to(input_dtype)
        # Fix: `gate` defaults to None but was previously dereferenced
        # unconditionally (`gate.to(...)`), so calling forward() without a
        # gate raised AttributeError. Gating is now skipped in that case,
        # making the documented default usable; behavior with a gate is
        # unchanged.
        if gate is not None:
            hidden_states = hidden_states * F.silu(gate.to(torch.float32))
        return hidden_states.to(input_dtype)
| Qwen3NextRMSNormGated |
python | huggingface__transformers | src/transformers/models/zoedepth/modeling_zoedepth.py | {
"start": 5386,
"end": 6452
class ____(nn.Module):
    """Project a backbone feature map to `channels` and resample it spatially
    by `factor`: learned upsampling when factor > 1, identity when factor == 1,
    strided-conv downsampling when factor < 1."""

    def __init__(self, config, channels, factor):
        super().__init__()
        # 1x1 conv maps the backbone hidden size onto the requested channels.
        self.projection = nn.Conv2d(
            in_channels=config.backbone_hidden_size,
            out_channels=channels,
            kernel_size=1,
        )
        # Pick the resampling op from the scale factor.
        if factor > 1:
            self.resize = nn.ConvTranspose2d(
                channels, channels, kernel_size=factor, stride=factor, padding=0
            )
        elif factor == 1:
            self.resize = nn.Identity()
        elif factor < 1:
            # Downsample by 1/factor with a strided 3x3 conv.
            self.resize = nn.Conv2d(
                channels, channels, kernel_size=3, stride=int(1 / factor), padding=1
            )

    # Same contract as DPTReassembleLayer.forward (DPT->ZoeDepth).
    def forward(self, hidden_state):
        return self.resize(self.projection(hidden_state))
# Copied from transformers.models.dpt.modeling_dpt.DPTFeatureFusionStage with DPT->ZoeDepth
| ZoeDepthReassembleLayer |
python | pdm-project__pdm | tests/test_utils.py | {
"start": 9432,
"end": 13281
class ____:
    """Tests for utils.get_venv_like_prefix(path) -> (prefix | None, is_conda).

    NOTE(review): contract inferred from these cases — a conda-meta/ dir
    marks a conda env, pyvenv.cfg marks a standard venv, and the
    VIRTUAL_ENV / CONDA_PREFIX environment variables are honored without
    any files existing on disk; confirm against utils.get_venv_like_prefix.
    """

    def test_conda_env_with_conda_meta_in_bin(self, tmp_path: Path):
        # conda-meta/ sitting right next to the interpreter: the bin dir
        # itself is reported as the (conda) prefix.
        path = tmp_path / "conda/bin/python3"
        path_parent = path.parent
        path_parent.mkdir(parents=True)
        path.touch()
        path_parent.joinpath("conda-meta").mkdir()
        received = utils.get_venv_like_prefix(path)
        expected = path_parent, True
        assert received == expected

    def test_py_env_with_pyvenv_cfg(self, tmp_path: Path):
        # pyvenv.cfg in the parent of bin/ identifies a standard venv
        # rooted at that parent; is_conda is False.
        path = tmp_path / "venv/bin/python3"
        bin_path = path.parent
        venv_path = path.parent.parent
        bin_path.mkdir(parents=True)
        path.touch()
        venv_path.joinpath("pyvenv.cfg").touch()
        received = utils.get_venv_like_prefix(str(path))
        expected = venv_path, False
        assert received == expected

    def test_conda_env_with_conda_meta(self, tmp_path: Path):
        # conda-meta/ in the parent of bin/ identifies a conda env rooted
        # at that parent.
        path = tmp_path / "conda/bin/python3"
        interpreter_bin_path = path.parent
        interpreter_bin_parent_path = interpreter_bin_path.parent
        interpreter_bin_path.mkdir(parents=True)
        path.touch()
        interpreter_bin_parent_path.joinpath("conda-meta").mkdir()
        received = utils.get_venv_like_prefix(str(path))
        expected = interpreter_bin_parent_path, True
        assert received == expected

    def test_virtual_env(self, monkeypatch):
        # VIRTUAL_ENV env var alone is enough — no files are created.
        path = Path("/my/venv")
        expected = path, False
        monkeypatch.setenv("VIRTUAL_ENV", str(path))
        received = utils.get_venv_like_prefix(path.joinpath("bin", "python3"))
        assert received == expected

    def test_conda_virtual_env(self, monkeypatch):
        # CONDA_PREFIX env var marks the interpreter as conda-based.
        path = Path("/my/conda/venv")
        expected = path, True
        monkeypatch.setenv("CONDA_PREFIX", str(path))
        received = utils.get_venv_like_prefix(path.joinpath("bin", "python3"))
        assert received == expected

    def test_no_virtual_env(self):
        # A plain path with no markers yields (None, False).
        path = Path("/not/a/venv/bin/python3")
        expected = None, False
        received = utils.get_venv_like_prefix(str(path))
        assert received == expected
def compare_python_paths(path1, path2):
    """Return True when both interpreter paths live in the same directory.

    Used instead of strict equality because the discovered interpreter's
    file name may differ (e.g. python3 vs python3.11) while the location
    is what matters.
    """
    same_directory = path1.parent == path2.parent
    return same_directory
@pytest.mark.path
def test_find_python_in_path(tmp_path):
    """find_python_in_path resolves an executable path or a prefix to an
    interpreter, and returns a falsy value when none exists."""
    # An explicit executable path round-trips to its absolute form.
    assert utils.find_python_in_path(sys.executable) == pathlib.Path(sys.executable).absolute()
    posix_path_to_executable = pathlib.Path(sys.executable)
    # Given a prefix, the found interpreter should live in the same directory
    # as the running executable (exact file name may differ, hence the
    # parent-directory comparison).
    assert compare_python_paths(
        utils.find_python_in_path(sys.prefix),
        posix_path_to_executable,
    )
    # An empty temporary directory contains no interpreter.
    assert not utils.find_python_in_path(tmp_path)
# Each case: (URL, expected revision). The revision is the "@rev" suffix of
# the path component, independent of any "#fragment"; no "@rev" yields "".
@pytest.mark.parametrize(
    "given,expected",
    [
        ("scheme://netloc/path@rev#fragment", "rev"),
        ("scheme://netloc/path@rev", "rev"),
        ("scheme://netloc/path", ""),
        ("scheme://netloc/path#fragment", ""),
    ],
)
def test_get_rev_from_url(given, expected):
    assert utils.get_rev_from_url(given) == expected
# Each case: ((raw_name, lowercase?), expected). Rule inferred from the
# cases: any run of characters outside [A-Za-z0-9] (including "_") collapses
# to a single "-", and the result is lowercased when the flag is True —
# confirm against utils.normalize_name.
@pytest.mark.parametrize(
    "given,expected",
    [
        (("ProjectName", False), "ProjectName"),
        (("ProjectName", True), "projectname"),
        (("1Project_Name", False), "1Project-Name"),
        (("1Project_Name", True), "1project-name"),
        (("Project-Name", False), "Project-Name"),
        (("Project-Name", True), "project-name"),
        (("Project123Name", False), "Project123Name"),
        (("Project123name", True), "project123name"),
        (("123$!ProjectName", False), "123-ProjectName"),
        (("123$!ProjectName", True), "123-projectname"),
        (("123$!Project_Name", False), "123-Project-Name"),
        (("123$!Project_Name", True), "123-project-name"),
        (("$!123Project_Name4", False), "-123Project-Name4"),
        (("$!123Project_Name4", True), "-123project-name4"),
    ],
)
def test_normalize_name(given, expected):
    assert utils.normalize_name(*given) == expected
| TestGetVenvLikePrefix |
python | pydata__xarray | xarray/tests/test_dataarray.py | {
"start": 1934,
"end": 185183
} | class ____:
@pytest.fixture(autouse=True)
def setup(self):
self.attrs = {"attr1": "value1", "attr2": 2929}
self.x = np.random.random((10, 20))
self.v = Variable(["x", "y"], self.x)
self.va = Variable(["x", "y"], self.x, self.attrs)
self.ds = Dataset({"foo": self.v})
self.dv = self.ds["foo"]
self.mindex = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=("level_1", "level_2")
)
self.mda = DataArray([0, 1, 2, 3], coords={"x": self.mindex}, dims="x").astype(
np.uint64
)
def test_repr(self) -> None:
v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
v = v.astype(np.uint64)
coords = {"x": np.arange(3, dtype=np.uint64), "other": np.uint64(0)}
data_array = DataArray(v, coords, name="my_variable")
expected = dedent(
"""\
<xarray.DataArray 'my_variable' (time: 2, x: 3)> Size: 48B
array([[1, 2, 3],
[4, 5, 6]], dtype=uint64)
Coordinates:
* x (x) uint64 24B 0 1 2
other uint64 8B 0
Dimensions without coordinates: time
Attributes:
foo: bar"""
)
assert expected == repr(data_array)
def test_repr_multiindex(self) -> None:
obj_size = np.dtype("O").itemsize
expected = dedent(
f"""\
<xarray.DataArray (x: 4)> Size: 32B
array([0, 1, 2, 3], dtype=uint64)
Coordinates:
* x (x) object {4 * obj_size}B MultiIndex
* level_1 (x) object {4 * obj_size}B 'a' 'a' 'b' 'b'
* level_2 (x) int64 32B 1 2 1 2"""
)
assert expected == repr(self.mda)
def test_repr_multiindex_long(self) -> None:
mindex_long = pd.MultiIndex.from_product(
[["a", "b", "c", "d"], [1, 2, 3, 4, 5, 6, 7, 8]],
names=("level_1", "level_2"),
)
mda_long = DataArray(
list(range(32)), coords={"x": mindex_long}, dims="x"
).astype(np.uint64)
obj_size = np.dtype("O").itemsize
expected = dedent(
f"""\
<xarray.DataArray (x: 32)> Size: 256B
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31],
dtype=uint64)
Coordinates:
* x (x) object {32 * obj_size}B MultiIndex
* level_1 (x) object {32 * obj_size}B 'a' 'a' 'a' 'a' 'a' 'a' ... 'd' 'd' 'd' 'd' 'd' 'd'
* level_2 (x) int64 256B 1 2 3 4 5 6 7 8 1 2 3 4 ... 5 6 7 8 1 2 3 4 5 6 7 8"""
)
assert expected == repr(mda_long)
def test_properties(self) -> None:
assert_equal(self.dv.variable, self.v)
assert_array_equal(self.dv.values, self.v.values)
for attr in ["dims", "dtype", "shape", "size", "nbytes", "ndim", "attrs"]:
assert getattr(self.dv, attr) == getattr(self.v, attr)
assert len(self.dv) == len(self.v)
assert_equal(self.dv.variable, self.v)
assert set(self.dv.coords) == set(self.ds.coords)
for k, v in self.dv.coords.items():
assert_array_equal(v, self.ds.coords[k])
with pytest.raises(AttributeError):
_ = self.dv.dataset
assert isinstance(self.ds["x"].to_index(), pd.Index)
with pytest.raises(ValueError, match=r"must be 1-dimensional"):
self.ds["foo"].to_index()
with pytest.raises(AttributeError):
self.dv.variable = self.v # type: ignore[misc]
def test_data_property(self) -> None:
array = DataArray(np.zeros((3, 4)))
actual = array.copy()
actual.values = np.ones((3, 4))
assert_array_equal(np.ones((3, 4)), actual.values)
actual.data = 2 * np.ones((3, 4))
assert_array_equal(2 * np.ones((3, 4)), actual.data)
assert_array_equal(actual.data, actual.values)
def test_indexes(self) -> None:
array = DataArray(np.zeros((2, 3)), [("x", [0, 1]), ("y", ["a", "b", "c"])])
expected_indexes = {"x": pd.Index([0, 1]), "y": pd.Index(["a", "b", "c"])}
expected_xindexes = {
k: PandasIndex(idx, k) for k, idx in expected_indexes.items()
}
assert array.xindexes.keys() == expected_xindexes.keys()
assert array.indexes.keys() == expected_indexes.keys()
assert all(isinstance(idx, pd.Index) for idx in array.indexes.values())
assert all(isinstance(idx, Index) for idx in array.xindexes.values())
for k in expected_indexes:
assert array.xindexes[k].equals(expected_xindexes[k])
assert array.indexes[k].equals(expected_indexes[k])
def test_get_index(self) -> None:
array = DataArray(np.zeros((2, 3)), coords={"x": ["a", "b"]}, dims=["x", "y"])
assert array.get_index("x").equals(pd.Index(["a", "b"]))
assert array.get_index("y").equals(pd.Index([0, 1, 2]))
with pytest.raises(KeyError):
array.get_index("z")
def test_get_index_size_zero(self) -> None:
array = DataArray(np.zeros((0,)), dims=["x"])
actual = array.get_index("x")
expected = pd.Index([], dtype=np.int64)
assert actual.equals(expected)
assert actual.dtype == expected.dtype
def test_struct_array_dims(self) -> None:
"""
This test checks subtraction of two DataArrays for the case
when dimension is a structured array.
"""
# GH837, GH861
# checking array subtraction when dims are the same
p_data = np.array(
[("Abe", 180), ("Stacy", 150), ("Dick", 200)],
dtype=[("name", "|S256"), ("height", object)],
)
weights_0 = DataArray(
[80, 56, 120], dims=["participant"], coords={"participant": p_data}
)
weights_1 = DataArray(
[81, 52, 115], dims=["participant"], coords={"participant": p_data}
)
actual = weights_1 - weights_0
expected = DataArray(
[1, -4, -5], dims=["participant"], coords={"participant": p_data}
)
assert_identical(actual, expected)
# checking array subtraction when dims are not the same
p_data_alt = np.array(
[("Abe", 180), ("Stacy", 151), ("Dick", 200)],
dtype=[("name", "|S256"), ("height", object)],
)
weights_1 = DataArray(
[81, 52, 115], dims=["participant"], coords={"participant": p_data_alt}
)
actual = weights_1 - weights_0
expected = DataArray(
[1, -5], dims=["participant"], coords={"participant": p_data[[0, 2]]}
)
assert_identical(actual, expected)
# checking array subtraction when dims are not the same and one
# is np.nan
p_data_nan = np.array(
[("Abe", 180), ("Stacy", np.nan), ("Dick", 200)],
dtype=[("name", "|S256"), ("height", object)],
)
weights_1 = DataArray(
[81, 52, 115], dims=["participant"], coords={"participant": p_data_nan}
)
actual = weights_1 - weights_0
expected = DataArray(
[1, -5], dims=["participant"], coords={"participant": p_data[[0, 2]]}
)
assert_identical(actual, expected)
def test_name(self) -> None:
arr = self.dv
assert arr.name == "foo"
copied = arr.copy()
arr.name = "bar"
assert arr.name == "bar"
assert_equal(copied, arr)
actual = DataArray(IndexVariable("x", [3]))
actual.name = "y"
expected = DataArray([3], [("x", [3])], name="y")
assert_identical(actual, expected)
def test_dims(self) -> None:
arr = self.dv
assert arr.dims == ("x", "y")
with pytest.raises(AttributeError, match=r"you cannot assign"):
arr.dims = ("w", "z")
def test_sizes(self) -> None:
array = DataArray(np.zeros((3, 4)), dims=["x", "y"])
assert array.sizes == {"x": 3, "y": 4}
assert tuple(array.sizes) == array.dims
with pytest.raises(TypeError):
array.sizes["foo"] = 5 # type: ignore[index]
def test_encoding(self) -> None:
expected = {"foo": "bar"}
self.dv.encoding["foo"] = "bar"
assert expected == self.dv.encoding
expected2 = {"baz": 0}
self.dv.encoding = expected2
assert expected2 is not self.dv.encoding
def test_drop_encoding(self) -> None:
array = self.mda
encoding = {"scale_factor": 10}
array.encoding = encoding
array["x"].encoding = encoding
assert array.encoding == encoding
assert array["x"].encoding == encoding
actual = array.drop_encoding()
# did not modify in place
assert array.encoding == encoding
assert array["x"].encoding == encoding
# variable and coord encoding is empty
assert actual.encoding == {}
assert actual["x"].encoding == {}
def test_constructor(self) -> None:
data = np.random.random((2, 3))
# w/o coords, w/o dims
actual = DataArray(data)
expected = Dataset({None: (["dim_0", "dim_1"], data)})[None]
assert_identical(expected, actual)
actual = DataArray(data, [["a", "b"], [-1, -2, -3]])
expected = Dataset(
{
None: (["dim_0", "dim_1"], data),
"dim_0": ("dim_0", ["a", "b"]),
"dim_1": ("dim_1", [-1, -2, -3]),
}
)[None]
assert_identical(expected, actual)
# pd.Index coords, w/o dims
actual = DataArray(
data, [pd.Index(["a", "b"], name="x"), pd.Index([-1, -2, -3], name="y")]
)
expected = Dataset(
{None: (["x", "y"], data), "x": ("x", ["a", "b"]), "y": ("y", [-1, -2, -3])}
)[None]
assert_identical(expected, actual)
# list coords, w dims
coords1: list[Any] = [["a", "b"], [-1, -2, -3]]
actual = DataArray(data, coords1, ["x", "y"])
assert_identical(expected, actual)
# pd.Index coords, w dims
coords2: list[pd.Index] = [
pd.Index(["a", "b"], name="A"),
pd.Index([-1, -2, -3], name="B"),
]
actual = DataArray(data, coords2, ["x", "y"])
assert_identical(expected, actual)
# dict coords, w dims
coords3 = {"x": ["a", "b"], "y": [-1, -2, -3]}
actual = DataArray(data, coords3, ["x", "y"])
assert_identical(expected, actual)
# dict coords, w/o dims
actual = DataArray(data, coords3)
assert_identical(expected, actual)
# tuple[dim, list] coords, w/o dims
coords4 = [("x", ["a", "b"]), ("y", [-1, -2, -3])]
actual = DataArray(data, coords4)
assert_identical(expected, actual)
# partial dict coords, w dims
expected = Dataset({None: (["x", "y"], data), "x": ("x", ["a", "b"])})[None]
actual = DataArray(data, {"x": ["a", "b"]}, ["x", "y"])
assert_identical(expected, actual)
# w/o coords, w dims
actual = DataArray(data, dims=["x", "y"])
expected = Dataset({None: (["x", "y"], data)})[None]
assert_identical(expected, actual)
# w/o coords, w dims, w name
actual = DataArray(data, dims=["x", "y"], name="foo")
expected = Dataset({"foo": (["x", "y"], data)})["foo"]
assert_identical(expected, actual)
# w/o coords, w/o dims, w name
actual = DataArray(data, name="foo")
expected = Dataset({"foo": (["dim_0", "dim_1"], data)})["foo"]
assert_identical(expected, actual)
# w/o coords, w dims, w attrs
actual = DataArray(data, dims=["x", "y"], attrs={"bar": 2})
expected = Dataset({None: (["x", "y"], data, {"bar": 2})})[None]
assert_identical(expected, actual)
# w/o coords, w dims (ds has attrs)
actual = DataArray(data, dims=["x", "y"])
expected = Dataset({None: (["x", "y"], data, {}, {"bar": 2})})[None]
assert_identical(expected, actual)
# data is list, w coords
actual = DataArray([1, 2, 3], coords={"x": [0, 1, 2]})
expected = DataArray([1, 2, 3], coords=[("x", [0, 1, 2])])
assert_identical(expected, actual)
def test_constructor_invalid(self) -> None:
data = np.random.randn(3, 2)
with pytest.raises(ValueError, match=r"coords is not dict-like"):
DataArray(data, [[0, 1, 2]], ["x", "y"])
with pytest.raises(ValueError, match=r"not a subset of the .* dim"):
DataArray(data, {"x": [0, 1, 2]}, ["a", "b"])
with pytest.raises(ValueError, match=r"not a subset of the .* dim"):
DataArray(data, {"x": [0, 1, 2]})
with pytest.raises(TypeError, match=r"is not hashable"):
DataArray(data, dims=["x", []]) # type: ignore[list-item]
with pytest.raises(
CoordinateValidationError, match=r"conflicting sizes for dim"
):
DataArray([1, 2, 3], coords=[("x", [0, 1])])
with pytest.raises(
CoordinateValidationError, match=r"conflicting sizes for dim"
):
DataArray([1, 2], coords={"x": [0, 1], "y": ("x", [1])}, dims="x")
with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
DataArray(np.random.rand(4, 4), [("x", self.mindex), ("y", self.mindex)])
with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
DataArray(np.random.rand(4, 4), [("x", self.mindex), ("level_1", range(4))])
def test_constructor_from_self_described(self) -> None:
data: list[list[float]] = [[-0.1, 21], [0, 2]]
expected = DataArray(
data,
coords={"x": ["a", "b"], "y": [-1, -2]},
dims=["x", "y"],
name="foobar",
attrs={"bar": 2},
)
actual = DataArray(expected)
assert_identical(expected, actual)
actual = DataArray(expected.values, actual.coords)
assert_equal(expected, actual)
frame = pd.DataFrame(
data,
index=pd.Index(["a", "b"], name="x"),
columns=pd.Index([-1, -2], name="y"),
)
actual = DataArray(frame)
assert_equal(expected, actual)
series = pd.Series(data[0], index=pd.Index([-1, -2], name="y"))
actual = DataArray(series)
assert_equal(expected[0].reset_coords("x", drop=True), actual)
expected = DataArray(
data,
coords={"x": ["a", "b"], "y": [-1, -2], "a": 0, "z": ("x", [-0.5, 0.5])},
dims=["x", "y"],
)
actual = DataArray(expected)
assert_identical(expected, actual)
actual = DataArray(expected.values, expected.coords)
assert_identical(expected, actual)
expected = Dataset({"foo": ("foo", ["a", "b"])})["foo"]
actual = DataArray(pd.Index(["a", "b"], name="foo"))
assert_identical(expected, actual)
actual = DataArray(IndexVariable("foo", ["a", "b"]))
assert_identical(expected, actual)
@requires_dask
def test_constructor_from_self_described_chunked(self) -> None:
expected = DataArray(
[[-0.1, 21], [0, 2]],
coords={"x": ["a", "b"], "y": [-1, -2]},
dims=["x", "y"],
name="foobar",
attrs={"bar": 2},
).chunk()
actual = DataArray(expected)
assert_identical(expected, actual)
assert_chunks_equal(expected, actual)
def test_constructor_from_0d(self) -> None:
expected = Dataset({None: ([], 0)})[None]
actual = DataArray(0)
assert_identical(expected, actual)
@requires_dask
def test_constructor_dask_coords(self) -> None:
# regression test for GH1684
import dask.array as da
coord = da.arange(8, chunks=(4,))
data = da.random.random((8, 8), chunks=(4, 4)) + 1
actual = DataArray(data, coords={"x": coord, "y": coord}, dims=["x", "y"])
ecoord = np.arange(8)
expected = DataArray(data, coords={"x": ecoord, "y": ecoord}, dims=["x", "y"])
assert_equal(actual, expected)
def test_constructor_no_default_index(self) -> None:
# explicitly passing a Coordinates object skips the creation of default index
da = DataArray(range(3), coords=Coordinates({"x": [1, 2, 3]}, indexes={}))
assert "x" in da.coords
assert "x" not in da.xindexes
def test_constructor_multiindex(self) -> None:
midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
coords = Coordinates.from_pandas_multiindex(midx, "x")
da = DataArray(range(4), coords=coords, dims="x")
assert_identical(da.coords, coords)
def test_constructor_custom_index(self) -> None:
class CustomIndex(Index): ...
coords = Coordinates(
coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()}
)
da = DataArray(range(3), coords=coords)
assert isinstance(da.xindexes["x"], CustomIndex)
# test coordinate variables copied
assert da.coords["x"] is not coords.variables["x"]
def test_constructor_extra_dim_index_coord(self) -> None:
class AnyIndex(Index):
def should_add_coord_to_array(self, name, var, dims):
return True
idx = AnyIndex()
coords = Coordinates(
coords={
"x": ("x", [1, 2]),
"x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]),
},
indexes={"x": idx, "x_bounds": idx},
)
actual = DataArray([1.0, 2.0], coords=coords, dims="x")
assert_identical(actual.coords, coords, check_default_indexes=False)
assert "x_bnds" not in actual.dims
def test_equals_and_identical(self) -> None:
orig = DataArray(np.arange(5.0), {"a": 42}, dims="x")
expected = orig
actual = orig.copy()
assert expected.equals(actual)
assert expected.identical(actual)
actual = expected.rename("baz")
assert expected.equals(actual)
assert not expected.identical(actual)
actual = expected.rename({"x": "xxx"})
assert not expected.equals(actual)
assert not expected.identical(actual)
actual = expected.copy()
actual.attrs["foo"] = "bar"
assert expected.equals(actual)
assert not expected.identical(actual)
actual = expected.copy()
actual["x"] = ("x", -np.arange(5))
assert not expected.equals(actual)
assert not expected.identical(actual)
actual = expected.reset_coords(drop=True)
assert not expected.equals(actual)
assert not expected.identical(actual)
actual = orig.copy()
actual[0] = np.nan
expected = actual.copy()
assert expected.equals(actual)
assert expected.identical(actual)
actual[:] = np.nan
assert not expected.equals(actual)
assert not expected.identical(actual)
actual = expected.copy()
actual["a"] = 100000
assert not expected.equals(actual)
assert not expected.identical(actual)
def test_equals_failures(self) -> None:
orig = DataArray(np.arange(5.0), {"a": 42}, dims="x")
assert not orig.equals(np.arange(5)) # type: ignore[arg-type]
assert not orig.identical(123) # type: ignore[arg-type]
assert not orig.broadcast_equals({1: 2}) # type: ignore[arg-type]
def test_broadcast_equals(self) -> None:
a = DataArray([0, 0], {"y": 0}, dims="x")
b = DataArray([0, 0], {"y": ("x", [0, 0])}, dims="x")
assert a.broadcast_equals(b)
assert b.broadcast_equals(a)
assert not a.equals(b)
assert not a.identical(b)
c = DataArray([0], coords={"x": 0}, dims="y")
assert not a.broadcast_equals(c)
assert not c.broadcast_equals(a)
def test_getitem(self) -> None:
# strings pull out dataarrays
assert_identical(self.dv, self.ds["foo"])
x = self.dv["x"]
y = self.dv["y"]
assert_identical(self.ds["x"], x)
assert_identical(self.ds["y"], y)
arr = ReturnItem()
for i in [
arr[:],
arr[...],
arr[x.values],
arr[x.variable],
arr[x],
arr[x, y],
arr[x.values > -1],
arr[x.variable > -1],
arr[x > -1],
arr[x > -1, y > -1],
]:
assert_equal(self.dv, self.dv[i])
for i in [
arr[0],
arr[:, 0],
arr[:3, :2],
arr[x.values[:3]],
arr[x.variable[:3]],
arr[x[:3]],
arr[x[:3], y[:4]],
arr[x.values > 3],
arr[x.variable > 3],
arr[x > 3],
arr[x > 3, y > 3],
]:
assert_array_equal(self.v[i], self.dv[i])
def test_getitem_dict(self) -> None:
actual = self.dv[{"x": slice(3), "y": 0}]
expected = self.dv.isel(x=slice(3), y=0)
assert_identical(expected, actual)
def test_getitem_coords(self) -> None:
orig = DataArray(
[[10], [20]],
{
"x": [1, 2],
"y": [3],
"z": 4,
"x2": ("x", ["a", "b"]),
"y2": ("y", ["c"]),
"xy": (["y", "x"], [["d", "e"]]),
},
dims=["x", "y"],
)
assert_identical(orig, orig[:])
assert_identical(orig, orig[:, :])
assert_identical(orig, orig[...])
assert_identical(orig, orig[:2, :1])
assert_identical(orig, orig[[0, 1], [0]])
actual = orig[0, 0]
expected = DataArray(
10, {"x": 1, "y": 3, "z": 4, "x2": "a", "y2": "c", "xy": "d"}
)
assert_identical(expected, actual)
actual = orig[0, :]
expected = DataArray(
[10],
{
"x": 1,
"y": [3],
"z": 4,
"x2": "a",
"y2": ("y", ["c"]),
"xy": ("y", ["d"]),
},
dims="y",
)
assert_identical(expected, actual)
actual = orig[:, 0]
expected = DataArray(
[10, 20],
{
"x": [1, 2],
"y": 3,
"z": 4,
"x2": ("x", ["a", "b"]),
"y2": "c",
"xy": ("x", ["d", "e"]),
},
dims="x",
)
assert_identical(expected, actual)
def test_getitem_dataarray(self) -> None:
# It should not conflict
da = DataArray(np.arange(12).reshape((3, 4)), dims=["x", "y"])
ind = DataArray([[0, 1], [0, 1]], dims=["x", "z"])
actual = da[ind]
assert_array_equal(actual, da.values[[[0, 1], [0, 1]], :])
da = DataArray(
np.arange(12).reshape((3, 4)),
dims=["x", "y"],
coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]},
)
ind = xr.DataArray([[0, 1], [0, 1]], dims=["X", "Y"])
actual = da[ind]
expected = da.values[[[0, 1], [0, 1]], :]
assert_array_equal(actual, expected)
assert actual.dims == ("X", "Y", "y")
# boolean indexing
ind = xr.DataArray([True, True, False], dims=["x"])
assert_equal(da[ind], da[[0, 1], :])
assert_equal(da[ind], da[[0, 1]])
assert_equal(da[ind], da[ind.values])
def test_getitem_empty_index(self) -> None:
da = DataArray(np.arange(12).reshape((3, 4)), dims=["x", "y"])
assert_identical(da[{"x": []}], DataArray(np.zeros((0, 4)), dims=["x", "y"]))
assert_identical(
da.loc[{"y": []}], DataArray(np.zeros((3, 0)), dims=["x", "y"])
)
assert_identical(da[[]], DataArray(np.zeros((0, 4)), dims=["x", "y"]))
def test_getitem_typeerror(self) -> None:
with pytest.raises(TypeError, match=r"unexpected indexer type"):
self.dv[True]
with pytest.raises(TypeError, match=r"unexpected indexer type"):
self.dv[np.array(True)]
with pytest.raises(TypeError, match=r"invalid indexer array"):
self.dv[3.0]
with pytest.raises(TypeError, match=r"invalid indexer array"):
self.dv[None]
def test_setitem(self) -> None:
# basic indexing should work as numpy's indexing
tuples: list[tuple[int | list[int] | slice, int | list[int] | slice]] = [
(0, 0),
(0, slice(None, None)),
(slice(None, None), slice(None, None)),
(slice(None, None), 0),
([1, 0], slice(None, None)),
(slice(None, None), [1, 0]),
]
for t in tuples:
expected = np.arange(6).reshape(3, 2)
orig = DataArray(
np.arange(6).reshape(3, 2),
{
"x": [1, 2, 3],
"y": ["a", "b"],
"z": 4,
"x2": ("x", ["a", "b", "c"]),
"y2": ("y", ["d", "e"]),
},
dims=["x", "y"],
)
orig[t] = 1
expected[t] = 1
assert_array_equal(orig.values, expected)
def test_setitem_fancy(self) -> None:
# vectorized indexing
da = DataArray(np.ones((3, 2)), dims=["x", "y"])
ind = Variable(["a"], [0, 1])
da[dict(x=ind, y=ind)] = 0
expected = DataArray([[0, 1], [1, 0], [1, 1]], dims=["x", "y"])
assert_identical(expected, da)
# assign another 0d-variable
da[dict(x=ind, y=ind)] = Variable((), 0)
expected = DataArray([[0, 1], [1, 0], [1, 1]], dims=["x", "y"])
assert_identical(expected, da)
# assign another 1d-variable
da[dict(x=ind, y=ind)] = Variable(["a"], [2, 3])
expected = DataArray([[2, 1], [1, 3], [1, 1]], dims=["x", "y"])
assert_identical(expected, da)
# 2d-vectorized indexing
da = DataArray(np.ones((3, 2)), dims=["x", "y"])
ind_x = DataArray([[0, 1]], dims=["a", "b"])
ind_y = DataArray([[1, 0]], dims=["a", "b"])
da[dict(x=ind_x, y=ind_y)] = 0
expected = DataArray([[1, 0], [0, 1], [1, 1]], dims=["x", "y"])
assert_identical(expected, da)
da = DataArray(np.ones((3, 2)), dims=["x", "y"])
ind = Variable(["a"], [0, 1])
da[ind] = 0
expected = DataArray([[0, 0], [0, 0], [1, 1]], dims=["x", "y"])
assert_identical(expected, da)
def test_setitem_dataarray(self) -> None:
def get_data():
return DataArray(
np.ones((4, 3, 2)),
dims=["x", "y", "z"],
coords={
"x": np.arange(4),
"y": ["a", "b", "c"],
"non-dim": ("x", [1, 3, 4, 2]),
},
)
da = get_data()
# indexer with inconsistent coordinates.
ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.random.randn(3)})
with pytest.raises(IndexError, match=r"dimension coordinate 'x'"):
da[dict(x=ind)] = 0
# indexer with consistent coordinates.
ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.arange(1, 4)})
da[dict(x=ind)] = 0 # should not raise
assert np.allclose(da[dict(x=ind)].values, 0)
assert_identical(da["x"], get_data()["x"])
assert_identical(da["non-dim"], get_data()["non-dim"])
da = get_data()
# conflict in the assigning values
value = xr.DataArray(
np.zeros((3, 3, 2)),
dims=["x", "y", "z"],
coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])},
)
with pytest.raises(IndexError, match=r"dimension coordinate 'x'"):
da[dict(x=ind)] = value
# consistent coordinate in the assigning values
value = xr.DataArray(
np.zeros((3, 3, 2)),
dims=["x", "y", "z"],
coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])},
)
da[dict(x=ind)] = value
assert np.allclose(da[dict(x=ind)].values, 0)
assert_identical(da["x"], get_data()["x"])
assert_identical(da["non-dim"], get_data()["non-dim"])
# Conflict in the non-dimension coordinate
value = xr.DataArray(
np.zeros((3, 3, 2)),
dims=["x", "y", "z"],
coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])},
)
da[dict(x=ind)] = value # should not raise
# conflict in the assigning values
value = xr.DataArray(
np.zeros((3, 3, 2)),
dims=["x", "y", "z"],
coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])},
)
with pytest.raises(IndexError, match=r"dimension coordinate 'x'"):
da[dict(x=ind)] = value
# consistent coordinate in the assigning values
value = xr.DataArray(
np.zeros((3, 3, 2)),
dims=["x", "y", "z"],
coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])},
)
da[dict(x=ind)] = value # should not raise
def test_setitem_vectorized(self) -> None:
# Regression test for GH:7030
# Positional indexing
v = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"])
b = xr.DataArray([[0, 0], [1, 0]], dims=["u", "v"])
c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"])
w = xr.DataArray([-1, -2], dims=["u"])
index = dict(b=b, c=c)
v[index] = w
assert (v[index] == w).all()
# Indexing with coordinates
v = xr.DataArray(np.r_[:120].reshape(2, 3, 4, 5), dims=["a", "b", "c", "d"])
v.coords["b"] = [2, 4, 6]
b = xr.DataArray([[2, 2], [4, 2]], dims=["u", "v"])
c = xr.DataArray([[0, 1], [2, 3]], dims=["u", "v"])
w = xr.DataArray([-1, -2], dims=["u"])
index = dict(b=b, c=c)
v.loc[index] = w
assert (v.loc[index] == w).all()
def test_contains(self) -> None:
data_array = DataArray([1, 2])
assert 1 in data_array
assert 3 not in data_array
def test_pickle(self) -> None:
data = DataArray(np.random.random((3, 3)), dims=("id", "time"))
roundtripped = pickle.loads(pickle.dumps(data))
assert_identical(data, roundtripped)
@requires_dask
def test_chunk(self) -> None:
unblocked = DataArray(np.ones((3, 4)))
assert unblocked.chunks is None
blocked = unblocked.chunk()
assert blocked.chunks == ((3,), (4,))
first_dask_name = blocked.data.name
with pytest.warns(DeprecationWarning):
blocked = unblocked.chunk(chunks=((2, 1), (2, 2))) # type: ignore[arg-type]
assert blocked.chunks == ((2, 1), (2, 2))
assert blocked.data.name != first_dask_name
blocked = unblocked.chunk(chunks=(3, 3))
assert blocked.chunks == ((3,), (3, 1))
assert blocked.data.name != first_dask_name
with pytest.raises(ValueError):
blocked.chunk(chunks=(3, 3, 3))
# name doesn't change when rechunking by same amount
# this fails if ReprObject doesn't have __dask_tokenize__ defined
assert unblocked.chunk(2).data.name == unblocked.chunk(2).data.name
assert blocked.load().chunks is None
# Check that kwargs are passed
import dask.array as da
blocked = unblocked.chunk(name_prefix="testname_")
assert isinstance(blocked.data, da.Array)
assert "testname_" in blocked.data.name
# test kwargs form of chunks
blocked = unblocked.chunk(dim_0=3, dim_1=3)
assert blocked.chunks == ((3,), (3, 1))
assert blocked.data.name != first_dask_name
def test_isel(self) -> None:
assert_identical(self.dv[0], self.dv.isel(x=0))
assert_identical(self.dv, self.dv.isel(x=slice(None)))
assert_identical(self.dv[:3], self.dv.isel(x=slice(3)))
assert_identical(self.dv[:3, :5], self.dv.isel(x=slice(3), y=slice(5)))
with pytest.raises(
ValueError,
match=r"Dimensions {'not_a_dim'} do not exist. Expected "
r"one or more of \('x', 'y'\)",
):
self.dv.isel(not_a_dim=0)
with pytest.warns(
UserWarning,
match=r"Dimensions {'not_a_dim'} do not exist. "
r"Expected one or more of \('x', 'y'\)",
):
self.dv.isel(not_a_dim=0, missing_dims="warn")
assert_identical(self.dv, self.dv.isel(not_a_dim=0, missing_dims="ignore"))
def test_isel_types(self) -> None:
# regression test for #1405
da = DataArray([1, 2, 3], dims="x")
# uint64
assert_identical(
da.isel(x=np.array([0], dtype="uint64")), da.isel(x=np.array([0]))
)
# uint32
assert_identical(
da.isel(x=np.array([0], dtype="uint32")), da.isel(x=np.array([0]))
)
# int64
assert_identical(
da.isel(x=np.array([0], dtype="int64")), da.isel(x=np.array([0]))
)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_isel_fancy(self) -> None:
    """Vectorized (pointwise) isel with tuple, negative, and DataArray indexers."""
    shape = (10, 7, 6)
    np_array = np.random.random(shape)
    da = DataArray(
        np_array, dims=["time", "y", "x"], coords={"time": np.arange(0, 100, 10)}
    )
    y = [1, 3]
    x = [3, 0]
    expected = da.values[:, y, x]
    # (dims, values) tuple indexers introduce a shared output dimension
    actual = da.isel(y=(("test_coord",), y), x=(("test_coord",), x))
    assert actual.coords["test_coord"].shape == (len(y),)
    assert list(actual.coords) == ["time"]
    assert actual.dims == ("time", "test_coord")
    np.testing.assert_equal(actual, expected)
    # a few corner cases
    da.isel(
        time=(("points",), [1, 2]), x=(("points",), [2, 2]), y=(("points",), [3, 4])
    )
    np.testing.assert_allclose(
        da.isel(
            time=(("p",), [1]), x=(("p",), [2]), y=(("p",), [4])
        ).values.squeeze(),
        np_array[1, 4, 2].squeeze(),
    )
    da.isel(time=(("points",), [1, 2]))
    # negative integer positions are supported
    y = [-1, 0]
    x = [-2, 2]
    expected2 = da.values[:, y, x]
    actual2 = da.isel(x=(("points",), x), y=(("points",), y)).values
    np.testing.assert_equal(actual2, expected2)
    # test that the order of the indexers doesn't matter
    assert_identical(
        da.isel(y=(("points",), y), x=(("points",), x)),
        da.isel(x=(("points",), x), y=(("points",), y)),
    )
    # make sure we're raising errors in the right places
    with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"):
        da.isel(y=(("points",), [1, 2]), x=(("points",), [1, 2, 3]))
    # tests using index or DataArray as indexers
    stations = Dataset()
    stations["station"] = (("station",), ["A", "B", "C"])
    stations["dim1s"] = (("station",), [1, 2, 3])
    stations["dim2s"] = (("station",), [4, 5, 1])
    actual3 = da.isel(x=stations["dim1s"], y=stations["dim2s"])
    assert "station" in actual3.coords
    assert "station" in actual3.dims
    assert_identical(actual3["station"], stations["station"])
    # conflicting coordinates on the indexers themselves must raise
    with pytest.raises(ValueError, match=r"conflicting values/indexes on "):
        da.isel(
            x=DataArray([0, 1, 2], dims="station", coords={"station": [0, 1, 2]}),
            y=DataArray([0, 1, 2], dims="station", coords={"station": [0, 1, 3]}),
        )
    # multi-dimensional selection
    stations = Dataset()
    stations["a"] = (("a",), ["A", "B", "C"])
    stations["b"] = (("b",), [0, 1])
    stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]])
    stations["dim2s"] = (("a",), [4, 5, 1])
    actual4 = da.isel(x=stations["dim1s"], y=stations["dim2s"])
    assert "a" in actual4.coords
    assert "a" in actual4.dims
    assert "b" in actual4.coords
    assert "b" in actual4.dims
    assert_identical(actual4["a"], stations["a"])
    assert_identical(actual4["b"], stations["b"])
    expected4 = da.variable[
        :, stations["dim2s"].variable, stations["dim1s"].variable
    ]
    assert_array_equal(actual4, expected4)
def test_sel(self) -> None:
    """Label-based selection: scalars, slices, lists, and boolean arrays."""
    self.ds["x"] = ("x", np.array(list("abcdefghij")))
    da = self.ds["foo"]
    assert_identical(da, da.sel(x=slice(None)))
    assert_identical(da[1], da.sel(x="b"))
    assert_identical(da[:3], da.sel(x=slice("c")))
    assert_identical(da[:3], da.sel(x=["a", "b", "c"]))
    assert_identical(da[:, :4], da.sel(y=(self.ds["y"] < 4)))
    # verify that indexing with a dataarray works
    b = DataArray("b")
    assert_identical(da[1], da.sel(x=b))
    assert_identical(da[[1]], da.sel(x=slice(b, b)))
def test_sel_dataarray(self) -> None:
    """Selecting with a DataArray indexer propagates its dims and coords."""
    # indexing with DataArray
    self.ds["x"] = ("x", np.array(list("abcdefghij")))
    da = self.ds["foo"]
    ind = DataArray(["a", "b", "c"], dims=["x"])
    actual = da.sel(x=ind)
    assert_identical(actual, da.isel(x=[0, 1, 2]))
    # along new dimension
    ind = DataArray(["a", "b", "c"], dims=["new_dim"])
    actual = da.sel(x=ind)
    assert_array_equal(actual, da.isel(x=[0, 1, 2]))
    assert "new_dim" in actual.dims
    # with coordinate
    ind = DataArray(
        ["a", "b", "c"], dims=["new_dim"], coords={"new_dim": [0, 1, 2]}
    )
    actual = da.sel(x=ind)
    assert_array_equal(actual, da.isel(x=[0, 1, 2]))
    assert "new_dim" in actual.dims
    assert "new_dim" in actual.coords
    assert_equal(actual["new_dim"].drop_vars("x"), ind["new_dim"])
def test_sel_invalid_slice(self) -> None:
    """Slicing by a non-scalar array label must raise a ValueError."""
    arr = DataArray(np.arange(10), [("x", np.arange(10))])
    bad_slice = slice(arr.x)
    with pytest.raises(ValueError, match=r"cannot use non-scalar arrays"):
        arr.sel(x=bad_slice)
def test_sel_dataarray_datetime_slice(self) -> None:
    """Slicing by scalar DataArray datetime/timedelta endpoints (GH1240)."""
    # regression test for GH1240
    times = pd.date_range("2000-01-01", freq="D", periods=365)
    array = DataArray(np.arange(365), [("time", times)])
    result = array.sel(time=slice(array.time[0], array.time[-1]))
    assert_equal(result, array)
    # same round-trip with a timedelta index
    array = DataArray(np.arange(365), [("delta", times - times[0])])
    result = array.sel(delta=slice(array.delta[0], array.delta[-1]))
    assert_equal(result, array)
@pytest.mark.parametrize(
    ["coord_values", "indices"],
    (
        pytest.param(
            np.array([0.0, 0.111, 0.222, 0.333], dtype="float64"),
            slice(1, 3),
            id="float64",
        ),
        pytest.param(
            np.array([0.0, 0.111, 0.222, 0.333], dtype="float32"),
            slice(1, 3),
            id="float32",
        ),
        pytest.param(
            np.array([0.0, 0.111, 0.222, 0.333], dtype="float32"), [2], id="scalar"
        ),
    ),
)
def test_sel_float(self, coord_values, indices) -> None:
    """Exact-label selection works for float64 and float32 coordinates."""
    data_values = np.arange(4)
    arr = DataArray(data_values, coords={"x": coord_values}, dims="x")
    actual = arr.sel(x=coord_values[indices])
    expected = DataArray(
        data_values[indices], coords={"x": coord_values[indices]}, dims="x"
    )
    assert_equal(actual, expected)
def test_sel_float16(self) -> None:
    """float16 coordinates still select correctly but warn about deprecation."""
    data_values = np.arange(4)
    coord_values = np.array([0.0, 0.111, 0.222, 0.333], dtype="float16")
    indices = slice(1, 3)
    # pandas has no float16 Index, so construction warns
    message = "`pandas.Index` does not support the `float16` dtype.*"
    with pytest.warns(DeprecationWarning, match=message):
        arr = DataArray(data_values, coords={"x": coord_values}, dims="x")
    with pytest.warns(DeprecationWarning, match=message):
        expected = DataArray(
            data_values[indices], coords={"x": coord_values[indices]}, dims="x"
        )
    actual = arr.sel(x=coord_values[indices])
    assert_equal(actual, expected)
def test_sel_float_multiindex(self) -> None:
    """Selecting on a float32 MultiIndex level matches exactly (GH5691)."""
    # regression test https://github.com/pydata/xarray/issues/5691
    # test multi-index created from coordinates, one with dtype=float32
    lvl1 = ["a", "a", "b", "b"]
    lvl2 = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
    da = xr.DataArray(
        [1, 2, 3, 4], dims="x", coords={"lvl1": ("x", lvl1), "lvl2": ("x", lvl2)}
    )
    da = da.set_index(x=["lvl1", "lvl2"])
    actual = da.sel(lvl1="a", lvl2=0.1)
    expected = da.isel(x=0)
    assert_equal(actual, expected)
def test_sel_no_index(self) -> None:
    """Without an index, .sel falls back to positional indexing."""
    arr = DataArray(np.arange(10), dims="x")
    cases = [
        (arr[0], arr.sel(x=0)),
        (arr[:5], arr.sel(x=slice(5))),
        (arr[[0, -1]], arr.sel(x=[0, -1])),
        (arr[arr < 5], arr.sel(x=(arr < 5))),
    ]
    for expected, actual in cases:
        assert_identical(expected, actual)
def test_sel_method(self) -> None:
    """Inexact selection with method='pad'/'backfill' and tolerance."""
    data = DataArray(np.random.randn(3, 4), [("x", [0, 1, 2]), ("y", list("abcd"))])
    # without method=, inexact labels raise a KeyError hinting at `method`
    with pytest.raises(KeyError, match="Try setting the `method`"):
        data.sel(y="ab")
    expected = data.sel(y=["a", "b"])
    actual = data.sel(y=["ab", "ba"], method="pad")
    assert_identical(expected, actual)
    expected = data.sel(x=[1, 2])
    actual = data.sel(x=[0.9, 1.9], method="backfill", tolerance=1)
    assert_identical(expected, actual)
def test_sel_drop(self) -> None:
    """sel(..., drop=True) removes the selected scalar coordinate."""
    data = DataArray([1, 2, 3], [("x", [0, 1, 2])])
    expected = DataArray(1)
    selected = data.sel(x=0, drop=True)
    assert_identical(expected, selected)
    # drop=False keeps the scalar coordinate
    expected = DataArray(1, {"x": 0})
    selected = data.sel(x=0, drop=False)
    assert_identical(expected, selected)
    # drop=True is a no-op when there is no coordinate to drop
    data = DataArray([1, 2, 3], dims=["x"])
    expected = DataArray(1)
    selected = data.sel(x=0, drop=True)
    assert_identical(expected, selected)
def test_isel_drop(self) -> None:
    """isel(..., drop=True/False) controls keeping the scalar coordinate."""
    data = DataArray([1, 2, 3], [("x", [0, 1, 2])])
    expected = DataArray(1)
    selected = data.isel(x=0, drop=True)
    assert_identical(expected, selected)
    expected = DataArray(1, {"x": 0})
    selected = data.isel(x=0, drop=False)
    assert_identical(expected, selected)
def test_head(self) -> None:
    """head() takes leading elements; validates argument types and sign."""
    assert_equal(self.dv.isel(x=slice(5)), self.dv.head(x=5))
    assert_equal(self.dv.isel(x=slice(0)), self.dv.head(x=0))
    # a single int applies to every dimension
    assert_equal(
        self.dv.isel({dim: slice(6) for dim in self.dv.dims}), self.dv.head(6)
    )
    # default is 5 along every dimension
    assert_equal(
        self.dv.isel({dim: slice(5) for dim in self.dv.dims}), self.dv.head()
    )
    with pytest.raises(TypeError, match=r"either dict-like or a single int"):
        self.dv.head([3])  # type: ignore[arg-type]
    with pytest.raises(TypeError, match=r"expected integer type"):
        self.dv.head(x=3.1)
    with pytest.raises(ValueError, match=r"expected positive int"):
        self.dv.head(-3)
def test_tail(self) -> None:
    """tail() takes trailing elements; validates argument types and sign."""
    assert_equal(self.dv.isel(x=slice(-5, None)), self.dv.tail(x=5))
    assert_equal(self.dv.isel(x=slice(0)), self.dv.tail(x=0))
    # a single int applies to every dimension
    assert_equal(
        self.dv.isel({dim: slice(-6, None) for dim in self.dv.dims}),
        self.dv.tail(6),
    )
    # default is 5 along every dimension
    assert_equal(
        self.dv.isel({dim: slice(-5, None) for dim in self.dv.dims}), self.dv.tail()
    )
    with pytest.raises(TypeError, match=r"either dict-like or a single int"):
        self.dv.tail([3])  # type: ignore[arg-type]
    with pytest.raises(TypeError, match=r"expected integer type"):
        self.dv.tail(x=3.1)
    with pytest.raises(ValueError, match=r"expected positive int"):
        self.dv.tail(-3)
def test_thin(self) -> None:
    """thin() subsamples with a stride; zero and bad arguments raise."""
    assert_equal(self.dv.isel(x=slice(None, None, 5)), self.dv.thin(x=5))
    # a single int applies to every dimension
    assert_equal(
        self.dv.isel({dim: slice(None, None, 6) for dim in self.dv.dims}),
        self.dv.thin(6),
    )
    with pytest.raises(TypeError, match=r"either dict-like or a single int"):
        self.dv.thin([3])  # type: ignore[arg-type]
    with pytest.raises(TypeError, match=r"expected integer type"):
        self.dv.thin(x=3.1)
    with pytest.raises(ValueError, match=r"expected positive int"):
        self.dv.thin(-3)
    # a stride of zero is meaningless
    with pytest.raises(ValueError, match=r"cannot be zero"):
        self.dv.thin(time=0)
def test_loc(self) -> None:
    """Label-based .loc indexing: slices, scalars, dicts, lists, booleans."""
    self.ds["x"] = ("x", np.array(list("abcdefghij")))
    da = self.ds["foo"]
    # typing issue: see https://github.com/python/mypy/issues/2410
    assert_identical(da[:3], da.loc[:"c"])  # type: ignore[misc]
    assert_identical(da[1], da.loc["b"])
    assert_identical(da[1], da.loc[{"x": "b"}])
    assert_identical(da[1], da.loc["b", ...])
    assert_identical(da[:3], da.loc[["a", "b", "c"]])
    assert_identical(da[:3, :4], da.loc[["a", "b", "c"], np.arange(4)])
    assert_identical(da[:, :4], da.loc[:, self.ds["y"] < 4])
def test_loc_datetime64_value(self) -> None:
    """Scalar datetime64 labels work with .loc (regression GH4283)."""
    t = np.array(["2017-09-05T12", "2017-09-05T15"], dtype="datetime64[ns]")
    arr = DataArray(np.ones(t.shape), dims=("time",), coords=(t,))
    first = arr.loc[{"time": t[0]}]
    assert_identical(first, arr[0])
def test_loc_assign(self) -> None:
    """In-place assignment through .loc with label slices and dicts."""
    self.ds["x"] = ("x", np.array(list("abcdefghij")))
    da = self.ds["foo"]
    # assignment
    # typing issue: see https://github.com/python/mypy/issues/2410
    da.loc["a":"j"] = 0  # type: ignore[misc]
    assert np.all(da.values == 0)
    da.loc[{"x": slice("a", "j")}] = 2
    assert np.all(da.values == 2)
    # repeating the assignment is idempotent
    da.loc[{"x": slice("a", "j")}] = 2
    assert np.all(da.values == 2)
    # Multi dimensional case
    da = DataArray(np.arange(12).reshape(3, 4), dims=["x", "y"])
    da.loc[0, 0] = 0
    assert da.values[0, 0] == 0
    assert da.values[0, 1] != 0
    # partial indexing assigns a whole row
    da = DataArray(np.arange(12).reshape(3, 4), dims=["x", "y"])
    da.loc[0] = 0
    assert np.all(da.values[0] == np.zeros(4))
    assert da.values[1, 0] != 0
def test_loc_assign_dataarray(self) -> None:
    """Assigning via .loc with DataArray indexers requires aligned coordinates."""

    def get_data():
        # fresh 3-D fixture with a non-dimension coordinate on x
        return DataArray(
            np.ones((4, 3, 2)),
            dims=["x", "y", "z"],
            coords={
                "x": np.arange(4),
                "y": ["a", "b", "c"],
                "non-dim": ("x", [1, 3, 4, 2]),
            },
        )

    da = get_data()
    # indexer with inconsistent coordinates.
    ind = DataArray(np.arange(1, 4), dims=["y"], coords={"y": np.random.randn(3)})
    with pytest.raises(IndexError, match=r"dimension coordinate 'y'"):
        da.loc[dict(x=ind)] = 0
    # indexer with consistent coordinates.
    ind = DataArray(np.arange(1, 4), dims=["x"], coords={"x": np.arange(1, 4)})
    da.loc[dict(x=ind)] = 0  # should not raise
    assert np.allclose(da[dict(x=ind)].values, 0)
    # assignment must not disturb existing coordinates
    assert_identical(da["x"], get_data()["x"])
    assert_identical(da["non-dim"], get_data()["non-dim"])
    da = get_data()
    # conflict in the assigning values
    value = xr.DataArray(
        np.zeros((3, 3, 2)),
        dims=["x", "y", "z"],
        coords={"x": [0, 1, 2], "non-dim": ("x", [0, 2, 4])},
    )
    with pytest.raises(IndexError, match=r"dimension coordinate 'x'"):
        da.loc[dict(x=ind)] = value
    # consistent coordinate in the assigning values
    value = xr.DataArray(
        np.zeros((3, 3, 2)),
        dims=["x", "y", "z"],
        coords={"x": [1, 2, 3], "non-dim": ("x", [0, 2, 4])},
    )
    da.loc[dict(x=ind)] = value
    assert np.allclose(da[dict(x=ind)].values, 0)
    assert_identical(da["x"], get_data()["x"])
    assert_identical(da["non-dim"], get_data()["non-dim"])
def test_loc_single_boolean(self) -> None:
    """Boolean scalars are valid .loc labels when the index holds booleans."""
    arr = DataArray([0, 1], coords=[[True, False]])
    assert arr.loc[True] == 0
    assert arr.loc[False] == 1
def test_loc_dim_name_collision_with_sel_params(self) -> None:
    """A dimension literally named 'method' must not shadow sel's keyword."""
    da = xr.DataArray(
        [[0, 0], [1, 1]],
        dims=["dim1", "method"],
        coords={"dim1": ["x", "y"], "method": ["a", "b"]},
    )
    np.testing.assert_array_equal(
        da.loc[dict(dim1=["x", "y"], method=["a"])], [[0], [1]]
    )
def test_selection_multiindex(self) -> None:
    """Selection on a MultiIndex: tuples, partial tuples, dicts, and slices."""
    mindex = pd.MultiIndex.from_product(
        [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
    )
    mdata = DataArray(range(8), [("x", mindex)])

    def test_sel(
        lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None
    ) -> None:
        # compare label-based selection against the positional equivalent;
        # replaced_idx marks cases where partial selection rebuilds the index
        da = mdata.sel(x=lab_indexer)
        expected_da = mdata.isel(x=pos_indexer)
        if not replaced_idx:
            assert_identical(da, expected_da)
        else:
            if renamed_dim:
                # dropping all but one level renames the dim to that level
                assert da.dims[0] == renamed_dim
                da = da.rename({renamed_dim: "x"})
            assert_identical(da.variable, expected_da.variable)
            assert not da["x"].equals(expected_da["x"])

    test_sel(("a", 1, -1), 0)
    test_sel(("b", 2, -2), -1)
    test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three")
    test_sel(("a",), range(4), replaced_idx=True)
    test_sel("a", range(4), replaced_idx=True)
    test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7])
    test_sel(slice("a", "b"), range(8))
    test_sel(slice(("a", 1), ("b", 1)), range(6))
    test_sel({"one": "a", "two": 1, "three": -1}, 0)
    test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three")
    test_sel({"one": "a"}, range(4), replaced_idx=True)
    assert_identical(mdata.loc["a"], mdata.sel(x="a"))
    assert_identical(mdata.loc[("a", 1), ...], mdata.sel(x=("a", 1)))
    assert_identical(mdata.loc[{"one": "a"}, ...], mdata.sel(x={"one": "a"}))
    # .loc with a bare partial tuple (no trailing ...) is ambiguous
    with pytest.raises(IndexError):
        mdata.loc[("a", 1)]
    assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1))
def test_selection_multiindex_remove_unused(self) -> None:
    """Unused MultiIndex levels are pruned after selection (GH2619)."""
    # GH2619. For MultiIndex, we need to call remove_unused.
    ds = xr.DataArray(
        np.arange(40).reshape(8, 5),
        dims=["x", "y"],
        coords={"x": np.arange(8), "y": np.arange(5)},
    )
    ds = ds.stack(xy=["x", "y"])
    ds_isel = ds.isel(xy=ds["x"] < 4)
    # labels removed by the mask must no longer be selectable
    with pytest.raises(KeyError):
        ds_isel.sel(x=5)
    actual = ds_isel.unstack()
    expected = ds.reset_index("xy").isel(xy=ds["x"] < 4)
    expected = expected.set_index(xy=["x", "y"]).unstack()
    assert_identical(expected, actual)
def test_selection_multiindex_from_level(self) -> None:
    """Selecting by one level of a concat-built MultiIndex (GH3512)."""
    # GH: 3512
    da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"})
    db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"})
    data = xr.concat(
        [da, db], dim="x", coords="different", compat="equals"
    ).set_index(xy=["x", "y"])
    assert data.dims == ("xy",)
    actual = data.sel(y="a")
    expected = data.isel(xy=[0, 1]).unstack("xy").squeeze("y")
    assert_equal(actual, expected)
def test_concat_with_default_coords_warns(self) -> None:
    """concat warns about changing default kwargs; new defaults keep one coord."""
    da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"})
    db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"})
    # legacy defaults broadcast the scalar coord along x (size 4) and warn
    with pytest.warns(FutureWarning):
        original = xr.concat([da, db], dim="x")
        assert original.y.size == 4
    with set_options(use_new_combine_kwarg_defaults=True):
        # default compat="override" will pick the first one
        new = xr.concat([da, db], dim="x")
        assert new.y.size == 1
def test_virtual_default_coords(self) -> None:
    """A dimension without explicit coords exposes a default range coordinate."""
    arr = DataArray(np.zeros((5,)), dims="x")
    default = DataArray(range(5), dims="x", name="x")
    for virtual in (arr["x"], arr.coords["x"]):
        assert_identical(default, virtual)
def test_virtual_time_components(self) -> None:
    """Datetime accessors such as 'time.dayofyear' act as virtual coordinates."""
    dates = pd.date_range("2000-01-01", periods=10)
    arr = DataArray(np.arange(1, 11), [("time", dates)])
    for virtual in (arr["time.dayofyear"], arr.coords["time.dayofyear"]):
        assert_array_equal(virtual, arr.values)
def test_coords(self) -> None:
    """Coords mapping: len/iter/contains, repr, dtypes, deletion, MultiIndex guard."""
    # use int64 to ensure repr() consistency on windows
    coords = [
        IndexVariable("x", np.array([-1, -2], "int64")),
        IndexVariable("y", np.array([0, 1, 2], "int64")),
    ]
    da = DataArray(np.random.randn(2, 3), coords, name="foo")
    # len
    assert len(da.coords) == 2
    # iter
    assert list(da.coords) == ["x", "y"]
    assert coords[0].identical(da.coords["x"])
    assert coords[1].identical(da.coords["y"])
    assert "x" in da.coords
    assert 0 not in da.coords
    assert "foo" not in da.coords
    with pytest.raises(KeyError):
        da.coords[0]
    with pytest.raises(KeyError):
        da.coords["foo"]
    # repr
    expected_repr = dedent(
        """\
        Coordinates:
          * x        (x) int64 16B -1 -2
          * y        (y) int64 24B 0 1 2"""
    )
    actual = repr(da.coords)
    assert expected_repr == actual
    # dtypes
    assert da.coords.dtypes == {"x": np.dtype("int64"), "y": np.dtype("int64")}
    # deleting a coord drops it; rebuild indexes to match remaining coords
    del da.coords["x"]
    da._indexes = filter_indexes_from_coords(da.xindexes, set(da.coords))
    expected = DataArray(da.values, {"y": [0, 1, 2]}, dims=["x", "y"], name="foo")
    assert_identical(da, expected)
    # overwriting a MultiIndex level coordinate would corrupt the index
    with pytest.raises(
        ValueError, match=r"cannot drop or update coordinate.*corrupt.*index "
    ):
        self.mda["level_1"] = ("x", np.arange(4))
        self.mda.coords["level_1"] = ("x", np.arange(4))
def test_coords_to_index(self) -> None:
    """coords.to_index() yields a pd.Index or MultiIndex honoring dim order."""
    da = DataArray(np.zeros((2, 3)), [("x", [1, 2]), ("y", list("abc"))])
    # a 0-d selection has no dimension coords to index
    with pytest.raises(ValueError, match=r"no valid index"):
        da[0, 0].coords.to_index()
    expected = pd.Index(["a", "b", "c"], name="y")
    actual = da[0].coords.to_index()
    assert expected.equals(actual)
    expected = pd.MultiIndex.from_product(
        [[1, 2], ["a", "b", "c"]], names=["x", "y"]
    )
    actual = da.coords.to_index()
    assert expected.equals(actual)
    # explicit ordering reverses the level order
    expected = pd.MultiIndex.from_product(
        [["a", "b", "c"], [1, 2]], names=["y", "x"]
    )
    actual = da.coords.to_index(["y", "x"])
    assert expected.equals(actual)
    with pytest.raises(ValueError, match=r"ordered_dims must match"):
        da.coords.to_index(["x"])
def test_coord_coords(self) -> None:
    """A coordinate DataArray carries its own coords, which can be edited."""
    orig = DataArray(
        [10, 20], {"x": [1, 2], "x2": ("x", ["a", "b"]), "z": 4}, dims="x"
    )
    actual = orig.coords["x"]
    expected = DataArray(
        [1, 2], {"z": 4, "x2": ("x", ["a", "b"]), "x": [1, 2]}, dims="x", name="x"
    )
    assert_identical(expected, actual)
    # deletion and assignment on the extracted coordinate work in place
    del actual.coords["x2"]
    assert_identical(expected.reset_coords("x2", drop=True), actual)
    actual.coords["x3"] = ("x", ["a", "b"])
    expected = DataArray(
        [1, 2], {"z": 4, "x3": ("x", ["a", "b"]), "x": [1, 2]}, dims="x", name="x"
    )
    assert_identical(expected, actual)
def test_reset_coords(self) -> None:
    """reset_coords: demote coords to variables, or drop them entirely."""
    data = DataArray(
        np.zeros((3, 4)),
        {"bar": ("x", ["a", "b", "c"]), "baz": ("y", range(4)), "y": range(4)},
        dims=["x", "y"],
        name="foo",
    )
    # no args: all non-index coords become Dataset variables
    actual1 = data.reset_coords()
    expected1 = Dataset(
        {
            "foo": (["x", "y"], np.zeros((3, 4))),
            "bar": ("x", ["a", "b", "c"]),
            "baz": ("y", range(4)),
            "y": range(4),
        }
    )
    assert_identical(actual1, expected1)
    actual2 = data.reset_coords(["bar", "baz"])
    assert_identical(actual2, expected1)
    # resetting a single coord: str and list forms are equivalent
    actual3 = data.reset_coords("bar")
    expected3 = Dataset(
        {"foo": (["x", "y"], np.zeros((3, 4))), "bar": ("x", ["a", "b", "c"])},
        {"baz": ("y", range(4)), "y": range(4)},
    )
    assert_identical(actual3, expected3)
    actual4 = data.reset_coords(["bar"])
    assert_identical(actual4, expected3)
    # drop=True removes the coords instead of demoting them
    actual5 = data.reset_coords(drop=True)
    expected5 = DataArray(
        np.zeros((3, 4)), coords={"y": range(4)}, dims=["x", "y"], name="foo"
    )
    assert_identical(actual5, expected5)
    actual6 = data.copy().reset_coords(drop=True)
    assert_identical(actual6, expected5)
    actual7 = data.reset_coords("bar", drop=True)
    expected7 = DataArray(
        np.zeros((3, 4)),
        {"baz": ("y", range(4)), "y": range(4)},
        dims=["x", "y"],
        name="foo",
    )
    assert_identical(actual7, expected7)
    # error cases: the variable itself, unknown names, and index coords
    with pytest.raises(ValueError, match=r"cannot be found"):
        data.reset_coords("foo", drop=True)
    with pytest.raises(ValueError, match=r"cannot be found"):
        data.reset_coords("not_found")
    with pytest.raises(ValueError, match=r"cannot remove index"):
        data.reset_coords("y")
    # non-dimension index coordinate
    midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("lvl1", "lvl2"))
    data = DataArray([1, 2, 3, 4], coords={"x": midx}, dims="x", name="foo")
    with pytest.raises(ValueError, match=r"cannot remove index"):
        data.reset_coords("lvl1")
def test_assign_coords(self) -> None:
    """assign_coords adds coords; invalid shapes or index corruption raise."""
    array = DataArray(10)
    actual = array.assign_coords(c=42)
    expected = DataArray(10, {"c": 42})
    assert_identical(actual, expected)
    # overwriting a MultiIndex level would corrupt the index
    with pytest.raises(
        ValueError, match=r"cannot drop or update coordinate.*corrupt.*index "
    ):
        self.mda.assign_coords(level_1=("x", range(4)))
    # GH: 2112
    da = xr.DataArray([0, 1, 2], dims="x")
    with pytest.raises(CoordinateValidationError):
        da["x"] = [0, 1, 2, 3]  # size conflict
    with pytest.raises(CoordinateValidationError):
        da.coords["x"] = [0, 1, 2, 3]  # size conflict
    with pytest.raises(CoordinateValidationError):
        da.coords["x"] = ("y", [1, 2, 3])  # no new dimension to a DataArray
def test_assign_coords_existing_multiindex(self) -> None:
    """Reassigning the dim coord of a MultiIndex emits a FutureWarning."""
    data = self.mda
    with pytest.warns(
        FutureWarning, match=r"updating coordinate.*MultiIndex.*inconsistent"
    ):
        data.assign_coords(x=range(4))
def test_assign_coords_custom_index(self) -> None:
    """assign_coords preserves a user-supplied custom Index object."""

    class CustomIndex(Index):
        pass

    coords = Coordinates(
        coords={"x": ("x", [1, 2, 3])}, indexes={"x": CustomIndex()}
    )
    da = xr.DataArray([0, 1, 2], dims="x")
    actual = da.assign_coords(coords)
    assert isinstance(actual.xindexes["x"], CustomIndex)
def test_assign_coords_no_default_index(self) -> None:
    """Coordinates created with indexes={} do not get a default index."""
    coords = Coordinates({"y": [1, 2, 3]}, indexes={})
    da = DataArray([1, 2, 3], dims="y")
    actual = da.assign_coords(coords)
    assert_identical(actual.coords, coords, check_default_indexes=False)
    assert "y" not in actual.xindexes
def test_assign_coords_extra_dim_index_coord(self) -> None:
    """Index coords with an extra dim may be attached without adding that dim."""

    class AnyIndex(Index):
        # force every index coordinate to be propagated onto the array
        def should_add_coord_to_array(self, name, var, dims):
            return True

    idx = AnyIndex()
    coords = Coordinates(
        coords={
            "x": ("x", [1, 2]),
            "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]),
        },
        indexes={"x": idx, "x_bounds": idx},
    )
    da = DataArray([1.0, 2.0], dims="x")
    actual = da.assign_coords(coords)
    expected = DataArray([1.0, 2.0], coords=coords, dims="x")
    assert_identical(actual, expected, check_default_indexes=False)
    # the bounds dimension stays on the coordinate, not the array
    assert "x_bnds" not in actual.dims
def test_coords_alignment(self) -> None:
    """Assigning a coord DataArray aligns it to the target's index (NaN-fill)."""
    lhs = DataArray([1, 2, 3], [("x", [0, 1, 2])])
    rhs = DataArray([2, 3, 4], [("x", [1, 2, 3])])
    lhs.coords["rhs"] = rhs
    expected = DataArray(
        [1, 2, 3], coords={"rhs": ("x", [np.nan, 2, 3]), "x": [0, 1, 2]}, dims="x"
    )
    assert_identical(lhs, expected)
def test_set_coords_update_index(self) -> None:
    """Replacing coordinate values refreshes the underlying pandas index."""
    arr = DataArray([1, 2, 3], [("x", [1, 2, 3])])
    new_labels = ["a", "b", "c"]
    arr.coords["x"] = new_labels
    assert arr.xindexes["x"].to_pandas_index().equals(pd.Index(new_labels))
def test_set_coords_multiindex_level(self) -> None:
    """Overwriting a MultiIndex level coordinate is rejected."""
    with pytest.raises(
        ValueError, match=r"cannot drop or update coordinate.*corrupt.*index "
    ):
        self.mda["level_1"] = range(4)
def test_coords_replacement_alignment(self) -> None:
    """Replacing a dim coord must not align away the data (GH725)."""
    # regression test for GH725
    arr = DataArray([0, 1, 2], dims=["abc"])
    new_coord = DataArray([1, 2, 3], dims=["abc"], coords=[[1, 2, 3]])
    arr["abc"] = new_coord
    expected = DataArray([0, 1, 2], coords=[("abc", [1, 2, 3])])
    assert_identical(arr, expected)
def test_coords_non_string(self) -> None:
    """Non-string (integer) coordinate names are supported."""
    arr = DataArray(0, coords={1: 2})
    looked_up = arr.coords[1]
    assert_identical(looked_up, DataArray(2, coords={1: 2}, name=1))
def test_coords_delitem_delete_indexes(self) -> None:
    """Deleting a coordinate also drops its index (regression GH3746)."""
    arr = DataArray(np.ones((2,)), dims="x", coords={"x": [0, 1]})
    del arr.coords["x"]
    assert "x" not in arr.xindexes
def test_coords_delitem_multiindex_level(self) -> None:
    """Deleting a single MultiIndex level coordinate is rejected."""
    with pytest.raises(
        ValueError, match=r"cannot remove coordinate.*corrupt.*index "
    ):
        del self.mda.coords["level_1"]
def test_broadcast_like(self) -> None:
    """broadcast_like matches the two-argument broadcast() results."""
    arr1 = DataArray(
        np.ones((2, 3)),
        dims=["x", "y"],
        coords={"x": ["a", "b"], "y": ["a", "b", "c"]},
    )
    arr2 = DataArray(
        np.ones((3, 2)),
        dims=["x", "y"],
        coords={"x": ["a", "b", "c"], "y": ["a", "b"]},
    )
    orig1, orig2 = broadcast(arr1, arr2)
    new1 = arr1.broadcast_like(arr2)
    new2 = arr2.broadcast_like(arr1)
    assert_identical(orig1, new1)
    assert_identical(orig2, new2)
    # disjoint dims: broadcast_like keeps the callee's dim order
    orig3 = DataArray(np.random.randn(5), [("x", range(5))])
    orig4 = DataArray(np.random.randn(6), [("y", range(6))])
    new3, new4 = broadcast(orig3, orig4)
    assert_identical(orig3.broadcast_like(orig4), new3.transpose("y", "x"))
    assert_identical(orig4.broadcast_like(orig3), new4)
def test_reindex_like(self) -> None:
    """reindex_like subsets to a smaller index and NaN-pads to a larger one."""
    foo = DataArray(np.random.randn(5, 6), [("x", range(5)), ("y", range(6))])
    bar = foo[:2, :2]
    # shrinking: exact subset of the original
    assert_identical(foo.reindex_like(bar), bar)
    # growing: positions absent from bar are filled with NaN
    padded = foo.copy()
    padded[:] = np.nan
    padded[:2, :2] = bar
    assert_identical(bar.reindex_like(foo), padded)
def test_reindex_like_no_index(self) -> None:
    """Unlabeled dims: reindex_like is a no-op on equal sizes, errors otherwise."""
    foo = DataArray(np.random.randn(5, 6), dims=["x", "y"])
    assert_identical(foo, foo.reindex_like(foo))
    bar = foo[:4]
    with pytest.raises(ValueError, match=r"different size for unlabeled"):
        foo.reindex_like(bar)
def test_reindex_regressions(self) -> None:
    """Regressions: wrong-dim indexer raises; complex dtype survives reindex."""
    da = DataArray(np.random.randn(5), coords=[("time", range(5))])
    time2 = DataArray(np.arange(5), dims="time2")
    # indexer on a different dimension name must raise
    with pytest.raises(ValueError):
        da.reindex(time=time2)
    # regression test for #736, reindex can not change complex nums dtype
    xnp = np.array([1, 2, 3], dtype=complex)
    x = DataArray(xnp, coords=[[0.1, 0.2, 0.3]])
    y = DataArray([2, 5, 6, 7, 8], coords=[[-1.1, 0.21, 0.31, 0.41, 0.51]])
    re_dtype = x.reindex_like(y, method="pad").dtype
    assert x.dtype == re_dtype
def test_reindex_method(self) -> None:
    """reindex with method='backfill' and scalar or per-label tolerance."""
    x = DataArray([10, 20], dims="y", coords={"y": [0, 1]})
    y = [-0.1, 0.5, 1.1]
    actual = x.reindex(y=y, method="backfill", tolerance=0.2)
    expected = DataArray([10, np.nan, np.nan], coords=[("y", y)])
    assert_identical(expected, actual)
    # tolerance can also be given per target label
    actual = x.reindex(y=y, method="backfill", tolerance=[0.1, 0.1, 0.01])
    expected = DataArray([10, np.nan, np.nan], coords=[("y", y)])
    assert_identical(expected, actual)
    # reindex_like with a Dataset target
    alt = Dataset({"y": y})
    actual = x.reindex_like(alt, method="backfill")
    expected = DataArray([10, 20, np.nan], coords=[("y", y)])
    assert_identical(expected, actual)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {None: 2, "u": 1}])
def test_reindex_fill_value(self, fill_value) -> None:
    """fill_value may be the default NA, a scalar, or a per-variable dict."""
    x = DataArray([10, 20], dims="y", coords={"y": [0, 1], "u": ("y", [1, 2])})
    y = [0, 1, 2]
    if fill_value == dtypes.NA:
        # if we supply the default, we expect the missing value for a
        # float array
        fill_value_var = fill_value_u = np.nan
    elif isinstance(fill_value, dict):
        # None keys the data variable; "u" keys the coordinate
        fill_value_var = fill_value[None]
        fill_value_u = fill_value["u"]
    else:
        fill_value_var = fill_value_u = fill_value
    actual = x.reindex(y=y, fill_value=fill_value)
    expected = DataArray(
        [10, 20, fill_value_var],
        dims="y",
        coords={"y": y, "u": ("y", [1, 2, fill_value_u])},
    )
    assert_identical(expected, actual)
@pytest.mark.parametrize("dtype", [str, bytes])
def test_reindex_str_dtype(self, dtype) -> None:
    """Reindexing by an identical str/bytes index preserves values and dtype."""
    data = DataArray(
        [1, 2], dims="x", coords={"x": np.array(["a", "b"], dtype=dtype)}
    )
    actual = data.reindex(x=data.x)
    expected = data
    assert_identical(expected, actual)
    assert actual.dtype == expected.dtype
def test_reindex_empty_array_dtype(self) -> None:
    """Reindexing an empty array keeps its dtype (GH7299)."""
    # Dtype of reindex result should match dtype of the original DataArray.
    # See GH issue #7299
    x = xr.DataArray([], dims=("x",), coords={"x": []}).astype("float32")
    y = x.reindex(x=[1.0, 2.0])
    assert x.dtype == y.dtype, (
        "Dtype of reindexed DataArray should match dtype of the original DataArray"
    )
    assert y.dtype == np.float32, (
        "Dtype of reindexed DataArray should remain float32"
    )
def test_rename(self) -> None:
    """rename() of the array name, dims, and coords via dicts and kwargs."""
    da = xr.DataArray(
        [1, 2, 3], dims="dim", name="name", coords={"coord": ("dim", [5, 6, 7])}
    )
    # change name
    renamed_name = da.rename("name_new")
    assert renamed_name.name == "name_new"
    expected_name = da.copy()
    expected_name.name = "name_new"
    assert_identical(renamed_name, expected_name)
    # change name to None?
    renamed_noname = da.rename(None)
    assert renamed_noname.name is None
    expected_noname = da.copy()
    expected_noname.name = None
    assert_identical(renamed_noname, expected_noname)
    # calling rename() with no arguments also clears the name
    renamed_noname = da.rename()
    assert renamed_noname.name is None
    assert_identical(renamed_noname, expected_noname)
    # change dim
    renamed_dim = da.rename({"dim": "dim_new"})
    assert renamed_dim.dims == ("dim_new",)
    expected_dim = xr.DataArray(
        [1, 2, 3],
        dims="dim_new",
        name="name",
        coords={"coord": ("dim_new", [5, 6, 7])},
    )
    assert_identical(renamed_dim, expected_dim)
    # change dim with kwargs
    renamed_dimkw = da.rename(dim="dim_new")
    assert renamed_dimkw.dims == ("dim_new",)
    assert_identical(renamed_dimkw, expected_dim)
    # change coords
    renamed_coord = da.rename({"coord": "coord_new"})
    assert "coord_new" in renamed_coord.coords
    expected_coord = xr.DataArray(
        [1, 2, 3], dims="dim", name="name", coords={"coord_new": ("dim", [5, 6, 7])}
    )
    assert_identical(renamed_coord, expected_coord)
    # change coords with kwargs
    renamed_coordkw = da.rename(coord="coord_new")
    assert "coord_new" in renamed_coordkw.coords
    assert_identical(renamed_coordkw, expected_coord)
    # change coord and dim
    renamed_both = da.rename({"dim": "dim_new", "coord": "coord_new"})
    assert renamed_both.dims == ("dim_new",)
    assert "coord_new" in renamed_both.coords
    expected_both = xr.DataArray(
        [1, 2, 3],
        dims="dim_new",
        name="name",
        coords={"coord_new": ("dim_new", [5, 6, 7])},
    )
    assert_identical(renamed_both, expected_both)
    # change coord and dim with kwargs
    renamed_bothkw = da.rename(dim="dim_new", coord="coord_new")
    assert renamed_bothkw.dims == ("dim_new",)
    assert "coord_new" in renamed_bothkw.coords
    assert_identical(renamed_bothkw, expected_both)
    # change all
    renamed_all = da.rename("name_new", dim="dim_new", coord="coord_new")
    assert renamed_all.name == "name_new"
    assert renamed_all.dims == ("dim_new",)
    assert "coord_new" in renamed_all.coords
    expected_all = xr.DataArray(
        [1, 2, 3],
        dims="dim_new",
        name="name_new",
        coords={"coord_new": ("dim_new", [5, 6, 7])},
    )
    assert_identical(renamed_all, expected_all)
def test_rename_dimension_coord_warnings(self) -> None:
    """Renaming a coord onto a dim name warns (no index created); no-op doesn't."""
    # create a dimension coordinate by renaming a dimension or coordinate
    # should raise a warning (no index created)
    da = DataArray([0, 0], coords={"x": ("y", [0, 1])}, dims="y")
    with pytest.warns(
        UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
    ):
        da.rename(x="y")
    da = xr.DataArray([0, 0], coords={"y": ("x", [0, 1])}, dims="x")
    with pytest.warns(
        UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
    ):
        da.rename(x="y")
    # No operation should not raise a warning
    da = xr.DataArray(
        data=np.ones((2, 3)),
        dims=["x", "y"],
        coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])},
    )
    with warnings.catch_warnings():
        # escalate any warning to an error to prove none is emitted
        warnings.simplefilter("error")
        da.rename(x="x")
def test_replace(self) -> None:
    """_replace: explicit attrs win over the replacement variable's attrs."""
    # Tests the `attrs` replacement and whether it interferes with a
    # `variable` replacement
    da = self.mda
    attrs1 = {"a1": "val1", "a2": 161}
    x = np.ones((10, 20))
    v = Variable(["x", "y"], x)
    assert da._replace(variable=v, attrs=attrs1).attrs == attrs1
    attrs2 = {"b1": "val2", "b2": 1312}
    va = Variable(["x", "y"], x, attrs2)
    # assuming passed `attrs` should prevail
    assert da._replace(variable=va, attrs=attrs1).attrs == attrs1
    # assuming `va.attrs` should be adopted
    assert da._replace(variable=va).attrs == attrs2
def test_init_value(self) -> None:
    """Scalar data is broadcast to the shape implied by dims/coords."""
    expected = DataArray(
        np.full((3, 4), 3), dims=["x", "y"], coords=[range(3), range(4)]
    )
    actual = DataArray(3, dims=["x", "y"], coords=[range(3), range(4)])
    assert_identical(expected, actual)
    # coords may cover only a subset of dims
    expected = DataArray(
        np.full((1, 10, 2), 0),
        dims=["w", "x", "y"],
        coords={"x": np.arange(10), "y": ["north", "south"]},
    )
    actual = DataArray(0, dims=expected.dims, coords=expected.coords)
    assert_identical(expected, actual)
    # omitting data entirely fills with NaN
    expected = DataArray(
        np.full((10, 2), np.nan), coords=[("x", np.arange(10)), ("y", ["a", "b"])]
    )
    actual = DataArray(coords=[("x", np.arange(10)), ("y", ["a", "b"])])
    assert_identical(expected, actual)
    # scalar data cannot be combined with dimensioned coords
    with pytest.raises(ValueError, match=r"different number of dim"):
        DataArray(np.array(1), coords={"x": np.arange(10)}, dims=["x"])
    with pytest.raises(ValueError, match=r"does not match the 0 dim"):
        DataArray(np.array(1), coords=[("x", np.arange(10))])
    def test_swap_dims(self) -> None:
        """``swap_dims`` relabels a dimension and rebuilds matching indexes."""
        array = DataArray(np.random.randn(3), {"x": list("abc")}, "x")
        expected = DataArray(array.values, {"x": ("y", list("abc"))}, dims="y")
        actual = array.swap_dims({"x": "y"})
        assert_identical(expected, actual)
        for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()):
            assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name])

        # as kwargs
        array = DataArray(np.random.randn(3), {"x": list("abc")}, "x")
        expected = DataArray(array.values, {"x": ("y", list("abc"))}, dims="y")
        actual = array.swap_dims(x="y")
        assert_identical(expected, actual)
        for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()):
            assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name])

        # multiindex case
        idx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"])
        array = DataArray(np.random.randn(3), {"y": ("x", idx)}, "x")
        expected = DataArray(array.values, {"y": idx}, "y")
        actual = array.swap_dims({"x": "y"})
        assert_identical(expected, actual)
        for dim_name in set().union(expected.xindexes.keys(), actual.xindexes.keys()):
            assert actual.xindexes[dim_name].equals(expected.xindexes[dim_name])
    def test_expand_dims_error(self) -> None:
        """``expand_dims`` rejects bad dim/axis combinations with clear errors."""
        array = DataArray(
            np.random.randn(3, 4),
            dims=["x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3)},
            attrs={"key": "entry"},
        )

        with pytest.raises(TypeError, match=r"dim should be Hashable or"):
            array.expand_dims(0)
        with pytest.raises(ValueError, match=r"lengths of dim and axis"):
            # dims and axis argument should be the same length
            array.expand_dims(dim=["a", "b"], axis=[1, 2, 3])
        with pytest.raises(ValueError, match=r"Dimension x already"):
            # Should not pass the already existing dimension.
            array.expand_dims(dim=["x"])
        # raise if duplicate
        with pytest.raises(ValueError, match=r"duplicate values"):
            array.expand_dims(dim=["y", "y"])
        with pytest.raises(ValueError, match=r"duplicate values"):
            array.expand_dims(dim=["y", "z"], axis=[1, 1])
        with pytest.raises(ValueError, match=r"duplicate values"):
            # axis 2 and -2 refer to the same position in the result
            array.expand_dims(dim=["y", "z"], axis=[2, -2])

        # out of bounds error, axis must be in [-4, 3]
        with pytest.raises(IndexError):
            array.expand_dims(dim=["y", "z"], axis=[2, 4])
        with pytest.raises(IndexError):
            array.expand_dims(dim=["y", "z"], axis=[2, -5])
        # Does not raise an IndexError
        array.expand_dims(dim=["y", "z"], axis=[2, -4])
        array.expand_dims(dim=["y", "z"], axis=[2, 3])

        array = DataArray(
            np.random.randn(3, 4),
            dims=["x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3)},
            attrs={"key": "entry"},
        )
        # a non-integer size for a new dimension is a TypeError
        with pytest.raises(TypeError):
            array.expand_dims({"new_dim": 3.2})

        # Attempt to use both dim and kwargs
        with pytest.raises(ValueError):
            array.expand_dims({"d": 4}, e=4)
    def test_expand_dims(self) -> None:
        """``expand_dims`` inserts size-1 dims and round-trips via ``squeeze``."""
        array = DataArray(
            np.random.randn(3, 4),
            dims=["x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3)},
            attrs={"key": "entry"},
        )
        # pass only dim label
        actual = array.expand_dims(dim="y")
        expected = DataArray(
            np.expand_dims(array.values, 0),
            dims=["y", "x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3)},
            attrs={"key": "entry"},
        )
        assert_identical(expected, actual)
        roundtripped = actual.squeeze("y", drop=True)
        assert_identical(array, roundtripped)

        # pass multiple dims
        actual = array.expand_dims(dim=["y", "z"])
        expected = DataArray(
            np.expand_dims(np.expand_dims(array.values, 0), 0),
            dims=["y", "z", "x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3)},
            attrs={"key": "entry"},
        )
        assert_identical(expected, actual)
        roundtripped = actual.squeeze(["y", "z"], drop=True)
        assert_identical(array, roundtripped)

        # pass multiple dims and axis. Axis is out of order
        actual = array.expand_dims(dim=["z", "y"], axis=[2, 1])
        expected = DataArray(
            np.expand_dims(np.expand_dims(array.values, 1), 2),
            dims=["x", "y", "z", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3)},
            attrs={"key": "entry"},
        )
        assert_identical(expected, actual)
        # make sure the attrs are tracked
        assert actual.attrs["key"] == "entry"
        roundtripped = actual.squeeze(["z", "y"], drop=True)
        assert_identical(array, roundtripped)

        # Negative axis and they are out of order
        actual = array.expand_dims(dim=["y", "z"], axis=[-1, -2])
        expected = DataArray(
            np.expand_dims(np.expand_dims(array.values, -1), -1),
            dims=["x", "dim_0", "z", "y"],
            coords={"x": np.linspace(0.0, 1.0, 3)},
            attrs={"key": "entry"},
        )
        assert_identical(expected, actual)
        assert actual.attrs["key"] == "entry"
        roundtripped = actual.squeeze(["y", "z"], drop=True)
        assert_identical(array, roundtripped)
    def test_expand_dims_with_scalar_coordinate(self) -> None:
        """Expanding along an existing scalar coordinate promotes it to size 1."""
        array = DataArray(
            np.random.randn(3, 4),
            dims=["x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3), "z": 1.0},
            attrs={"key": "entry"},
        )
        actual = array.expand_dims(dim="z")
        expected = DataArray(
            np.expand_dims(array.values, 0),
            dims=["z", "x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3), "z": np.ones(1)},
            attrs={"key": "entry"},
        )
        assert_identical(expected, actual)
        # drop=False keeps 'z' as a scalar coordinate after squeezing
        roundtripped = actual.squeeze(["z"], drop=False)
        assert_identical(array, roundtripped)
    def test_expand_dims_with_greater_dim_size(self) -> None:
        """``expand_dims`` with sizes/labels broadcasts the data accordingly."""
        array = DataArray(
            np.random.randn(3, 4),
            dims=["x", "dim_0"],
            coords={"x": np.linspace(0.0, 1.0, 3), "z": 1.0},
            attrs={"key": "entry"},
        )
        actual = array.expand_dims({"y": 2, "z": 1, "dim_1": ["a", "b", "c"]})

        expected_coords = {
            "y": [0, 1],
            "z": [1.0],
            "dim_1": ["a", "b", "c"],
            "x": np.linspace(0, 1, 3),
            "dim_0": range(4),
        }
        expected = DataArray(
            array.values * np.ones([2, 1, 3, 3, 4]),
            coords=expected_coords,
            dims=list(expected_coords.keys()),
            attrs={"key": "entry"},
        ).drop_vars(["y", "dim_0"])
        assert_identical(expected, actual)

        # Test with kwargs instead of passing dict to dim arg.
        other_way = array.expand_dims(dim_1=["a", "b", "c"])
        other_way_expected = DataArray(
            array.values * np.ones([3, 3, 4]),
            coords={
                "dim_1": ["a", "b", "c"],
                "x": np.linspace(0, 1, 3),
                "dim_0": range(4),
                "z": 1.0,
            },
            dims=["dim_1", "x", "dim_0"],
            attrs={"key": "entry"},
        ).drop_vars("dim_0")
        assert_identical(other_way_expected, other_way)
    def test_set_index(self) -> None:
        """``set_index`` builds/appends a MultiIndex from coordinate variables."""
        indexes = [self.mindex.get_level_values(n) for n in self.mindex.names]  # type: ignore[arg-type,unused-ignore] # pandas-stubs varies
        coords = {idx.name: ("x", idx) for idx in indexes}
        array = DataArray(self.mda.values, coords=coords, dims="x")
        expected = self.mda.copy()
        level_3 = ("x", [1, 2, 3, 4])
        array["level_3"] = level_3
        expected["level_3"] = level_3

        obj = array.set_index(x=self.mindex.names)
        assert_identical(obj, expected)

        # append=True adds a level to the existing MultiIndex
        obj = obj.set_index(x="level_3", append=True)
        expected = array.set_index(x=["level_1", "level_2", "level_3"])
        assert_identical(obj, expected)

        array = array.set_index(x=["level_1", "level_2", "level_3"])
        assert_identical(array, expected)

        array2d = DataArray(
            np.random.rand(2, 2),
            coords={"x": ("x", [0, 1]), "level": ("y", [1, 2])},
            dims=("x", "y"),
        )
        with pytest.raises(ValueError, match=r"dimension mismatch"):
            array2d.set_index(x="level")

        # Issue 3176: Ensure clear error message on key error.
        with pytest.raises(ValueError, match=r".*variable\(s\) do not exist"):
            obj.set_index(x="level_4")
def test_reset_index(self) -> None:
indexes = [self.mindex.get_level_values(n) for n in self.mindex.names] # type: ignore[arg-type,unused-ignore] # pandas-stubs varies
coords = {idx.name: ("x", idx) for idx in indexes}
expected = DataArray(self.mda.values, coords=coords, dims="x")
obj = self.mda.reset_index("x")
assert_identical(obj, expected, check_default_indexes=False)
assert len(obj.xindexes) == 0
obj = self.mda.reset_index(self.mindex.names)
assert_identical(obj, expected, check_default_indexes=False)
assert len(obj.xindexes) == 0
obj = self.mda.reset_index(["x", "level_1"])
assert_identical(obj, expected, check_default_indexes=False)
assert len(obj.xindexes) == 0
coords = {
"x": ("x", self.mindex.droplevel("level_1")),
"level_1": ("x", self.mindex.get_level_values("level_1")),
}
expected = DataArray(self.mda.values, coords=coords, dims="x")
obj = self.mda.reset_index(["level_1"])
assert_identical(obj, expected, check_default_indexes=False)
assert list(obj.xindexes) == ["x"]
assert type(obj.xindexes["x"]) is PandasIndex
expected = DataArray(self.mda.values, dims="x")
obj = self.mda.reset_index("x", drop=True)
assert_identical(obj, expected, check_default_indexes=False)
array = self.mda.copy()
array = array.reset_index(["x"], drop=True)
assert_identical(array, expected, check_default_indexes=False)
# single index
array = DataArray([1, 2], coords={"x": ["a", "b"]}, dims="x")
obj = array.reset_index("x")
print(obj.x.variable)
print(array.x.variable)
assert_equal(obj.x.variable, array.x.variable.to_base_variable())
assert len(obj.xindexes) == 0
def test_reset_index_keep_attrs(self) -> None:
coord_1 = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True})
da = DataArray([1, 0], [coord_1])
obj = da.reset_index("coord_1")
assert obj.coord_1.attrs == da.coord_1.attrs
assert len(obj.xindexes) == 0
    def test_reorder_levels(self) -> None:
        """``reorder_levels`` permutes MultiIndex levels; errors without one."""
        midx = self.mindex.reorder_levels(["level_2", "level_1"])
        expected = DataArray(self.mda.values, coords={"x": midx}, dims="x")

        obj = self.mda.reorder_levels(x=["level_2", "level_1"])
        assert_identical(obj, expected)

        array = DataArray([1, 2], dims="x")
        # no index at all on 'x' -> KeyError
        with pytest.raises(KeyError):
            array.reorder_levels(x=["level_1", "level_2"])

        array["x"] = [0, 1]
        # plain (non-multi) index on 'x' -> ValueError
        with pytest.raises(ValueError, match=r"has no MultiIndex"):
            array.reorder_levels(x=["level_1", "level_2"])
    def test_set_xindex(self) -> None:
        """``set_xindex`` attaches a custom Index class, forwarding build options."""
        da = DataArray(
            [1, 2, 3, 4], coords={"foo": ("x", ["a", "a", "b", "b"])}, dims="x"
        )

        class IndexWithOptions(Index):
            # minimal custom index that records the option it was built with
            def __init__(self, opt):
                self.opt = opt

            @classmethod
            def from_variables(cls, variables, options):
                return cls(options["opt"])

        indexed = da.set_xindex("foo", IndexWithOptions, opt=1)
        assert "foo" in indexed.xindexes
        assert indexed.xindexes["foo"].opt == 1  # type: ignore[attr-defined]
def test_dataset_getitem(self) -> None:
dv = self.ds["foo"]
assert_identical(dv, self.dv)
    def test_array_interface(self) -> None:
        """DataArray interoperates with numpy: asarray, methods, and ufuncs."""
        assert_array_equal(np.asarray(self.dv), self.x)
        # test patched in methods
        assert_array_equal(self.dv.astype(float), self.v.astype(float))
        assert_array_equal(self.dv.argsort(), self.v.argsort())
        assert_array_equal(self.dv.clip(2, 3), self.v.clip(2, 3))
        # test ufuncs
        expected = deepcopy(self.ds)
        expected["foo"][:] = np.sin(self.x)
        assert_equal(expected["foo"], np.sin(self.dv))
        # mixing DataArray with Variable keeps the DataArray result
        assert_array_equal(self.dv, np.maximum(self.v, self.dv))
        bar = Variable(["x", "y"], np.zeros((10, 20)))
        assert_equal(self.dv, np.maximum(self.dv, bar))
    def test_astype_attrs(self) -> None:
        """``astype`` keeps attrs by default and drops them with keep_attrs=False."""
        # Split into two loops for mypy - Variable, DataArray, and Dataset
        # don't share a common base class, so mypy infers type object for v,
        # which doesn't have the attrs or astype methods
        for v in [self.mda.copy(), self.ds.copy()]:
            v.attrs["foo"] = "bar"
            assert v.attrs == v.astype(float).attrs
            assert not v.astype(float, keep_attrs=False).attrs

        # Test Variable separately to avoid mypy inferring object type
        va = self.va.copy()
        va.attrs["foo"] = "bar"
        assert va.attrs == va.astype(float).attrs
        assert not va.astype(float, keep_attrs=False).attrs
def test_astype_dtype(self) -> None:
original = DataArray([-1, 1, 2, 3, 1000])
converted = original.astype(float)
assert_array_equal(original, converted)
assert np.issubdtype(original.dtype, np.integer)
assert np.issubdtype(converted.dtype, np.floating)
def test_astype_order(self) -> None:
original = DataArray([[1, 2], [3, 4]])
converted = original.astype("d", order="F")
assert_equal(original, converted)
assert original.values.flags["C_CONTIGUOUS"]
assert converted.values.flags["F_CONTIGUOUS"]
    def test_astype_subok(self) -> None:
        """``astype(subok=...)`` controls whether ndarray subclasses survive."""

        class NdArraySubclass(np.ndarray):
            pass

        original = DataArray(NdArraySubclass(np.arange(3)))
        converted_not_subok = original.astype("d", subok=False)
        converted_subok = original.astype("d", subok=True)
        if not isinstance(original.data, NdArraySubclass):
            # the constructor may coerce to plain ndarray first; nothing to test
            pytest.xfail("DataArray cannot be backed yet by a subclasses of np.ndarray")
        assert isinstance(converted_not_subok.data, np.ndarray)
        assert not isinstance(converted_not_subok.data, NdArraySubclass)
        assert isinstance(converted_subok.data, NdArraySubclass)
def test_is_null(self) -> None:
x = np.random.default_rng(42).random((5, 6))
x[x < 0] = np.nan
original = DataArray(x, [-np.arange(5), np.arange(6)], ["x", "y"])
expected = DataArray(pd.isnull(x), [-np.arange(5), np.arange(6)], ["x", "y"])
assert_identical(expected, original.isnull())
assert_identical(~expected, original.notnull())
    def test_math(self) -> None:
        """Arithmetic with scalars, ndarrays and Variables all yield DataArrays."""
        x = self.x
        v = self.v
        a = self.dv
        # variable math was already tested extensively, so let's just make sure
        # that all types are properly converted here
        assert_equal(a, +a)
        assert_equal(a, a + 0)
        assert_equal(a, 0 + a)
        assert_equal(a, a + 0 * v)
        assert_equal(a, 0 * v + a)
        assert_equal(a, a + 0 * x)
        assert_equal(a, 0 * x + a)
        assert_equal(a, a + 0 * a)
        assert_equal(a, 0 * a + a)
def test_math_automatic_alignment(self) -> None:
a = DataArray(range(5), [("x", range(5))])
b = DataArray(range(5), [("x", range(1, 6))])
expected = DataArray(np.ones(4), [("x", [1, 2, 3, 4])])
assert_identical(a - b, expected)
def test_non_overlapping_dataarrays_return_empty_result(self) -> None:
a = DataArray(range(5), [("x", range(5))])
result = a.isel(x=slice(2)) + a.isel(x=slice(2, None))
assert len(result["x"]) == 0
def test_empty_dataarrays_return_empty_result(self) -> None:
a = DataArray(data=[])
result = a * a
assert len(result["dim_0"]) == 0
    def test_inplace_math_basics(self) -> None:
        """``+=`` mutates in place: same object, same Variable, same buffer."""
        x = self.x
        a = self.dv
        v = a.variable
        b = a
        b += 1
        assert b is a
        assert b.variable is v
        assert_array_equal(b.values, x)
        # the underlying ndarray must be the very same buffer, not a copy
        assert source_ndarray(b.values) is x
    def test_inplace_math_error(self) -> None:
        """In-place ops on an IndexVariable raise and leave it unmodified."""
        data = np.random.rand(4)
        times = np.arange(4)
        foo = DataArray(data, coords=[times], dims=["time"])
        b = times.copy()
        with pytest.raises(
            TypeError, match=r"Values of an IndexVariable are immutable"
        ):
            foo.coords["time"] += 1
        # Check error throwing prevented inplace operation
        assert_array_equal(foo.coords["time"], b)
    def test_inplace_math_automatic_alignment(self) -> None:
        """In-place ops refuse automatic alignment of mismatched coordinates."""
        a = DataArray(range(5), [("x", range(5))])
        b = DataArray(range(1, 6), [("x", range(1, 6))])
        with pytest.raises(xr.MergeError, match="Automatic alignment is not supported"):
            a += b
        with pytest.raises(xr.MergeError, match="Automatic alignment is not supported"):
            b += a
    def test_math_name(self) -> None:
        """Result names follow the pandas.Series rule for binary operations."""
        # Verify that name is preserved only when it can be done unambiguously.
        # The rule (copied from pandas.Series) is keep the current name only if
        # the other object has the same name or no name attribute and this
        # object isn't a coordinate; otherwise reset to None.
        a = self.dv
        assert (+a).name == "foo"
        assert (a + 0).name == "foo"
        assert (a + a.rename(None)).name is None
        assert (a + a.rename("bar")).name is None
        assert (a + a).name == "foo"
        assert (+a["x"]).name == "x"
        assert (a["x"] + 0).name == "x"
        assert (a + a["x"]).name is None
    def test_math_with_coords(self) -> None:
        """Binary ops keep compatible coordinates and drop conflicting ones."""
        coords = {
            "x": [-1, -2],
            "y": ["ab", "cd", "ef"],
            "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]),
            "c": -999,
        }
        orig = DataArray(np.random.randn(2, 3), coords, dims=["x", "y"])

        actual = orig + 1
        expected = DataArray(orig.values + 1, orig.coords)
        assert_identical(expected, actual)

        actual = 1 + orig
        assert_identical(expected, actual)

        # adding a scalar selection drops the 2-D 'lat' coordinate
        actual = orig + orig[0, 0]
        exp_coords = {k: v for k, v in coords.items() if k != "lat"}
        expected = DataArray(
            orig.values + orig.values[0, 0], exp_coords, dims=["x", "y"]
        )
        assert_identical(expected, actual)

        actual = orig[0, 0] + orig
        assert_identical(expected, actual)

        actual = orig[0, 0] + orig[-1, -1]
        expected = DataArray(orig.values[0, 0] + orig.values[-1, -1], {"c": -999})
        assert_identical(expected, actual)

        # broadcasting a row against a column
        actual = orig[:, 0] + orig[0, :]
        exp_values = orig[:, 0].values[:, None] + orig[0, :].values[None, :]
        expected = DataArray(exp_values, exp_coords, dims=["x", "y"])
        assert_identical(expected, actual)

        actual = orig[0, :] + orig[:, 0]
        assert_identical(expected.transpose(transpose_coords=True), actual)

        actual = orig - orig.transpose(transpose_coords=True)
        expected = DataArray(np.zeros((2, 3)), orig.coords)
        assert_identical(expected, actual)

        actual = orig.transpose(transpose_coords=True) - orig
        assert_identical(expected.transpose(transpose_coords=True), actual)

        # conflicting scalar coord 'c' is dropped; non-conflicting 'd' is kept
        alt = DataArray([1, 1], {"x": [-1, -2], "c": "foo", "d": 555}, "x")
        actual = orig + alt
        expected = orig + 1
        expected.coords["d"] = 555
        del expected.coords["c"]
        assert_identical(expected, actual)

        actual = alt + orig
        assert_identical(expected, actual)
    def test_index_math(self) -> None:
        """Arithmetic and comparisons on an index-named array work (GH254)."""
        orig = DataArray(range(3), dims="x", name="x")
        actual = orig + 1
        expected = DataArray(1 + np.arange(3), dims="x", name="x")
        assert_identical(expected, actual)

        # regression tests for #254
        actual = orig[0] < orig
        expected = DataArray([False, True, True], dims="x", name="x")
        assert_identical(expected, actual)

        actual = orig > orig[0]
        assert_identical(expected, actual)
    def test_dataset_math(self) -> None:
        """Arithmetic between Dataset variables aligns and keeps coordinates."""
        # more comprehensive tests with multiple dataset variables
        obs = Dataset(
            {"tmin": ("x", np.arange(5)), "tmax": ("x", 10 + np.arange(5))},
            {"x": ("x", 0.5 * np.arange(5)), "loc": ("x", range(-2, 3))},
        )

        actual1 = 2 * obs["tmax"]
        expected1 = DataArray(2 * (10 + np.arange(5)), obs.coords, name="tmax")
        assert_identical(actual1, expected1)

        actual2 = obs["tmax"] - obs["tmin"]
        expected2 = DataArray(10 * np.ones(5), obs.coords)
        assert_identical(actual2, expected2)

        sim = Dataset(
            {
                "tmin": ("x", 1 + np.arange(5)),
                "tmax": ("x", 11 + np.arange(5)),
                # does *not* include 'loc' as a coordinate
                "x": ("x", 0.5 * np.arange(5)),
            }
        )

        actual3 = sim["tmin"] - obs["tmin"]
        expected3 = DataArray(np.ones(5), obs.coords, name="tmin")
        assert_identical(actual3, expected3)

        actual4 = -obs["tmin"] + sim["tmin"]
        assert_identical(actual4, expected3)

        # in-place variant
        actual5 = sim["tmin"].copy()
        actual5 -= obs["tmin"]
        assert_identical(actual5, expected3)

        # assigning the result back into a Dataset
        actual6 = sim.copy()
        actual6["tmin"] = sim["tmin"] - obs["tmin"]
        expected6 = Dataset(
            {"tmin": ("x", np.ones(5)), "tmax": ("x", sim["tmax"].values)}, obs.coords
        )
        assert_identical(actual6, expected6)

        actual7 = sim.copy()
        actual7["tmin"] -= obs["tmin"]
        assert_identical(actual7, expected6)
    def test_stack_unstack(self) -> None:
        """``stack``/``unstack`` round-trip, including empty and multi-dim cases."""
        orig = DataArray(
            [[0, 1], [2, 3]],
            dims=["x", "y"],
            attrs={"foo": 2},
        )
        # unstacking with no stacked dims is a no-op
        assert_identical(orig, orig.unstack())

        # test GH3000: stacking an empty slice keeps correct MultiIndex dtypes
        a = orig[:0, :1].stack(new_dim=("x", "y")).indexes["new_dim"]
        b = pd.MultiIndex(
            levels=[
                pd.Index([], dtype=np.int64),  # type: ignore[list-item,unused-ignore]
                pd.Index([0], dtype=np.int64),  # type: ignore[list-item,unused-ignore]
            ],
            codes=[[], []],
            names=["x", "y"],
        )
        pd.testing.assert_index_equal(a, b)

        actual = orig.stack(z=["x", "y"]).unstack("z").drop_vars(["x", "y"])
        assert_identical(orig, actual)

        # Ellipsis stacks all dimensions
        actual = orig.stack(z=[...]).unstack("z").drop_vars(["x", "y"])
        assert_identical(orig, actual)

        dims = ["a", "b", "c", "d", "e"]
        coords = {
            "a": [0],
            "b": [1, 2],
            "c": [3, 4, 5],
            "d": [6, 7],
            "e": [8],
        }
        orig = xr.DataArray(np.random.rand(1, 2, 3, 2, 1), coords=coords, dims=dims)
        stacked = orig.stack(ab=["a", "b"], cd=["c", "d"])

        unstacked = stacked.unstack(["ab", "cd"])
        assert_identical(orig, unstacked.transpose(*dims))

        # unstack() with no argument unstacks everything
        unstacked = stacked.unstack()
        assert_identical(orig, unstacked.transpose(*dims))
def test_stack_unstack_decreasing_coordinate(self) -> None:
# regression test for GH980
orig = DataArray(
np.random.rand(3, 4),
dims=("y", "x"),
coords={"x": np.arange(4), "y": np.arange(3, 0, -1)},
)
stacked = orig.stack(allpoints=["y", "x"])
actual = stacked.unstack("allpoints")
assert_identical(orig, actual)
    def test_unstack_pandas_consistency(self) -> None:
        """``unstack`` on a MultiIndex matches ``pandas.Series.unstack``."""
        df = pd.DataFrame({"foo": range(3), "x": ["a", "b", "b"], "y": [0, 0, 1]})
        s = df.set_index(["x", "y"])["foo"]
        expected = DataArray(s.unstack(), name="foo")
        actual = DataArray(s, dims="z").unstack("z")
        assert_identical(expected, actual)
    def test_unstack_requires_unique(self) -> None:
        """``unstack`` raises on a MultiIndex with duplicate entries."""
        df = pd.DataFrame({"foo": range(2), "x": ["a", "a"], "y": [0, 0]})
        s = df.set_index(["x", "y"])["foo"]

        with pytest.raises(
            ValueError, match="Cannot unstack MultiIndex containing duplicates"
        ):
            DataArray(s, dims="z").unstack("z")
    @pytest.mark.filterwarnings("error")
    def test_unstack_roundtrip_integer_array(self) -> None:
        """stack/unstack of integer data round-trips without warnings."""
        arr = xr.DataArray(
            np.arange(6).reshape(2, 3),
            coords={"x": ["a", "b"], "y": [0, 1, 2]},
            dims=["x", "y"],
        )

        stacked = arr.stack(z=["x", "y"])
        roundtripped = stacked.unstack()

        assert_identical(arr, roundtripped)
    def test_stack_nonunique_consistency(self, da) -> None:
        """``stack`` matches ``pandas.stack`` even with non-unique labels."""
        da = da.isel(time=0, drop=True)  # 2D
        actual = da.stack(z=["a", "x"])
        expected = DataArray(da.to_pandas().stack(), dims="z")
        assert_identical(expected, actual)
def test_to_unstacked_dataset_raises_value_error(self) -> None:
data = DataArray([0, 1], dims="x", coords={"x": [0, 1]})
with pytest.raises(ValueError, match="'x' is not a stacked coordinate"):
data.to_unstacked_dataset("x", 0)
    def test_transpose(self) -> None:
        """``transpose`` reorders dims, optionally transposing coordinates too."""
        da = DataArray(
            np.random.randn(3, 4, 5),
            dims=("x", "y", "z"),
            coords={
                "x": range(3),
                "y": range(4),
                "z": range(5),
                "xy": (("x", "y"), np.random.randn(3, 4)),
            },
        )

        # transpose_coords=False leaves the multi-dim coord 'xy' untouched
        actual = da.transpose(transpose_coords=False)
        expected = DataArray(da.values.T, dims=("z", "y", "x"), coords=da.coords)
        assert_equal(expected, actual)

        actual = da.transpose("z", "y", "x", transpose_coords=True)
        expected = DataArray(
            da.values.T,
            dims=("z", "y", "x"),
            coords={
                "x": da.x.values,
                "y": da.y.values,
                "z": da.z.values,
                "xy": (("y", "x"), da.xy.values.T),
            },
        )
        assert_equal(expected, actual)

        # same as previous but with ellipsis
        actual = da.transpose("z", ..., "x", transpose_coords=True)
        assert_equal(expected, actual)

        # same as previous but with a missing dimension
        actual = da.transpose(
            "z", "y", "x", "not_a_dim", transpose_coords=True, missing_dims="ignore"
        )
        assert_equal(expected, actual)

        # an incomplete dim list (without ellipsis) is an error
        with pytest.raises(ValueError):
            da.transpose("x", "y")

        with pytest.raises(ValueError):
            da.transpose("not_a_dim", "z", "x", ...)

        with pytest.warns(UserWarning):
            da.transpose("not_a_dim", "y", "x", ..., missing_dims="warn")
def test_squeeze(self) -> None:
assert_equal(self.dv.variable.squeeze(), self.dv.squeeze().variable)
    def test_squeeze_drop(self) -> None:
        """``squeeze`` with drop/axis controls which size-1 dims and coords go."""
        array = DataArray([1], [("x", [0])])
        expected = DataArray(1)
        actual = array.squeeze(drop=True)
        assert_identical(expected, actual)

        # drop=False keeps the squeezed coord as a scalar
        expected = DataArray(1, {"x": 0})
        actual = array.squeeze(drop=False)
        assert_identical(expected, actual)

        array = DataArray([[[0.0, 1.0]]], dims=["dim_0", "dim_1", "dim_2"])
        expected = DataArray([[0.0, 1.0]], dims=["dim_1", "dim_2"])
        actual = array.squeeze(axis=0)
        assert_identical(expected, actual)

        array = DataArray([[[[0.0, 1.0]]]], dims=["dim_0", "dim_1", "dim_2", "dim_3"])
        expected = DataArray([[0.0, 1.0]], dims=["dim_1", "dim_3"])
        actual = array.squeeze(axis=(0, 2))
        assert_identical(expected, actual)

        array = DataArray([[[0.0, 1.0]]], dims=["dim_0", "dim_1", "dim_2"])
        # axis and dim are mutually exclusive
        with pytest.raises(ValueError):
            array.squeeze(axis=0, dim="dim_1")
    def test_drop_coordinates(self) -> None:
        """``drop_vars`` removes coords; missing names error unless ignored."""
        expected = DataArray(np.random.randn(2, 3), dims=["x", "y"])
        arr = expected.copy()
        arr.coords["z"] = 2
        actual = arr.drop_vars("z")
        assert_identical(expected, actual)

        with pytest.raises(ValueError):
            arr.drop_vars("not found")

        actual = expected.drop_vars("not found", errors="ignore")
        assert_identical(actual, expected)

        with pytest.raises(ValueError, match=r"cannot be found"):
            arr.drop_vars("w")

        actual = expected.drop_vars("w", errors="ignore")
        assert_identical(actual, expected)

        # the array's own name does not count as a droppable variable
        renamed = arr.rename("foo")
        with pytest.raises(ValueError, match=r"cannot be found"):
            renamed.drop_vars("foo")

        actual = renamed.drop_vars("foo", errors="ignore")
        assert_identical(actual, renamed)
    def test_drop_vars_callable(self) -> None:
        """``drop_vars`` accepts a callable that selects names from the array."""
        A = DataArray(
            np.random.randn(2, 3), dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4, 5]}
        )
        expected = A.drop_vars(["x", "y"])
        actual = A.drop_vars(lambda x: x.indexes)
        assert_identical(expected, actual)
    def test_drop_multiindex_level(self) -> None:
        """Dropping one MultiIndex level drops the whole index (deprecated)."""
        # GH6505
        expected = self.mda.drop_vars(["x", "level_1", "level_2"])
        with pytest.warns(DeprecationWarning):
            actual = self.mda.drop_vars("level_1")
        assert_identical(expected, actual)
    def test_drop_all_multiindex_levels(self) -> None:
        """Dropping the dim plus every level removes the MultiIndex cleanly."""
        dim_levels = ["x", "level_1", "level_2"]
        actual = self.mda.drop_vars(dim_levels)
        # no error, multi-index dropped
        for key in dim_levels:
            assert key not in actual.xindexes
    def test_drop_index_labels(self) -> None:
        """``drop_sel`` removes labels; unknown labels error unless ignored."""
        arr = DataArray(np.random.randn(2, 3), coords={"y": [0, 1, 2]}, dims=["x", "y"])
        actual = arr.drop_sel(y=[0, 1])
        expected = arr[:, 2:]
        assert_identical(actual, expected)

        with pytest.raises((KeyError, ValueError), match=r"not .* in axis"):
            actual = arr.drop_sel(y=[0, 1, 3])

        actual = arr.drop_sel(y=[0, 1, 3], errors="ignore")
        assert_identical(actual, expected)

        # the positional `drop` API is deprecated in favour of drop_sel
        with pytest.warns(DeprecationWarning):
            arr.drop([0, 1, 3], dim="y", errors="ignore")  # type: ignore[arg-type]
def test_drop_index_positions(self) -> None:
arr = DataArray(np.random.randn(2, 3), dims=["x", "y"])
actual = arr.drop_isel(y=[0, 1])
expected = arr[:, 2:]
assert_identical(actual, expected)
    def test_drop_indexes(self) -> None:
        """``drop_indexes`` removes an index but keeps the coordinate data."""
        arr = DataArray([1, 2, 3], coords={"x": ("x", [1, 2, 3])}, dims="x")
        actual = arr.drop_indexes("x")
        assert "x" not in actual.xindexes

        actual = arr.drop_indexes("not_a_coord", errors="ignore")
        assert_identical(actual, arr)
    def test_dropna(self) -> None:
        """``dropna`` filters along a dim, honouring ``how`` and ``thresh``."""
        x = np.random.randn(4, 4)
        x[::2, 0] = np.nan  # NaN in column 0 of rows 0 and 2
        arr = DataArray(x, dims=["a", "b"])

        actual = arr.dropna("a")
        expected = arr[1::2]
        assert_identical(actual, expected)

        # no column is all-NaN, so how='all' keeps everything
        actual = arr.dropna("b", how="all")
        assert_identical(actual, arr)

        actual = arr.dropna("a", thresh=1)
        assert_identical(actual, arr)

        # only column 0 has fewer than 3 non-NaN values
        actual = arr.dropna("b", thresh=3)
        expected = arr[:, 1:]
        assert_identical(actual, expected)
def test_where(self) -> None:
arr = DataArray(np.arange(4), dims="x")
expected = arr.sel(x=slice(2))
actual = arr.where(arr.x < 2, drop=True)
assert_identical(actual, expected)
def test_where_lambda(self) -> None:
arr = DataArray(np.arange(4), dims="y")
expected = arr.sel(y=slice(2))
actual = arr.where(lambda x: x.y < 2, drop=True)
assert_identical(actual, expected)
    def test_where_other_lambda(self) -> None:
        """``where`` accepts a callable for the replacement values as well."""
        arr = DataArray(np.arange(4), dims="y")
        expected = xr.concat(
            [arr.sel(y=slice(2)), arr.sel(y=slice(2, None)) + 1], dim="y"
        )
        actual = arr.where(lambda x: x.y < 2, lambda x: x + 1)
        assert_identical(actual, expected)
    def test_where_string(self) -> None:
        """``where`` on string data fills masked entries with NaN (object dtype)."""
        array = DataArray(["a", "b"])
        expected = DataArray(np.array(["a", np.nan], dtype=object))
        actual = array.where([True, False])
        assert_identical(actual, expected)
    def test_cumops(self) -> None:
        """``cumsum``/``cumprod`` accumulate along all dims or a chosen one."""
        coords = {
            "x": [-1, -2],
            "y": ["ab", "cd", "ef"],
            "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]),
            "c": -999,
        }
        orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"])

        # no dim argument: accumulate over every dimension
        actual = orig.cumsum()
        expected = DataArray([[-1, -1, 0], [-4, -4, 0]], coords, dims=["x", "y"])
        assert_identical(expected, actual)

        actual = orig.cumsum("x")
        expected = DataArray([[-1, 0, 1], [-4, 0, 4]], coords, dims=["x", "y"])
        assert_identical(expected, actual)

        actual = orig.cumsum("y")
        expected = DataArray([[-1, -1, 0], [-3, -3, 0]], coords, dims=["x", "y"])
        assert_identical(expected, actual)

        actual = orig.cumprod("x")
        expected = DataArray([[-1, 0, 1], [3, 0, 3]], coords, dims=["x", "y"])
        assert_identical(expected, actual)

        actual = orig.cumprod("y")
        expected = DataArray([[-1, 0, 0], [-3, 0, 0]], coords, dims=["x", "y"])
        assert_identical(expected, actual)
    def test_reduce(self) -> None:
        """Reductions collapse dims, keep compatible coords, and handle uint."""
        coords = {
            "x": [-1, -2],
            "y": ["ab", "cd", "ef"],
            "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]),
            "c": -999,
        }
        orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"])

        # full reduction keeps only the scalar coord 'c'
        actual = orig.mean()
        expected = DataArray(0, {"c": -999})
        assert_identical(expected, actual)

        actual = orig.mean(["x", "y"])
        assert_identical(expected, actual)

        actual = orig.mean("x")
        expected = DataArray([-2, 0, 2], {"y": coords["y"], "c": -999}, "y")
        assert_identical(expected, actual)

        actual = orig.mean(["x"])
        assert_identical(expected, actual)

        actual = orig.mean("y")
        expected = DataArray([0, 0], {"x": coords["x"], "c": -999}, "x")
        assert_identical(expected, actual)

        assert_equal(self.dv.reduce(np.mean, "x").variable, self.v.reduce(np.mean, "x"))

        # count skips NaN entries
        orig = DataArray([[1, 0, np.nan], [3, 0, 3]], coords, dims=["x", "y"])
        actual = orig.count()
        expected = DataArray(5, {"c": -999})
        assert_identical(expected, actual)

        # uint support
        orig = DataArray(np.arange(6).reshape(3, 2).astype("uint"), dims=["x", "y"])
        assert orig.dtype.kind == "u"
        actual = orig.mean(dim="x", skipna=True)
        expected = DataArray(orig.values.astype(int), dims=["x", "y"]).mean("x")
        assert_equal(actual, expected)
    def test_reduce_keepdims(self) -> None:
        """``keepdims=True`` retains reduced dims with size 1, pruning coords."""
        coords = {
            "x": [-1, -2],
            "y": ["ab", "cd", "ef"],
            "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]),
            "c": -999,
        }
        orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"])

        # Mean on all axes loses non-constant coordinates
        actual = orig.mean(keepdims=True)
        expected = DataArray(
            orig.data.mean(keepdims=True),
            dims=orig.dims,
            coords={k: v for k, v in coords.items() if k == "c"},
        )
        assert_equal(actual, expected)

        assert actual.sizes["x"] == 1
        assert actual.sizes["y"] == 1

        # Mean on specific axes loses coordinates not involving that axis
        actual = orig.mean("y", keepdims=True)
        expected = DataArray(
            orig.data.mean(axis=1, keepdims=True),
            dims=orig.dims,
            coords={k: v for k, v in coords.items() if k not in ["y", "lat"]},
        )
        assert_equal(actual, expected)
    @requires_bottleneck
    def test_reduce_keepdims_bottleneck(self) -> None:
        """``keepdims`` works even for reducers without native keepdims support."""
        import bottleneck

        coords = {
            "x": [-1, -2],
            "y": ["ab", "cd", "ef"],
            "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]),
            "c": -999,
        }
        orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"])

        # Bottleneck does not have its own keepdims implementation
        actual = orig.reduce(bottleneck.nanmean, keepdims=True)
        expected = orig.mean(keepdims=True)
        assert_equal(actual, expected)
    def test_reduce_dtype(self) -> None:
        """The ``dtype`` argument to reductions controls the output dtype."""
        coords = {
            "x": [-1, -2],
            "y": ["ab", "cd", "ef"],
            "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]),
            "c": -999,
        }
        orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"])

        for dtype in [np.float16, np.float32, np.float64]:
            assert orig.astype(float).mean(dtype=dtype).dtype == dtype
    def test_reduce_out(self) -> None:
        """Reductions reject numpy's ``out=`` argument with a TypeError."""
        coords = {
            "x": [-1, -2],
            "y": ["ab", "cd", "ef"],
            "lat": (["x", "y"], [[1, 2, 3], [-1, -2, -3]]),
            "c": -999,
        }
        orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=["x", "y"])

        with pytest.raises(TypeError):
            orig.mean(out=np.ones(orig.shape))
    @pytest.mark.parametrize("compute_backend", ["numbagg", None], indirect=True)
    @pytest.mark.parametrize("skipna", [True, False, None])
    @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
    @pytest.mark.parametrize(
        "axis, dim",
        zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]], strict=True),
    )
    def test_quantile(self, q, axis, dim, skipna, compute_backend) -> None:
        """``quantile`` matches numpy percentiles across dims/skipna/backends."""
        va = self.va.copy(deep=True)
        va[0, 0] = np.nan

        actual = DataArray(va).quantile(q, dim=dim, keep_attrs=True, skipna=skipna)
        # skipna=None defaults to skipping NaN, matching nanpercentile
        _percentile_func = np.nanpercentile if skipna in (True, None) else np.percentile
        expected = _percentile_func(va.values, np.array(q) * 100, axis=axis)
        np.testing.assert_allclose(actual.values, expected)
        if is_scalar(q):
            assert "quantile" not in actual.dims
        else:
            assert "quantile" in actual.dims

        assert actual.attrs == self.attrs
    @pytest.mark.parametrize("method", ["midpoint", "lower"])
    def test_quantile_method(self, method) -> None:
        """The ``method`` argument is forwarded to numpy's quantile."""
        q = [0.25, 0.5, 0.75]
        actual = DataArray(self.va).quantile(q, method=method)

        expected = np.nanquantile(self.dv.values, np.array(q), method=method)

        np.testing.assert_allclose(actual.values, expected)
    @pytest.mark.filterwarnings(
        "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning"
    )
    @pytest.mark.parametrize("method", ["midpoint", "lower"])
    def test_quantile_interpolation_deprecated(self, method) -> None:
        """The old ``interpolation`` kwarg warns but still behaves like ``method``."""
        da = DataArray(self.va)
        q = [0.25, 0.5, 0.75]

        with pytest.warns(
            FutureWarning,
            match="`interpolation` argument to quantile was renamed to `method`",
        ):
            actual = da.quantile(q, interpolation=method)

        expected = da.quantile(q, method=method)

        np.testing.assert_allclose(actual.values, expected.values)

        # passing both keywords is an error
        with warnings.catch_warnings(record=True):
            with pytest.raises(TypeError, match="interpolation and method keywords"):
                da.quantile(q, method=method, interpolation=method)
    def test_reduce_keep_attrs(self) -> None:
        """Reductions keep attrs by default and honor explicit keep_attrs=True/False."""
        # Test default behavior (keeps attrs for reduction operations)
        vm = self.va.mean()
        assert len(vm.attrs) == len(self.attrs)
        assert vm.attrs == self.attrs

        # Test explicitly keeping attrs
        vm = self.va.mean(keep_attrs=True)
        assert len(vm.attrs) == len(self.attrs)
        assert vm.attrs == self.attrs

        # Test explicitly dropping attrs
        vm = self.va.mean(keep_attrs=False)
        assert len(vm.attrs) == 0
        assert vm.attrs == {}
    def test_assign_attrs(self) -> None:
        """assign_attrs returns a new object and leaves the original attrs untouched."""
        expected = DataArray([], attrs=dict(a=1, b=2))
        expected.attrs["a"] = 1
        expected.attrs["b"] = 2
        new = DataArray([])

        actual = DataArray([]).assign_attrs(a=1, b=2)
        assert_identical(actual, expected)
        # the untouched array keeps empty attrs
        assert new.attrs == {}

        expected.attrs["c"] = 3
        new_actual = actual.assign_attrs({"c": 3})
        assert_identical(new_actual, expected)
        # `actual` must not be mutated by the second assign_attrs call
        assert actual.attrs == {"a": 1, "b": 2}
    def test_drop_attrs(self) -> None:
        """drop_attrs clears attrs (deep reaches coords; deep=False keeps coord attrs)."""
        # Mostly tested in test_dataset.py, but adding a very small test here
        coord_ = DataArray([], attrs=dict(d=3, e=4))
        da = DataArray([], attrs=dict(a=1, b=2)).assign_coords(dict(coord_=coord_))
        assert da.drop_attrs().attrs == {}
        assert da.drop_attrs().coord_.attrs == {}
        assert da.drop_attrs(deep=False).coord_.attrs == dict(d=3, e=4)
    @pytest.mark.parametrize(
        "func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs]
    )
    def test_propagate_attrs(self, func) -> None:
        """Unary ops propagate attrs per the global keep_attrs option (default: keep)."""
        da = DataArray(self.va)

        # test defaults
        assert func(da).attrs == da.attrs

        with set_options(keep_attrs=False):
            assert func(da).attrs == {}

        with set_options(keep_attrs=True):
            assert func(da).attrs == da.attrs
    def test_fillna(self) -> None:
        """fillna with scalar, aligned DataArray, ndarray; dict and bad shapes raise."""
        a = DataArray([np.nan, 1, np.nan, 3], coords={"x": range(4)}, dims="x")
        actual = a.fillna(-1)
        expected = DataArray([-1, 1, -1, 3], coords={"x": range(4)}, dims="x")
        assert_identical(expected, actual)

        b = DataArray(range(4), coords={"x": range(4)}, dims="x")
        actual = a.fillna(b)
        expected = b.copy()
        assert_identical(expected, actual)

        actual = a.fillna(np.arange(4))
        assert_identical(expected, actual)

        # filler shorter than `a` still aligns on the shared index
        actual = a.fillna(b[:3])
        assert_identical(expected, actual)

        # empty filler leaves `a` unchanged
        actual = a.fillna(b[:0])
        assert_identical(a, actual)

        with pytest.raises(TypeError, match=r"fillna on a DataArray"):
            a.fillna({0: 0})

        with pytest.raises(ValueError, match=r"broadcast"):
            a.fillna(np.array([1, 2]))
    def test_align(self) -> None:
        """align(..., join='inner') restricts both arrays to the common index."""
        array = DataArray(
            np.random.random((6, 8)), coords={"x": list("abcdef")}, dims=["x", "y"]
        )
        array1, array2 = align(array, array[:5], join="inner")
        assert_identical(array1, array[:5])
        assert_identical(array2, array[:5])
    def test_align_dtype(self) -> None:
        """Outer align preserves float32 dtype (regression test for GH264)."""
        # regression test for #264
        x1 = np.arange(30)
        x2 = np.arange(5, 35)
        a = DataArray(np.random.random((30,)).astype(np.float32), [("x", x1)])
        b = DataArray(np.random.random((30,)).astype(np.float32), [("x", x2)])
        c, _d = align(a, b, join="outer")
        assert c.dtype == np.float32
    def test_align_copy(self) -> None:
        """copy=False shares the underlying ndarray when no realignment is needed."""
        x = DataArray([1, 2, 3], coords=[("a", [1, 2, 3])])
        y = DataArray([1, 2], coords=[("a", [3, 1])])

        expected_x2 = x
        expected_y2 = DataArray([2, np.nan, 1], coords=[("a", [1, 2, 3])])

        x2, y2 = align(x, y, join="outer", copy=False)
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        # x needed no reindex -> same buffer
        assert source_ndarray(x2.data) is source_ndarray(x.data)

        x2, y2 = align(x, y, join="outer", copy=True)
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        assert source_ndarray(x2.data) is not source_ndarray(x.data)

        # Trivial align - 1 element
        x = DataArray([1, 2, 3], coords=[("a", [1, 2, 3])])
        (x2,) = align(x, copy=False)
        assert_identical(x, x2)
        assert source_ndarray(x2.data) is source_ndarray(x.data)

        (x2,) = align(x, copy=True)
        assert_identical(x, x2)
        assert source_ndarray(x2.data) is not source_ndarray(x.data)
    def test_align_override(self) -> None:
        """join='override' copies the left indexes onto the right; size mismatch raises."""
        left = DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
        right = DataArray(
            np.arange(9).reshape((3, 3)),
            dims=["x", "y"],
            coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]},
        )

        expected_right = DataArray(
            np.arange(9).reshape(3, 3),
            dims=["x", "y"],
            coords={"x": [0, 1, 2], "y": [1, 2, 3]},
        )

        new_left, new_right = align(left, right, join="override")
        assert_identical(left, new_left)
        assert_identical(new_right, expected_right)

        # excluding "x" leaves the mismatched coord untouched
        new_left, new_right = align(left, right, exclude="x", join="override")
        assert_identical(left, new_left)
        assert_identical(right, new_right)

        new_left, new_right = xr.align(
            left.isel(x=0, drop=True), right, exclude="x", join="override"
        )
        assert_identical(left.isel(x=0, drop=True), new_left)
        assert_identical(right, new_right)

        with pytest.raises(
            ValueError, match=r"cannot align.*join.*override.*same size"
        ):
            align(left.isel(x=0).expand_dims("x"), right, join="override")
    @pytest.mark.parametrize(
        "darrays",
        [
            [
                DataArray(0),
                DataArray([1], [("x", [1])]),
                DataArray([2, 3], [("x", [2, 3])]),
            ],
            [
                DataArray([2, 3], [("x", [2, 3])]),
                DataArray([1], [("x", [1])]),
                DataArray(0),
            ],
        ],
    )
    def test_align_override_error(self, darrays) -> None:
        """join='override' rejects inputs whose dimension sizes differ."""
        with pytest.raises(
            ValueError, match=r"cannot align.*join.*override.*same size"
        ):
            xr.align(*darrays, join="override")
    def test_align_exclude(self) -> None:
        """Outer align with exclude=['b'] aligns 'a' but leaves 'b' coords distinct."""
        x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])])
        y = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, 20]), ("b", [5, 6])])
        z = DataArray([1], dims=["a"], coords={"a": [20], "b": 7})

        x2, y2, z2 = align(x, y, z, join="outer", exclude=["b"])
        expected_x2 = DataArray(
            [[3, 4], [1, 2], [np.nan, np.nan]],
            coords=[("a", [-2, -1, 20]), ("b", [3, 4])],
        )
        expected_y2 = DataArray(
            [[np.nan, np.nan], [1, 2], [3, 4]],
            coords=[("a", [-2, -1, 20]), ("b", [5, 6])],
        )
        expected_z2 = DataArray(
            [np.nan, np.nan, 1], dims=["a"], coords={"a": [-2, -1, 20], "b": 7}
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        assert_identical(expected_z2, z2)
    def test_align_indexes(self) -> None:
        """Explicit `indexes` fixes the output order/labels of an outer align."""
        x = DataArray([1, 2, 3], coords=[("a", [-1, 10, -2])])
        y = DataArray([1, 2], coords=[("a", [-2, -1])])

        x2, y2 = align(x, y, join="outer", indexes={"a": [10, -1, -2]})
        expected_x2 = DataArray([2, 1, 3], coords=[("a", [10, -1, -2])])
        expected_y2 = DataArray([np.nan, 2, 1], coords=[("a", [10, -1, -2])])
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)

        (x2,) = align(x, join="outer", indexes={"a": [-2, 7, 10, -1]})
        expected_x2 = DataArray([3, np.nan, 2, 1], coords=[("a", [-2, 7, 10, -1])])
        assert_identical(expected_x2, x2)
    def test_align_without_indexes_exclude(self) -> None:
        """Excluding the only (index-less) dim makes align a no-op."""
        arrays = [DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], dims=["x"])]
        result0, result1 = align(*arrays, exclude=["x"])
        assert_identical(result0, arrays[0])
        assert_identical(result1, arrays[1])
    def test_align_mixed_indexes(self) -> None:
        """An index-less array adopts the other's coord unless the dim is excluded."""
        array_no_coord = DataArray([1, 2], dims=["x"])
        array_with_coord = DataArray([1, 2], coords=[("x", ["a", "b"])])

        result0, result1 = align(array_no_coord, array_with_coord)
        assert_identical(result0, array_with_coord)
        assert_identical(result1, array_with_coord)

        result0, result1 = align(array_no_coord, array_with_coord, exclude=["x"])
        assert_identical(result0, array_no_coord)
        assert_identical(result1, array_with_coord)
    def test_align_without_indexes_errors(self) -> None:
        """Conflicting sizes on an index-less dimension cannot be aligned."""
        with pytest.raises(
            ValueError,
            match=r"cannot.*align.*dimension.*conflicting.*sizes.*",
        ):
            align(DataArray([1, 2, 3], dims=["x"]), DataArray([1, 2], dims=["x"]))

        with pytest.raises(
            ValueError,
            match=r"cannot.*align.*dimension.*conflicting.*sizes.*",
        ):
            align(
                DataArray([1, 2, 3], dims=["x"]),
                DataArray([1, 2], coords=[("x", [0, 1])]),
            )
    def test_align_str_dtype(self) -> None:
        """Outer align of string-labeled coords preserves the string dtype."""
        a = DataArray([0, 1], dims=["x"], coords={"x": ["a", "b"]})
        b = DataArray([1, 2], dims=["x"], coords={"x": ["b", "c"]})

        expected_a = DataArray(
            [0, 1, np.nan], dims=["x"], coords={"x": ["a", "b", "c"]}
        )
        expected_b = DataArray(
            [np.nan, 1, 2], dims=["x"], coords={"x": ["a", "b", "c"]}
        )

        actual_a, actual_b = xr.align(a, b, join="outer")

        assert_identical(expected_a, actual_a)
        assert expected_a.x.dtype == actual_a.x.dtype

        assert_identical(expected_b, actual_b)
        assert expected_b.x.dtype == actual_b.x.dtype
    def test_broadcast_on_vs_off_global_option_different_dims(self) -> None:
        """`arithmetic_broadcast` option toggles auto-broadcast across new dims."""
        xda_1 = xr.DataArray([1], dims="x1")
        xda_2 = xr.DataArray([1], dims="x2")

        with xr.set_options(arithmetic_broadcast=True):
            expected_xda = xr.DataArray([[1.0]], dims=("x1", "x2"))
            actual_xda = xda_1 / xda_2
            assert_identical(actual_xda, expected_xda)

        with xr.set_options(arithmetic_broadcast=False):
            with pytest.raises(
                ValueError,
                match=re.escape(
                    "Broadcasting is necessary but automatic broadcasting is disabled via "
                    "global option `'arithmetic_broadcast'`. "
                    "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting."
                ),
            ):
                xda_1 / xda_2
    @pytest.mark.parametrize("arithmetic_broadcast", [True, False])
    def test_broadcast_on_vs_off_global_option_same_dims(
        self, arithmetic_broadcast: bool
    ) -> None:
        """No error with broadcasting disabled when shapes already match."""
        # Ensure that no error is raised when arithmetic broadcasting is disabled,
        # when broadcasting is not needed. The two DataArrays have the same
        # dimensions of the same size.
        xda_1 = xr.DataArray([1], dims="x")
        xda_2 = xr.DataArray([1], dims="x")
        expected_xda = xr.DataArray([2.0], dims=("x",))

        with xr.set_options(arithmetic_broadcast=arithmetic_broadcast):
            assert_identical(xda_1 + xda_2, expected_xda)
            assert_identical(xda_1 + np.array([1.0]), expected_xda)
            assert_identical(np.array([1.0]) + xda_1, expected_xda)
    def test_broadcast_arrays(self) -> None:
        """broadcast() expands against each other's dims and transposes as needed."""
        x = DataArray([1, 2], coords=[("a", [-1, -2])], name="x")
        y = DataArray([1, 2], coords=[("b", [3, 4])], name="y")
        x2, y2 = broadcast(x, y)
        expected_coords = [("a", [-1, -2]), ("b", [3, 4])]
        expected_x2 = DataArray([[1, 1], [2, 2]], expected_coords, name="x")
        expected_y2 = DataArray([[1, 2], [1, 2]], expected_coords, name="y")
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)

        x = DataArray(np.random.randn(2, 3), dims=["a", "b"])
        y = DataArray(np.random.randn(3, 2), dims=["b", "a"])
        x2, y2 = broadcast(x, y)
        expected_x2 = x
        expected_y2 = y.T
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_broadcast_arrays_misaligned(self) -> None:
        """broadcast() on mismatched coords performs an implicit outer align first."""
        # broadcast on misaligned coords must auto-align
        x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])])
        y = DataArray([1, 2], coords=[("a", [-1, 20])])
        expected_x2 = DataArray(
            [[3, 4], [1, 2], [np.nan, np.nan]],
            coords=[("a", [-2, -1, 20]), ("b", [3, 4])],
        )
        expected_y2 = DataArray(
            [[np.nan, np.nan], [1, 1], [2, 2]],
            coords=[("a", [-2, -1, 20]), ("b", [3, 4])],
        )
        x2, y2 = broadcast(x, y)
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_broadcast_arrays_nocopy(self) -> None:
        """broadcast() keeps the original buffer when an input needs no change."""
        # Test that input data is not copied over in case
        # no alteration is needed
        x = DataArray([1, 2], coords=[("a", [-1, -2])], name="x")
        y = DataArray(3, name="y")
        expected_x2 = DataArray([1, 2], coords=[("a", [-1, -2])], name="x")
        expected_y2 = DataArray([3, 3], coords=[("a", [-1, -2])], name="y")

        x2, y2 = broadcast(x, y)
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        assert source_ndarray(x2.data) is source_ndarray(x.data)

        # single-element broadcast (trivial case)
        (x2,) = broadcast(x)
        assert_identical(x, x2)
        assert source_ndarray(x2.data) is source_ndarray(x.data)
    def test_broadcast_arrays_exclude(self) -> None:
        """broadcast(..., exclude=['b']) aligns/expands 'a' but never touches 'b'."""
        x = DataArray([[1, 2], [3, 4]], coords=[("a", [-1, -2]), ("b", [3, 4])])
        y = DataArray([1, 2], coords=[("a", [-1, 20])])
        z = DataArray(5, coords={"b": 5})

        x2, y2, z2 = broadcast(x, y, z, exclude=["b"])
        expected_x2 = DataArray(
            [[3, 4], [1, 2], [np.nan, np.nan]],
            coords=[("a", [-2, -1, 20]), ("b", [3, 4])],
        )
        expected_y2 = DataArray([np.nan, 1, 2], coords=[("a", [-2, -1, 20])])
        expected_z2 = DataArray(
            [5, 5, 5], dims=["a"], coords={"a": [-2, -1, 20], "b": 5}
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        assert_identical(expected_z2, z2)
    def test_broadcast_coordinates(self) -> None:
        """Broadcasting coordinate variables behaves like np.meshgrid (GH649)."""
        # regression test for GH649
        ds = Dataset({"a": (["x", "y"], np.ones((5, 6)))})
        x_bc, y_bc, a_bc = broadcast(ds.x, ds.y, ds.a)
        assert_identical(ds.a, a_bc)

        X, Y = np.meshgrid(np.arange(5), np.arange(6), indexing="ij")
        exp_x = DataArray(X, dims=["x", "y"], name="x")
        exp_y = DataArray(Y, dims=["x", "y"], name="y")
        assert_identical(exp_x, x_bc)
        assert_identical(exp_y, y_bc)
    def test_to_pandas(self) -> None:
        """to_pandas maps 0d->ndarray, 1d->Series, 2d->DataFrame; >2d raises."""
        # 0d
        actual_xr = DataArray(42).to_pandas()
        expected = np.array(42)
        assert_array_equal(actual_xr, expected)

        # 1d
        values = np.random.randn(3)
        index = pd.Index(["a", "b", "c"], name="x")
        da = DataArray(values, coords=[index])
        actual_s = da.to_pandas()
        assert_array_equal(np.asarray(actual_s.values), values)
        assert_array_equal(actual_s.index, index)
        assert_array_equal(actual_s.index.name, "x")

        # 2d
        values = np.random.randn(3, 2)
        da = DataArray(
            values, coords=[("x", ["a", "b", "c"]), ("y", [0, 1])], name="foo"
        )
        actual_df = da.to_pandas()
        assert_array_equal(np.asarray(actual_df.values), values)
        assert_array_equal(actual_df.index, ["a", "b", "c"])
        assert_array_equal(actual_df.columns, [0, 1])

        # roundtrips
        for shape in [(3,), (3, 4)]:
            dims = list("abc")[: len(shape)]
            da = DataArray(np.random.randn(*shape), dims=dims)
            # drop the default range coords added by the pandas roundtrip
            roundtripped = DataArray(da.to_pandas()).drop_vars(dims)
            assert_identical(da, roundtripped)

        with pytest.raises(ValueError, match=r"Cannot convert"):
            DataArray(np.random.randn(1, 2, 3, 4, 5)).to_pandas()
    def test_to_dataframe(self) -> None:
        """to_dataframe matches to_series, honors dim_order, and validates inputs."""
        # regression test for #260
        arr_np = np.random.randn(3, 4)

        arr = DataArray(arr_np, [("B", [1, 2, 3]), ("A", list("cdef"))], name="foo")
        expected_s = arr.to_series()
        actual_s = arr.to_dataframe()["foo"]
        assert_array_equal(np.asarray(expected_s.values), np.asarray(actual_s.values))
        assert_array_equal(np.asarray(expected_s.name), np.asarray(actual_s.name))
        assert_array_equal(expected_s.index.values, actual_s.index.values)

        actual_s = arr.to_dataframe(dim_order=["A", "B"])["foo"]
        assert_array_equal(arr_np.transpose().reshape(-1), np.asarray(actual_s.values))

        # regression test for coords with different dimensions
        arr.coords["C"] = ("B", [-1, -2, -3])
        expected_df = arr.to_series().to_frame()
        expected_df["C"] = [-1] * 4 + [-2] * 4 + [-3] * 4
        expected_df = expected_df[["C", "foo"]]
        actual_df = arr.to_dataframe()
        assert_array_equal(np.asarray(expected_df.values), np.asarray(actual_df.values))
        assert_array_equal(expected_df.columns.values, actual_df.columns.values)
        assert_array_equal(expected_df.index.values, actual_df.index.values)

        with pytest.raises(ValueError, match="does not match the set of dimensions"):
            arr.to_dataframe(dim_order=["B", "A", "C"])

        with pytest.raises(ValueError, match=r"cannot convert a scalar"):
            arr.sel(A="c", B=2).to_dataframe()

        arr.name = None  # unnamed
        with pytest.raises(ValueError, match=r"unnamed"):
            arr.to_dataframe()
    def test_to_dataframe_multiindex(self) -> None:
        """to_dataframe flattens a MultiIndex dim into MultiIndex rows (GH3008)."""
        # regression test for #3008
        arr_np = np.random.randn(4, 3)

        mindex = pd.MultiIndex.from_product([[1, 2], list("ab")], names=["A", "B"])

        arr = DataArray(arr_np, [("MI", mindex), ("C", [5, 6, 7])], name="foo")

        actual = arr.to_dataframe()
        index_pd = actual.index
        assert isinstance(index_pd, pd.MultiIndex)
        assert_array_equal(np.asarray(actual["foo"].values), arr_np.flatten())
        assert_array_equal(index_pd.names, list("ABC"))
        assert_array_equal(index_pd.levels[0], [1, 2])
        assert_array_equal(index_pd.levels[1], ["a", "b"])
        assert_array_equal(index_pd.levels[2], [5, 6, 7])

        # test converting a dataframe MultiIndexed along a single dimension
        mindex_single = pd.MultiIndex.from_product(
            [list(range(6)), list("ab")], names=["A", "B"]
        )
        arr_multi_single = DataArray(
            arr_np.flatten(), [("MI", mindex_single)], dims="MI", name="test"
        )
        actual_df = arr_multi_single.to_dataframe()
        expected_df = arr_multi_single.to_series().to_frame()
        assert expected_df.equals(actual_df)
    def test_to_dataframe_0length(self) -> None:
        """Zero-length arrays produce an empty frame with the right index names."""
        # regression test for #3008
        arr_np = np.random.randn(4, 0)

        mindex = pd.MultiIndex.from_product([[1, 2], list("ab")], names=["A", "B"])

        arr = DataArray(arr_np, [("MI", mindex), ("C", [])], name="foo")

        actual = arr.to_dataframe()
        assert len(actual) == 0
        assert_array_equal(actual.index.names, list("ABC"))
    @pytest.mark.parametrize(
        "x_dtype,y_dtype,v_dtype",
        [
            (np.uint32, np.float32, np.uint32),
            (np.int16, np.float64, np.int64),
            (np.uint8, np.float32, np.uint16),
            (np.int32, np.float32, np.int8),
        ],
    )
    def test_to_dataframe_coord_dtypes_2d(self, x_dtype, y_dtype, v_dtype) -> None:
        """Coordinate and data dtypes survive a 2d to_dataframe conversion."""
        x = np.array([1], dtype=x_dtype)
        y = np.array([1.0], dtype=y_dtype)
        v = np.array([[42]], dtype=v_dtype)
        da = DataArray(v, dims=["x", "y"], coords={"x": x, "y": y})
        df = da.to_dataframe(name="v").reset_index()

        # Check that coordinate dtypes are preserved
        assert df["x"].dtype == np.dtype(x_dtype), (
            f"x coord: expected {x_dtype}, got {df['x'].dtype}"
        )
        assert df["y"].dtype == np.dtype(y_dtype), (
            f"y coord: expected {y_dtype}, got {df['y'].dtype}"
        )
        assert df["v"].dtype == np.dtype(v_dtype), (
            f"v data: expected {v_dtype}, got {df['v'].dtype}"
        )
    @requires_dask_expr
    @requires_dask
    @pytest.mark.xfail(not has_dask_ge_2025_1_0, reason="dask-expr is broken")
    def test_to_dask_dataframe(self) -> None:
        """to_dask_dataframe mirrors to_dataframe, including dim_order and errors."""
        arr_np = np.arange(3 * 4).reshape(3, 4)
        arr = DataArray(arr_np, [("B", [1, 2, 3]), ("A", list("cdef"))], name="foo")
        expected_s = arr.to_series()
        actual = arr.to_dask_dataframe()["foo"]
        assert_array_equal(actual.values, np.asarray(expected_s.values))

        actual = arr.to_dask_dataframe(dim_order=["A", "B"])["foo"]
        assert_array_equal(arr_np.transpose().reshape(-1), actual.values)

        # regression test for coords with different dimensions

        arr.coords["C"] = ("B", [-1, -2, -3])
        expected_df = arr.to_series().to_frame()
        expected_df["C"] = [-1] * 4 + [-2] * 4 + [-3] * 4
        expected_df = expected_df[["C", "foo"]]
        actual = arr.to_dask_dataframe()[["C", "foo"]]

        assert_array_equal(expected_df.values, np.asarray(actual.values))
        assert_array_equal(
            expected_df.columns.values, np.asarray(actual.columns.values)
        )

        with pytest.raises(ValueError, match="does not match the set of dimensions"):
            arr.to_dask_dataframe(dim_order=["B", "A", "C"])

        arr.name = None
        with pytest.raises(
            ValueError,
            match="Cannot convert an unnamed DataArray",
        ):
            arr.to_dask_dataframe()
    def test_to_pandas_name_matches_coordinate(self) -> None:
        """An array named like its own dimension still converts cleanly."""
        # coordinate with same name as array
        arr = DataArray([1, 2, 3], dims="x", name="x")
        series = arr.to_series()
        assert_array_equal([1, 2, 3], list(series.values))
        assert_array_equal([0, 1, 2], list(series.index.values))
        assert "x" == series.name
        assert "x" == series.index.name

        frame = arr.to_dataframe()
        expected = series.to_frame()
        assert expected.equals(frame)
    def test_to_and_from_series(self) -> None:
        """to_series/from_series roundtrip, with and without a name."""
        expected = self.dv.to_dataframe()["foo"]
        actual = self.dv.to_series()
        assert_array_equal(expected.values, actual.values)
        assert_array_equal(expected.index.values, actual.index.values)
        assert "foo" == actual.name
        # test roundtrip
        assert_identical(self.dv, DataArray.from_series(actual).drop_vars(["x", "y"]))
        # test name is None
        actual.name = None
        expected_da = self.dv.rename(None)
        assert_identical(
            expected_da, DataArray.from_series(actual).drop_vars(["x", "y"])
        )
    def test_from_series_multiindex(self) -> None:
        """A stacked (MultiIndex) pandas object converts correctly (GH3951)."""
        # GH:3951
        df = pd.DataFrame({"B": [1, 2, 3], "A": [4, 5, 6]})
        df = df.rename_axis("num").rename_axis("alpha", axis=1)
        actual = df.stack("alpha").to_xarray()
        assert (actual.sel(alpha="B") == [1, 2, 3]).all()
        assert (actual.sel(alpha="A") == [4, 5, 6]).all()
    @requires_sparse
    def test_from_series_sparse(self) -> None:
        """from_series(sparse=True) yields a sparse.COO equal to the dense result."""
        import sparse

        series = pd.Series([1, 2], index=[("a", 1), ("b", 2)])

        actual_sparse = DataArray.from_series(series, sparse=True)
        actual_dense = DataArray.from_series(series, sparse=False)

        assert isinstance(actual_sparse.data, sparse.COO)
        # densify so assert_identical can compare against the dense version
        actual_sparse.data = actual_sparse.data.todense()
        assert_identical(actual_sparse, actual_dense)
    @requires_sparse
    def test_from_multiindex_series_sparse(self) -> None:
        """Sparse from_series on a MultiIndex keeps correct COO coords (GH4019)."""
        # regression test for GH4019
        import sparse

        idx = pd.MultiIndex.from_product(
            [list(np.arange(3)), list(np.arange(5))], names=["a", "b"]
        )
        series: pd.Series = pd.Series(
            np.random.default_rng(0).random(len(idx)), index=idx
        ).sample(n=5, random_state=3)

        dense = DataArray.from_series(series, sparse=False)
        expected_coords = sparse.COO.from_numpy(dense.data, np.nan).coords

        actual_sparse = xr.DataArray.from_series(series, sparse=True)
        actual_coords = actual_sparse.data.coords

        np.testing.assert_equal(actual_coords, expected_coords)
    def test_nbytes_does_not_load_data(self) -> None:
        """Accessing .nbytes uses metadata only and never materializes the data."""
        array = InaccessibleArray(np.zeros((3, 3), dtype="uint8"))
        da = xr.DataArray(array, dims=["x", "y"])

        # If xarray tries to instantiate the InaccessibleArray to compute
        # nbytes, the following will raise an error.
        # However, it should still be able to accurately give us information
        # about the number of bytes from the metadata
        assert da.nbytes == 9

        # Here we confirm that this does not depend on array having the
        # nbytes property, since it isn't really required by the array
        # interface. nbytes is more a property of arrays that have been
        # cast to numpy arrays.
        assert not hasattr(array, "nbytes")
    def test_to_and_from_empty_series(self) -> None:
        """Empty Series roundtrips through DataArray without error (GH697)."""
        # GH697
        expected: pd.Series[Any] = pd.Series([], dtype=np.float64)
        da = DataArray.from_series(expected)
        assert len(da) == 0
        actual = da.to_series()
        assert len(actual) == 0
        assert expected.equals(actual)
    def test_series_categorical_index(self) -> None:
        """A CategoricalIndex-backed Series converts and reprs without error (GH700)."""
        # regression test for GH700
        if not hasattr(pd, "CategoricalIndex"):
            pytest.skip("requires pandas with CategoricalIndex")

        s = pd.Series(np.arange(5), index=pd.CategoricalIndex(list("aabbc")))
        arr = DataArray(s)
        assert "a a b b" in repr(arr)  # should not error
    @pytest.mark.parametrize("use_dask", [True, False])
    @pytest.mark.parametrize("data", ["list", "array", True])
    @pytest.mark.parametrize("encoding", [True, False])
    def test_to_and_from_dict(
        self, encoding: bool, data: bool | Literal["list", "array"], use_dask: bool
    ) -> None:
        """to_dict/from_dict roundtrip across data formats, encoding, and dask."""
        if use_dask and not has_dask:
            pytest.skip("requires dask")
        encoding_data = {"bar": "spam"}
        array = DataArray(
            np.random.randn(2, 3), {"x": ["a", "b"]}, ["x", "y"], name="foo"
        )
        array.encoding = encoding_data

        return_data = array.to_numpy()
        coords_data = np.array(["a", "b"])
        if data == "list" or data is True:
            return_data = return_data.tolist()
            coords_data = coords_data.tolist()

        expected: dict[str, Any] = {
            "name": "foo",
            "dims": ("x", "y"),
            "data": return_data,
            "attrs": {},
            "coords": {"x": {"dims": ("x",), "data": coords_data, "attrs": {}}},
        }
        if encoding:
            expected["encoding"] = encoding_data

        if has_dask:
            da = array.chunk()
        else:
            da = array

        if data == "array" or data is False:
            # data="array" must not trigger computation of a dask-backed array
            with raise_if_dask_computes():
                actual = da.to_dict(encoding=encoding, data=data)
        else:
            actual = da.to_dict(encoding=encoding, data=data)

        # check that they are identical
        np.testing.assert_equal(expected, actual)

        # check roundtrip
        assert_identical(da, DataArray.from_dict(actual))

        # a more bare bones representation still roundtrips
        d = {
            "name": "foo",
            "dims": ("x", "y"),
            "data": da.values.tolist(),
            "coords": {"x": {"dims": "x", "data": ["a", "b"]}},
        }
        assert_identical(da, DataArray.from_dict(d))

        # and the most bare bones representation still roundtrips
        d = {"name": "foo", "dims": ("x", "y"), "data": da.values}
        assert_identical(da.drop_vars("x"), DataArray.from_dict(d))

        # missing a dims in the coords
        d = {
            "dims": ("x", "y"),
            "data": da.values,
            "coords": {"x": {"data": ["a", "b"]}},
        }
        with pytest.raises(
            ValueError,
            match=r"cannot convert dict when coords are missing the key 'dims'",
        ):
            DataArray.from_dict(d)

        # this one is missing some necessary information
        d = {"dims": "t"}
        with pytest.raises(
            ValueError, match=r"cannot convert dict without the key 'data'"
        ):
            DataArray.from_dict(d)

        # check the data=False option
        expected_no_data = expected.copy()
        del expected_no_data["data"]
        del expected_no_data["coords"]["x"]["data"]
        endiantype = "<U1" if sys.byteorder == "little" else ">U1"
        expected_no_data["coords"]["x"].update({"dtype": endiantype, "shape": (2,)})
        expected_no_data.update({"dtype": "float64", "shape": (2, 3)})
        actual_no_data = da.to_dict(data=False, encoding=encoding)
        assert expected_no_data == actual_no_data
    def test_to_and_from_dict_with_time_dim(self) -> None:
        """Dict roundtrip preserves a datetime64 dimension coordinate."""
        x = np.random.randn(10, 3)
        t = pd.date_range("20130101", periods=10)
        lat = [77.7, 83.2, 76]
        da = DataArray(x, {"t": t, "lat": lat}, dims=["t", "lat"])
        roundtripped = DataArray.from_dict(da.to_dict())
        assert_identical(da, roundtripped)
    def test_to_and_from_dict_with_nan_nat(self) -> None:
        """Dict roundtrip preserves NaN data and NaT timestamps."""
        y = np.random.randn(10, 3)
        y[2] = np.nan
        t = pd.Series(pd.date_range("20130101", periods=10))
        t[2] = np.nan
        lat = [77.7, 83.2, 76]
        da = DataArray(y, {"t": t, "lat": lat}, dims=["t", "lat"])
        roundtripped = DataArray.from_dict(da.to_dict())
        assert_identical(da, roundtripped)
    def test_to_dict_with_numpy_attrs(self) -> None:
        """to_dict converts numpy scalar/array attrs to plain Python objects."""
        # this doesn't need to roundtrip
        x = np.random.randn(10, 3)
        t = list("abcdefghij")
        lat = [77.7, 83.2, 76]
        attrs = {
            "created": np.float64(1998),
            "coords": np.array([37, -110.1, 100]),
            "maintainer": "bar",
        }
        da = DataArray(x, {"t": t, "lat": lat}, dims=["t", "lat"], attrs=attrs)
        expected_attrs = {
            "created": attrs["created"].item(),  # type: ignore[attr-defined]
            "coords": attrs["coords"].tolist(),  # type: ignore[attr-defined]
            "maintainer": "bar",
        }
        actual = da.to_dict()

        # check that they are identical
        assert expected_attrs == actual["attrs"]
    def test_to_masked_array(self) -> None:
        """to_masked_array roundtrips, respects copy=, and always returns array masks."""
        rs = np.random.default_rng(44)
        x = rs.random(size=(10, 20))
        x_masked = np.ma.masked_where(x < 0.5, x)
        da = DataArray(x_masked)

        # Test round trip
        x_masked_2 = da.to_masked_array()
        da_2 = DataArray(x_masked_2)
        assert_array_equal(x_masked, x_masked_2)
        assert_equal(da, da_2)

        da_masked_array = da.to_masked_array(copy=True)
        assert isinstance(da_masked_array, np.ma.MaskedArray)
        # Test masks
        assert_array_equal(da_masked_array.mask, x_masked.mask)
        # Test that mask is unpacked correctly
        assert_array_equal(da.values, x_masked.filled(np.nan))
        # Test that the underlying data (including nans) hasn't changed
        assert_array_equal(da_masked_array, x_masked.filled(np.nan))

        # Test that copy=False gives access to values
        masked_array = da.to_masked_array(copy=False)
        masked_array[0, 0] = 10.0
        assert masked_array[0, 0] == 10.0
        assert da[0, 0].values == 10.0
        assert masked_array.base is da.values
        assert isinstance(masked_array, np.ma.MaskedArray)

        # Test with some odd arrays
        for v in [4, np.nan, True, "4", "four"]:
            da = DataArray(v)
            ma = da.to_masked_array()
            assert isinstance(ma, np.ma.MaskedArray)

        # Fix GH issue 684 - masked arrays mask should be an array not a scalar
        N = 4
        v = range(N)
        da = DataArray(v)
        ma = da.to_masked_array()
        assert isinstance(ma.mask, np.ndarray) and len(ma.mask) == N
    def test_to_dataset_whole(self) -> None:
        """to_dataset wraps the whole array as one variable; name handling checked."""
        unnamed = DataArray([1, 2], dims="x")
        with pytest.raises(ValueError, match=r"unable to convert unnamed"):
            unnamed.to_dataset()

        actual = unnamed.to_dataset(name="foo")
        expected = Dataset({"foo": ("x", [1, 2])})
        assert_identical(expected, actual)

        named = DataArray([1, 2], dims="x", name="foo", attrs={"y": "testattr"})
        actual = named.to_dataset()
        expected = Dataset({"foo": ("x", [1, 2], {"y": "testattr"})})
        assert_identical(expected, actual)

        # Test promoting attrs
        actual = named.to_dataset(promote_attrs=True)
        expected = Dataset(
            {"foo": ("x", [1, 2], {"y": "testattr"})}, attrs={"y": "testattr"}
        )
        assert_identical(expected, actual)

        # positional arg is `dim`, not `name` -> TypeError
        with pytest.raises(TypeError):
            actual = named.to_dataset("bar")
    def test_to_dataset_split(self) -> None:
        """to_dataset(dim) splits along a dim into variables and roundtrips back."""
        array = DataArray(
            [[1, 2], [3, 4], [5, 6]],
            coords=[("x", list("abc")), ("y", [0.0, 0.1])],
            attrs={"a": 1},
        )
        expected = Dataset(
            {"a": ("y", [1, 2]), "b": ("y", [3, 4]), "c": ("y", [5, 6])},
            coords={"y": [0.0, 0.1]},
            attrs={"a": 1},
        )
        actual = array.to_dataset("x")
        assert_identical(expected, actual)

        # `name` is not allowed when splitting along a dim
        with pytest.raises(TypeError):
            array.to_dataset("x", name="foo")

        roundtripped = actual.to_dataarray(dim="x")
        assert_identical(array, roundtripped)

        # splitting a dim without a coordinate uses integer positions as names
        array = DataArray([1, 2, 3], dims="x")
        expected = Dataset({0: 1, 1: 2, 2: 3})
        actual = array.to_dataset("x")
        assert_identical(expected, actual)
    def test_to_dataset_retains_keys(self) -> None:
        """Non-string coordinate labels survive a to_dataset/to_dataarray roundtrip."""
        # use dates as convenient non-str objects. Not a specific date test
        import datetime

        dates = [datetime.date(2000, 1, d) for d in range(1, 4)]

        array = DataArray([1, 2, 3], coords=[("x", dates)], attrs={"a": 1})

        # convert to dataset and back again
        result = array.to_dataset("x").to_dataarray(dim="x")

        assert_equal(array, result)
    def test_to_dataset_coord_value_is_dim(self) -> None:
        """Splitting errors clearly when a label would shadow an existing coord (GH7823)."""
        # github issue #7823

        array = DataArray(
            np.zeros((3, 3)),
            coords={
                # 'a' is both a coordinate value and the name of a coordinate
                "x": ["a", "b", "c"],
                "a": [1, 2, 3],
            },
        )

        with pytest.raises(
            ValueError,
            match=(
                re.escape("dimension 'x' would produce the variables ('a',)")
                + ".*"
                + re.escape("DataArray.rename(a=...) or DataArray.assign_coords(x=...)")
            ),
        ):
            array.to_dataset("x")

        # test error message formatting when there are multiple ambiguous
        # values/coordinates
        array2 = DataArray(
            np.zeros((3, 3, 2)),
            coords={
                "x": ["a", "b", "c"],
                "a": [1, 2, 3],
                "b": [0.0, 0.1],
            },
        )

        with pytest.raises(
            ValueError,
            match=(
                re.escape("dimension 'x' would produce the variables ('a', 'b')")
                + ".*"
                + re.escape(
                    "DataArray.rename(a=..., b=...) or DataArray.assign_coords(x=...)"
                )
            ),
        ):
            array2.to_dataset("x")
    def test__title_for_slice(self) -> None:
        """_title_for_slice lists scalar coords; multi-dim arrays get no title."""
        array = DataArray(
            np.ones((4, 3, 2)),
            dims=["a", "b", "c"],
            coords={"a": range(4), "b": range(3), "c": range(2)},
        )
        assert "" == array._title_for_slice()
        assert "c = 0" == array.isel(c=0)._title_for_slice()
        title = array.isel(b=1, c=0)._title_for_slice()
        # coordinate order in the title is unspecified
        assert title in {"b = 1, c = 0", "c = 0, b = 1"}

        a2 = DataArray(np.ones((4, 1)), dims=["a", "b"])
        assert "" == a2._title_for_slice()
    def test__title_for_slice_truncate(self) -> None:
        """Overlong slice titles are truncated to `truncate` chars ending in '...'."""
        array = DataArray(np.ones(4))
        array.coords["a"] = "a" * 100
        array.coords["b"] = "b" * 100

        nchar = 80
        title = array._title_for_slice(truncate=nchar)

        assert nchar == len(title)
        assert title.endswith("...")
    def test_dataarray_diff_n1(self) -> None:
        """diff along a dim matches np.diff on the corresponding axis."""
        da = DataArray(np.random.randn(3, 4), dims=["x", "y"])
        actual = da.diff("y")
        expected = DataArray(np.diff(da.values, axis=1), dims=["x", "y"])
        assert_equal(expected, actual)
    def test_coordinate_diff(self) -> None:
        """diff works on a coordinate variable itself (regression for GH634)."""
        # regression test for GH634
        arr = DataArray(range(0, 20, 2), dims=["lon"], coords=[range(10)])
        lon = arr.coords["lon"]
        expected = DataArray([1] * 9, dims=["lon"], coords=[range(1, 10)], name="lon")
        actual = lon.diff("lon")
        assert_equal(expected, actual)
    @pytest.mark.parametrize("offset", [-5, 0, 1, 2])
    @pytest.mark.parametrize("fill_value, dtype", [(2, int), (dtypes.NA, float)])
    def test_shift(self, offset, fill_value, dtype) -> None:
        """shift honors fill_value/dtype and matches pandas' Series.shift."""
        arr = DataArray([1, 2, 3], dims="x")
        actual = arr.shift(x=1, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value = np.nan
        expected = DataArray([fill_value, 1, 2], dims="x")
        assert_identical(expected, actual)
        assert actual.dtype == dtype

        arr = DataArray([1, 2, 3], [("x", ["a", "b", "c"])])
        expected = DataArray(arr.to_pandas().shift(offset))
        actual = arr.shift(x=offset)
        assert_identical(expected, actual)
    def test_roll_coords(self) -> None:
        """roll(..., roll_coords=True) rotates the coordinate values too."""
        arr = DataArray([1, 2, 3], coords={"x": range(3)}, dims="x")
        actual = arr.roll(x=1, roll_coords=True)
        expected = DataArray([3, 1, 2], coords=[("x", [2, 0, 1])])
        assert_identical(expected, actual)
    def test_roll_no_coords(self) -> None:
        """Default roll rotates data but leaves the coordinate in place."""
        arr = DataArray([1, 2, 3], coords={"x": range(3)}, dims="x")
        actual = arr.roll(x=1)
        expected = DataArray([3, 1, 2], coords=[("x", [0, 1, 2])])
        assert_identical(expected, actual)
    def test_copy_with_data(self) -> None:
        """copy(data=...) replaces values but keeps dims, coords, attrs, and name."""
        orig = DataArray(
            np.random.random(size=(2, 2)),
            dims=("x", "y"),
            attrs={"attr1": "value1"},
            coords={"x": [4, 3]},
            name="helloworld",
        )
        new_data = np.arange(4).reshape(2, 2)
        actual = orig.copy(data=new_data)
        expected = orig.copy()
        expected.data = new_data
        assert_identical(expected, actual)
    @pytest.mark.xfail(raises=AssertionError)
    @pytest.mark.parametrize(
        "deep, expected_orig",
        [
            [
                True,
                xr.DataArray(
                    xr.IndexVariable("a", np.array([1, 2])),
                    coords={"a": [1, 2]},
                    dims=["a"],
                ),
            ],
            [
                False,
                xr.DataArray(
                    xr.IndexVariable("a", np.array([999, 2])),
                    coords={"a": [999, 2]},
                    dims=["a"],
                ),
            ],
        ],
    )
    def test_copy_coords(self, deep, expected_orig) -> None:
        """The test fails for the shallow copy, and apparently only on Windows
        for some reason. In windows coords seem to be immutable unless it's one
        dataarray deep copied from another."""
        da = xr.DataArray(
            np.ones([2, 2, 2]),
            coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]},
            dims=["a", "b", "c"],
        )
        da_cp = da.copy(deep)
        new_a = np.array([999, 2])
        da_cp.coords["a"] = da_cp["a"].copy(data=new_a)

        expected_cp = xr.DataArray(
            xr.IndexVariable("a", np.array([999, 2])),
            coords={"a": [999, 2]},
            dims=["a"],
        )
        assert_identical(da_cp["a"], expected_cp)

        # deep copy keeps the original untouched; shallow copy shares the coord
        assert_identical(da["a"], expected_orig)
def test_real_and_imag(self) -> None:
array = DataArray(1 + 2j)
assert_identical(array.real, DataArray(1))
assert_identical(array.imag, DataArray(2))
def test_setattr_raises(self) -> None:
array = DataArray(0, coords={"scalar": 1}, attrs={"foo": "bar"})
with pytest.raises(AttributeError, match=r"cannot set attr"):
array.scalar = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
array.foo = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
array.other = 2
def test_full_like(self) -> None:
# For more thorough tests, see test_variable.py
da = DataArray(
np.random.random(size=(2, 2)),
dims=("x", "y"),
attrs={"attr1": "value1"},
coords={"x": [4, 3]},
name="helloworld",
)
actual = full_like(da, 2)
expect = da.copy(deep=True)
expect.values = np.array([[2.0, 2.0], [2.0, 2.0]])
assert_identical(expect, actual)
# override dtype
actual = full_like(da, fill_value=True, dtype=bool)
expect.values = np.array([[True, True], [True, True]])
assert expect.dtype == bool
assert_identical(expect, actual)
with pytest.raises(ValueError, match="'dtype' cannot be dict-like"):
full_like(da, fill_value=True, dtype={"x": bool})
def test_dot(self) -> None:
x = np.linspace(-3, 3, 6)
y = np.linspace(-3, 3, 5)
z = range(4)
da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
da = DataArray(da_vals, coords=[x, y, z], dims=["x", "y", "z"])
dm_vals1 = range(4)
dm1 = DataArray(dm_vals1, coords=[z], dims=["z"])
# nd dot 1d
actual1 = da.dot(dm1)
expected_vals1 = np.tensordot(da_vals, dm_vals1, (2, 0))
expected1 = DataArray(expected_vals1, coords=[x, y], dims=["x", "y"])
assert_equal(expected1, actual1)
# all shared dims
actual2 = da.dot(da)
expected_vals2 = np.tensordot(da_vals, da_vals, axes=([0, 1, 2], [0, 1, 2]))
expected2 = DataArray(expected_vals2)
assert_equal(expected2, actual2)
# multiple shared dims
dm_vals3 = np.arange(20 * 5 * 4).reshape((20, 5, 4))
j = np.linspace(-3, 3, 20)
dm3 = DataArray(dm_vals3, coords=[j, y, z], dims=["j", "y", "z"])
actual3 = da.dot(dm3)
expected_vals3 = np.tensordot(da_vals, dm_vals3, axes=([1, 2], [1, 2]))
expected3 = DataArray(expected_vals3, coords=[x, j], dims=["x", "j"])
assert_equal(expected3, actual3)
# Ellipsis: all dims are shared
actual4 = da.dot(da, dim=...)
expected4 = da.dot(da)
assert_equal(expected4, actual4)
# Ellipsis: not all dims are shared
actual5 = da.dot(dm3, dim=...)
expected5 = da.dot(dm3, dim=("j", "x", "y", "z"))
assert_equal(expected5, actual5)
with pytest.raises(NotImplementedError):
da.dot(dm3.to_dataset(name="dm"))
with pytest.raises(TypeError):
da.dot(dm3.values) # type: ignore[type-var]
def test_dot_align_coords(self) -> None:
# GH 3694
x = np.linspace(-3, 3, 6)
y = np.linspace(-3, 3, 5)
z_a = range(4)
da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
da = DataArray(da_vals, coords=[x, y, z_a], dims=["x", "y", "z"])
z_m = range(2, 6)
dm_vals1 = range(4)
dm1 = DataArray(dm_vals1, coords=[z_m], dims=["z"])
with xr.set_options(arithmetic_join="exact"):
with pytest.raises(
ValueError, match=r"cannot align.*join.*exact.*not equal.*"
):
da.dot(dm1)
da_aligned, dm_aligned = xr.align(da, dm1, join="inner")
# nd dot 1d
actual1 = da.dot(dm1)
expected_vals1 = np.tensordot(da_aligned.values, dm_aligned.values, (2, 0))
expected1 = DataArray(expected_vals1, coords=[x, da_aligned.y], dims=["x", "y"])
assert_equal(expected1, actual1)
# multiple shared dims
dm_vals2 = np.arange(20 * 5 * 4).reshape((20, 5, 4))
j = np.linspace(-3, 3, 20)
dm2 = DataArray(dm_vals2, coords=[j, y, z_m], dims=["j", "y", "z"])
da_aligned, dm_aligned = xr.align(da, dm2, join="inner")
actual2 = da.dot(dm2)
expected_vals2 = np.tensordot(
da_aligned.values, dm_aligned.values, axes=([1, 2], [1, 2])
)
expected2 = DataArray(expected_vals2, coords=[x, j], dims=["x", "j"])
assert_equal(expected2, actual2)
def test_matmul(self) -> None:
# copied from above (could make a fixture)
x = np.linspace(-3, 3, 6)
y = np.linspace(-3, 3, 5)
z = range(4)
da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))
da = DataArray(da_vals, coords=[x, y, z], dims=["x", "y", "z"])
result = da @ da
expected = da.dot(da)
assert_identical(result, expected)
def test_matmul_align_coords(self) -> None:
# GH 3694
x_a = np.arange(6)
x_b = np.arange(2, 8)
da_vals = np.arange(6)
da_a = DataArray(da_vals, coords=[x_a], dims=["x"])
da_b = DataArray(da_vals, coords=[x_b], dims=["x"])
# only test arithmetic_join="inner" (=default)
result = da_a @ da_b
expected = da_a.dot(da_b)
assert_identical(result, expected)
with xr.set_options(arithmetic_join="exact"):
with pytest.raises(
ValueError, match=r"cannot align.*join.*exact.*not equal.*"
):
da_a @ da_b
def test_binary_op_propagate_indexes(self) -> None:
# regression test for GH2227
self.dv["x"] = np.arange(self.dv.sizes["x"])
expected = self.dv.xindexes["x"]
actual = (self.dv * 10).xindexes["x"]
assert expected is actual
actual = (self.dv > 10).xindexes["x"]
assert expected is actual
# use mda for bitshift test as it's type int
actual = (self.mda << 2).xindexes["x"]
expected = self.mda.xindexes["x"]
assert expected is actual
def test_binary_op_join_setting(self) -> None:
dim = "x"
align_type: Final = "outer"
coords_l, coords_r = [0, 1, 2], [1, 2, 3]
missing_3 = xr.DataArray(coords_l, [(dim, coords_l)])
missing_0 = xr.DataArray(coords_r, [(dim, coords_r)])
with xr.set_options(arithmetic_join=align_type):
actual = missing_0 + missing_3
_missing_0_aligned, _missing_3_aligned = xr.align(
missing_0, missing_3, join=align_type
)
expected = xr.DataArray([np.nan, 2, 4, np.nan], [(dim, [0, 1, 2, 3])])
assert_equal(actual, expected)
def test_combine_first(self) -> None:
ar0 = DataArray([[0, 0], [0, 0]], [("x", ["a", "b"]), ("y", [-1, 0])])
ar1 = DataArray([[1, 1], [1, 1]], [("x", ["b", "c"]), ("y", [0, 1])])
ar2 = DataArray([2], [("x", ["d"])])
actual = ar0.combine_first(ar1)
expected = DataArray(
[[0, 0, np.nan], [0, 0, 1], [np.nan, 1, 1]],
[("x", ["a", "b", "c"]), ("y", [-1, 0, 1])],
)
assert_equal(actual, expected)
actual = ar1.combine_first(ar0)
expected = DataArray(
[[0, 0, np.nan], [0, 1, 1], [np.nan, 1, 1]],
[("x", ["a", "b", "c"]), ("y", [-1, 0, 1])],
)
assert_equal(actual, expected)
actual = ar0.combine_first(ar2)
expected = DataArray(
[[0, 0], [0, 0], [2, 2]], [("x", ["a", "b", "d"]), ("y", [-1, 0])]
)
assert_equal(actual, expected)
def test_sortby(self) -> None:
da = DataArray(
[[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])]
)
sorted1d = DataArray(
[[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])]
)
sorted2d = DataArray(
[[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])]
)
expected = sorted1d
dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])])
actual = da.sortby(dax)
assert_equal(actual, expected)
# test descending order sort
actual = da.sortby(dax, ascending=False)
assert_equal(actual, da)
# test alignment (fills in nan for 'c')
dax_short = DataArray([98, 97], [("x", ["b", "a"])])
actual = da.sortby(dax_short)
assert_equal(actual, expected)
# test multi-dim sort by 1D dataarray values
expected = sorted2d
dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])])
day = DataArray([90, 80], [("y", [1, 0])])
actual = da.sortby([day, dax])
assert_equal(actual, expected)
expected = sorted1d
actual = da.sortby("x")
assert_equal(actual, expected)
expected = sorted2d
actual = da.sortby(["x", "y"])
assert_equal(actual, expected)
@requires_bottleneck
def test_rank(self) -> None:
# floats
ar = DataArray([[3, 4, np.nan, 1]])
expect_0 = DataArray([[1, 1, np.nan, 1]])
expect_1 = DataArray([[2, 3, np.nan, 1]])
assert_equal(ar.rank("dim_0"), expect_0)
assert_equal(ar.rank("dim_1"), expect_1)
# int
x = DataArray([3, 2, 1])
assert_equal(x.rank("dim_0"), x)
# str
y = DataArray(["c", "b", "a"])
assert_equal(y.rank("dim_0"), x)
x = DataArray([3.0, 1.0, np.nan, 2.0, 4.0], dims=("z",))
y = DataArray([0.75, 0.25, np.nan, 0.5, 1.0], dims=("z",))
assert_equal(y.rank("z", pct=True), y)
@pytest.mark.parametrize("use_dask", [True, False])
@pytest.mark.parametrize("use_datetime", [True, False])
@pytest.mark.filterwarnings("ignore:overflow encountered in multiply")
def test_polyfit(self, use_dask, use_datetime) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
xcoord = xr.DataArray(
pd.date_range("1970-01-01", freq="D", periods=10), dims=("x",), name="x"
)
x = xr.core.missing.get_clean_interp_index(xcoord, "x")
if not use_datetime:
xcoord = x
da_raw = DataArray(
np.stack((10 + 1e-15 * x + 2e-28 * x**2, 30 + 2e-14 * x + 1e-29 * x**2)),
dims=("d", "x"),
coords={"x": xcoord, "d": [0, 1]},
)
if use_dask:
da = da_raw.chunk({"d": 1})
else:
da = da_raw
out = da.polyfit("x", 2)
expected = DataArray(
[[2e-28, 1e-15, 10], [1e-29, 2e-14, 30]],
dims=("d", "degree"),
coords={"degree": [2, 1, 0], "d": [0, 1]},
).T
assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3)
# Full output and deficient rank
with warnings.catch_warnings():
warnings.simplefilter("ignore", RankWarning)
out = da.polyfit("x", 12, full=True)
assert out.polyfit_residuals.isnull().all()
# With NaN
da_raw[0, 1:3] = np.nan
if use_dask:
da = da_raw.chunk({"d": 1})
else:
da = da_raw
out = da.polyfit("x", 2, skipna=True, cov=True)
assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3)
assert "polyfit_covariance" in out
# Skipna + Full output
out = da.polyfit("x", 2, skipna=True, full=True)
assert_allclose(out.polyfit_coefficients, expected, rtol=1e-3)
assert out.x_matrix_rank == 3
np.testing.assert_almost_equal(out.polyfit_residuals, [0, 0])
with warnings.catch_warnings():
warnings.simplefilter("ignore", RankWarning)
out = da.polyfit("x", 8, full=True)
np.testing.assert_array_equal(out.polyfit_residuals.isnull(), [True, False])
@requires_dask
def test_polyfit_nd_dask(self) -> None:
da = (
DataArray(np.arange(120), dims="time", coords={"time": np.arange(120)})
.chunk({"time": 20})
.expand_dims(lat=5, lon=5)
.chunk({"lat": 2, "lon": 2})
)
actual = da.polyfit("time", 1, skipna=False)
expected = da.compute().polyfit("time", 1, skipna=False)
assert_allclose(actual, expected)
def test_pad_constant(self) -> None:
ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5))
actual = ar.pad(dim_0=(1, 3))
expected = DataArray(
np.pad(
np.arange(3 * 4 * 5).reshape(3, 4, 5).astype(np.float32),
mode="constant",
pad_width=((1, 3), (0, 0), (0, 0)),
constant_values=np.nan,
)
)
assert actual.shape == (7, 4, 5)
assert_identical(actual, expected)
ar = xr.DataArray([9], dims="x")
actual = ar.pad(x=1)
expected = xr.DataArray([np.nan, 9, np.nan], dims="x")
assert_identical(actual, expected)
actual = ar.pad(x=1, constant_values=1.23456)
expected = xr.DataArray([1, 9, 1], dims="x")
assert_identical(actual, expected)
with pytest.raises(ValueError, match="cannot convert float NaN to integer"):
ar.pad(x=1, constant_values=np.nan)
def test_pad_coords(self) -> None:
ar = DataArray(
np.arange(3 * 4 * 5).reshape(3, 4, 5),
[("x", np.arange(3)), ("y", np.arange(4)), ("z", np.arange(5))],
)
actual = ar.pad(x=(1, 3), constant_values=1)
expected = DataArray(
np.pad(
np.arange(3 * 4 * 5).reshape(3, 4, 5),
mode="constant",
pad_width=((1, 3), (0, 0), (0, 0)),
constant_values=1,
),
[
(
"x",
np.pad(
np.arange(3).astype(np.float32),
mode="constant",
pad_width=(1, 3),
constant_values=np.nan,
),
),
("y", np.arange(4)),
("z", np.arange(5)),
],
)
assert_identical(actual, expected)
@pytest.mark.parametrize("mode", ("minimum", "maximum", "mean", "median"))
@pytest.mark.parametrize(
"stat_length", (None, 3, (1, 3), {"dim_0": (2, 1), "dim_2": (4, 2)})
)
def test_pad_stat_length(self, mode, stat_length) -> None:
ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5))
actual = ar.pad(dim_0=(1, 3), dim_2=(2, 2), mode=mode, stat_length=stat_length)
if isinstance(stat_length, dict):
stat_length = (stat_length["dim_0"], (4, 4), stat_length["dim_2"])
expected = DataArray(
np.pad(
np.arange(3 * 4 * 5).reshape(3, 4, 5),
pad_width=((1, 3), (0, 0), (2, 2)),
mode=mode,
stat_length=stat_length,
)
)
assert actual.shape == (7, 4, 9)
assert_identical(actual, expected)
@pytest.mark.parametrize(
"end_values", (None, 3, (3, 5), {"dim_0": (2, 1), "dim_2": (4, 2)})
)
def test_pad_linear_ramp(self, end_values) -> None:
ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5))
actual = ar.pad(
dim_0=(1, 3), dim_2=(2, 2), mode="linear_ramp", end_values=end_values
)
if end_values is None:
end_values = 0
elif isinstance(end_values, dict):
end_values = (end_values["dim_0"], (4, 4), end_values["dim_2"])
expected = DataArray(
np.pad(
np.arange(3 * 4 * 5).reshape(3, 4, 5),
pad_width=((1, 3), (0, 0), (2, 2)),
mode="linear_ramp",
end_values=end_values,
)
)
assert actual.shape == (7, 4, 9)
assert_identical(actual, expected)
@pytest.mark.parametrize("mode", ("reflect", "symmetric"))
@pytest.mark.parametrize("reflect_type", (None, "even", "odd"))
def test_pad_reflect(self, mode, reflect_type) -> None:
ar = DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5))
actual = ar.pad(
dim_0=(1, 3), dim_2=(2, 2), mode=mode, reflect_type=reflect_type
)
np_kwargs = {
"array": np.arange(3 * 4 * 5).reshape(3, 4, 5),
"pad_width": ((1, 3), (0, 0), (2, 2)),
"mode": mode,
}
# numpy does not support reflect_type=None
if reflect_type is not None:
np_kwargs["reflect_type"] = reflect_type
expected = DataArray(np.pad(**np_kwargs))
assert actual.shape == (7, 4, 9)
assert_identical(actual, expected)
@pytest.mark.parametrize(
["keep_attrs", "attrs", "expected"],
[
pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"),
pytest.param(False, {"a": 1, "b": 2}, {}, id="False"),
pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"),
],
)
def test_pad_keep_attrs(self, keep_attrs, attrs, expected) -> None:
arr = xr.DataArray(
[1, 2], dims="x", coords={"c": ("x", [-1, 1], attrs)}, attrs=attrs
)
expected = xr.DataArray(
[0, 1, 2, 0],
dims="x",
coords={"c": ("x", [np.nan, -1, 1, np.nan], expected)},
attrs=expected,
)
keep_attrs_ = "default" if keep_attrs is None else keep_attrs
with set_options(keep_attrs=keep_attrs_):
actual = arr.pad({"x": (1, 1)}, mode="constant", constant_values=0)
xr.testing.assert_identical(actual, expected)
actual = arr.pad(
{"x": (1, 1)}, mode="constant", constant_values=0, keep_attrs=keep_attrs
)
xr.testing.assert_identical(actual, expected)
@pytest.mark.parametrize("parser", ["pandas", "python"])
@pytest.mark.parametrize(
"engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])]
)
@pytest.mark.parametrize(
"backend", ["numpy", pytest.param("dask", marks=[requires_dask])]
)
def test_query(
self, backend, engine: QueryEngineOptions, parser: QueryParserOptions
) -> None:
"""Test querying a dataset."""
# setup test data
np.random.seed(42)
a = np.arange(0, 10, 1)
b = np.random.randint(0, 100, size=10)
c = np.linspace(0, 1, 20)
d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype(
object
)
aa = DataArray(data=a, dims=["x"], name="a", coords={"a2": ("x", a)})
bb = DataArray(data=b, dims=["x"], name="b", coords={"b2": ("x", b)})
cc = DataArray(data=c, dims=["y"], name="c", coords={"c2": ("y", c)})
dd = DataArray(data=d, dims=["z"], name="d", coords={"d2": ("z", d)})
if backend == "dask":
import dask.array as da
aa = aa.copy(data=da.from_array(a, chunks=3))
bb = bb.copy(data=da.from_array(b, chunks=3))
cc = cc.copy(data=da.from_array(c, chunks=7))
dd = dd.copy(data=da.from_array(d, chunks=12))
# query single dim, single variable
with raise_if_dask_computes():
actual = aa.query(x="a2 > 5", engine=engine, parser=parser)
expect = aa.isel(x=(a > 5))
assert_identical(expect, actual)
# query single dim, single variable, via dict
with raise_if_dask_computes():
actual = aa.query(dict(x="a2 > 5"), engine=engine, parser=parser)
expect = aa.isel(dict(x=(a > 5)))
assert_identical(expect, actual)
# query single dim, single variable
with raise_if_dask_computes():
actual = bb.query(x="b2 > 50", engine=engine, parser=parser)
expect = bb.isel(x=(b > 50))
assert_identical(expect, actual)
# query single dim, single variable
with raise_if_dask_computes():
actual = cc.query(y="c2 < .5", engine=engine, parser=parser)
expect = cc.isel(y=(c < 0.5))
assert_identical(expect, actual)
# query single dim, single string variable
if parser == "pandas":
# N.B., this query currently only works with the pandas parser
# xref https://github.com/pandas-dev/pandas/issues/40436
with raise_if_dask_computes():
actual = dd.query(z='d2 == "bar"', engine=engine, parser=parser)
expect = dd.isel(z=(d == "bar"))
assert_identical(expect, actual)
# test error handling
with pytest.raises(ValueError):
aa.query("a > 5") # type: ignore[arg-type] # must be dict or kwargs
with pytest.raises(ValueError):
aa.query(x=(a > 5)) # must be query string
with pytest.raises(UndefinedVariableError):
aa.query(x="spam > 50") # name not present
@requires_scipy
@pytest.mark.parametrize("use_dask", [True, False])
def test_curvefit(self, use_dask) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
def exp_decay(t, n0, tau=1):
return n0 * np.exp(-t / tau)
t = np.arange(0, 5, 0.5)
da = DataArray(
np.stack([exp_decay(t, 3, 3), exp_decay(t, 5, 4), np.nan * t], axis=-1),
dims=("t", "x"),
coords={"t": t, "x": [0, 1, 2]},
)
da[0, 0] = np.nan
expected = DataArray(
[[3, 3], [5, 4], [np.nan, np.nan]],
dims=("x", "param"),
coords={"x": [0, 1, 2], "param": ["n0", "tau"]},
)
if use_dask:
da = da.chunk({"x": 1})
fit = da.curvefit(
coords=[da.t], func=exp_decay, p0={"n0": 4}, bounds={"tau": (2, 6)}
)
assert_allclose(fit.curvefit_coefficients, expected, rtol=1e-3)
da = da.compute()
fit = da.curvefit(coords="t", func=np.power, reduce_dims="x", param_names=["a"])
assert "a" in fit.param
assert "x" not in fit.dims
def test_curvefit_helpers(self) -> None:
def exp_decay(t, n0, tau=1):
return n0 * np.exp(-t / tau)
from xarray.computation.fit import _get_func_args, _initialize_curvefit_params
params, func_args = _get_func_args(exp_decay, [])
assert params == ["n0", "tau"]
param_defaults, bounds_defaults = _initialize_curvefit_params(
params, {"n0": 4}, {"tau": [5, np.inf]}, func_args
)
assert param_defaults == {"n0": 4, "tau": 6}
assert bounds_defaults == {"n0": (-np.inf, np.inf), "tau": (5, np.inf)}
# DataArray as bound
param_defaults, bounds_defaults = _initialize_curvefit_params(
params=params,
p0={"n0": 4},
bounds={"tau": [DataArray([3, 4], coords=[("x", [1, 2])]), np.inf]},
func_args=func_args,
)
assert param_defaults["n0"] == 4
assert (
param_defaults["tau"] == xr.DataArray([4, 5], coords=[("x", [1, 2])])
).all()
assert bounds_defaults["n0"] == (-np.inf, np.inf)
assert (
bounds_defaults["tau"][0] == DataArray([3, 4], coords=[("x", [1, 2])])
).all()
assert bounds_defaults["tau"][1] == np.inf
param_names = ["a"]
params, func_args = _get_func_args(np.power, param_names)
assert params == param_names
with pytest.raises(ValueError):
_get_func_args(np.power, [])
@requires_scipy
@pytest.mark.parametrize("use_dask", [True, False])
def test_curvefit_multidimensional_guess(self, use_dask: bool) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
def sine(t, a, f, p):
return a * np.sin(2 * np.pi * (f * t + p))
t = np.arange(0, 2, 0.02)
da = DataArray(
np.stack([sine(t, 1.0, 2, 0), sine(t, 1.0, 2, 0)]),
coords={"x": [0, 1], "t": t},
)
# Fitting to a sine curve produces a different result depending on the
# initial guess: either the phase is zero and the amplitude is positive
# or the phase is 0.5 * 2pi and the amplitude is negative.
expected = DataArray(
[[1, 2, 0], [-1, 2, 0.5]],
coords={"x": [0, 1], "param": ["a", "f", "p"]},
)
# Different initial guesses for different values of x
a_guess = DataArray([1, -1], coords=[da.x])
p_guess = DataArray([0, 0.5], coords=[da.x])
if use_dask:
da = da.chunk({"x": 1})
fit = da.curvefit(
coords=[da.t],
func=sine,
p0={"a": a_guess, "p": p_guess, "f": 2},
)
assert_allclose(fit.curvefit_coefficients, expected)
with pytest.raises(
ValueError,
match=r"Initial guess for 'a' has unexpected dimensions .* should only have "
"dimensions that are in data dimensions",
):
# initial guess with additional dimensions should be an error
da.curvefit(
coords=[da.t],
func=sine,
p0={"a": DataArray([1, 2], coords={"foo": [1, 2]})},
)
@requires_scipy
@pytest.mark.parametrize("use_dask", [True, False])
def test_curvefit_multidimensional_bounds(self, use_dask: bool) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
def sine(t, a, f, p):
return a * np.sin(2 * np.pi * (f * t + p))
t = np.arange(0, 2, 0.02)
da = xr.DataArray(
np.stack([sine(t, 1.0, 2, 0), sine(t, 1.0, 2, 0)]),
coords={"x": [0, 1], "t": t},
)
# Fit a sine with different bounds: positive amplitude should result in a fit with
# phase 0 and negative amplitude should result in phase 0.5 * 2pi.
expected = DataArray(
[[1, 2, 0], [-1, 2, 0.5]],
coords={"x": [0, 1], "param": ["a", "f", "p"]},
)
if use_dask:
da = da.chunk({"x": 1})
fit = da.curvefit(
coords=[da.t],
func=sine,
p0={"f": 2, "p": 0.25}, # this guess is needed to get the expected result
bounds={
"a": (
DataArray([0, -2], coords=[da.x]),
DataArray([2, 0], coords=[da.x]),
),
},
)
assert_allclose(fit.curvefit_coefficients, expected)
# Scalar lower bound with array upper bound
fit2 = da.curvefit(
coords=[da.t],
func=sine,
p0={"f": 2, "p": 0.25}, # this guess is needed to get the expected result
bounds={
"a": (-2, DataArray([2, 0], coords=[da.x])),
},
)
assert_allclose(fit2.curvefit_coefficients, expected)
with pytest.raises(
ValueError,
match=r"Upper bound for 'a' has unexpected dimensions .* should only have "
"dimensions that are in data dimensions",
):
# bounds with additional dimensions should be an error
da.curvefit(
coords=[da.t],
func=sine,
bounds={"a": (0, DataArray([1], coords={"foo": [1]}))},
)
@requires_scipy
@pytest.mark.parametrize("use_dask", [True, False])
def test_curvefit_ignore_errors(self, use_dask: bool) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
# nonsense function to make the optimization fail
def line(x, a, b):
if a > 10:
return 0
return a * x + b
da = DataArray(
[[1, 3, 5], [0, 20, 40]],
coords={"i": [1, 2], "x": [0.0, 1.0, 2.0]},
)
if use_dask:
da = da.chunk({"i": 1})
expected = DataArray(
[[2, 1], [np.nan, np.nan]], coords={"i": [1, 2], "param": ["a", "b"]}
)
with pytest.raises(RuntimeError, match="calls to function has reached maxfev"):
da.curvefit(
coords="x",
func=line,
# limit maximum number of calls so the optimization fails
kwargs=dict(maxfev=5),
).compute() # have to compute to raise the error
fit = da.curvefit(
coords="x",
func=line,
errors="ignore",
# limit maximum number of calls so the optimization fails
kwargs=dict(maxfev=5),
).compute()
assert_allclose(fit.curvefit_coefficients, expected)
| TestDataArray |
python | django-extensions__django-extensions | tests/management/commands/test_set_default_site.py | {
"start": 320,
"end": 4153
} | class ____(TestCase):
"""Tests for set_default_site command."""
@override_settings(SITE_ID=321)
def test_should_raise_CommandError_when_Site_object_does_not_exist(self):
with self.assertRaisesRegex(
CommandError, "Default site with pk=321 does not exist"
):
call_command("set_default_site")
@patch("django_extensions.management.commands.set_default_site.socket")
def test_should_raise_CommandError_if_system_fqdn_return_None(self, m_socket):
m_socket.getfqdn.return_value = None
with self.assertRaisesRegex(CommandError, "Cannot find systems FQDN"):
call_command("set_default_site", "--system-fqdn")
def test_should_raise_CommandError_if_both_domain_and_set_as_system_fqdn_are_present(
self,
):
with self.assertRaisesRegex(
CommandError, "The set_as_system_fqdn cannot be used with domain option."
):
call_command("set_default_site", "--domain=foo", "--system-fqdn")
@override_settings(
INSTALLED_APPS=[
app for app in settings.INSTALLED_APPS if app != "django.contrib.sites"
]
)
def test_should_raise_CommandError_Sites_framework_not_installed(self):
with self.assertRaisesRegex(
CommandError, "The sites framework is not installed."
):
call_command("set_default_site", "--domain=foo", "--system-fqdn")
@patch("sys.stdout", new_callable=StringIO)
def test_should_print_Nothing_to_update(self, m_stdout):
call_command("set_default_site")
self.assertIn(
"Nothing to update (need --name, --domain and/or --system-fqdn)\n",
m_stdout.getvalue(),
)
@patch("django_extensions.management.commands.set_default_site.socket")
def test_should_use_domain_as_name_if_system_fqdn_return_domain_and_name_is_not_provided(
self, m_socket
):
m_socket.getfqdn.return_value = "test.com"
call_command("set_default_site", "--system-fqdn")
result = Site.objects.get(pk=settings.SITE_ID)
self.assertEqual(result.name, "test.com")
self.assertEqual(result.domain, "test.com")
@patch("django_extensions.management.commands.set_default_site.socket")
def test_should_set_custom_nameif_system_fqdn_return_domain_and_name_is_provided(
self, m_socket
):
m_socket.getfqdn.return_value = "test.com"
call_command("set_default_site", "--system-fqdn", "--name=foo")
result = Site.objects.get(pk=settings.SITE_ID)
self.assertEqual(result.name, "foo")
self.assertEqual(result.domain, "test.com")
def test_should_set_name_and_domain_if_provided(self):
call_command("set_default_site", "--name=foo", "--domain=bar")
result = Site.objects.get(pk=settings.SITE_ID)
self.assertEqual(result.name, "foo")
self.assertEqual(result.domain, "bar")
def test_should_set_name_only(self):
call_command("set_default_site", "--name=foo")
result = Site.objects.get(pk=settings.SITE_ID)
self.assertEqual(result.name, "foo")
self.assertEqual(result.domain, "example.com")
def test_should_set_domain_only(self):
call_command("set_default_site", "--domain=bar")
result = Site.objects.get(pk=settings.SITE_ID)
self.assertEqual(result.name, "example.com")
self.assertEqual(result.domain, "bar")
def test_should_not_raise_if_sites_installed_through_appconfig(self):
with self.modify_settings(
INSTALLED_APPS={
"append": "django.contrib.sites.apps.SitesConfig",
"remove": "django.contrib.sites",
}
):
call_command("set_default_site", "--name=foo", "--domain=foo.bar")
| SetDefaultSiteTests |
python | doocs__leetcode | solution/3000-3099/3080.Mark Elements on Array by Performing Queries/Solution.py | {
"start": 0,
"end": 640
} | class ____:
def unmarkedSumArray(self, nums: List[int], queries: List[List[int]]) -> List[int]:
n = len(nums)
s = sum(nums)
mark = [False] * n
arr = sorted((x, i) for i, x in enumerate(nums))
j = 0
ans = []
for index, k in queries:
if not mark[index]:
mark[index] = True
s -= nums[index]
while k and j < n:
if not mark[arr[j][1]]:
mark[arr[j][1]] = True
s -= arr[j][0]
k -= 1
j += 1
ans.append(s)
return ans
| Solution |
python | openai__openai-python | src/openai/types/chat/chat_completion_custom_tool_param.py | {
"start": 391,
"end": 529
} | class ____(TypedDict, total=False):
type: Required[Literal["text"]]
"""Unconstrained text format. Always `text`."""
| CustomFormatText |
python | huggingface__transformers | src/transformers/models/jetmoe/modular_jetmoe.py | {
"start": 1830,
"end": 1878
} | class ____(MixtralRMSNorm):
pass
| JetMoeRMSNorm |
python | getsentry__sentry | tests/sentry/monitors/consumers/test_end_to_end.py | {
"start": 1392,
"end": 1711
} | class ____:
pass
partition = Partition(Topic("test"), 0)
def create_consumer():
factory = StoreMonitorCheckInStrategyFactory()
commit = mock.Mock()
return factory.create_with_partitions(commit, {partition: 0})
@thread_leaks.thread_leak_allowlist(reason="monitors", issue=97032)
| ExpectNoProcessingError |
python | kamyu104__LeetCode-Solutions | Python/calculate-score-after-performing-instructions.py | {
"start": 42,
"end": 598
} | class ____(object):
def calculateScore(self, instructions, values):
"""
:type instructions: List[str]
:type values: List[int]
:rtype: int
"""
result = 0
lookup = [False]*len(instructions)
i = 0
while 0 <= i < len(instructions):
if lookup[i]:
break
lookup[i] = True
if instructions[i] == "add":
result += values[i]
i += 1
else:
i += values[i]
return result
| Solution |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/coinbase/tests.py | {
"start": 244,
"end": 1024
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = CoinbaseProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"id": "9da7a204-544e-5fd1-9a12-61176c5d4cd8",
"name": "User One",
"username": "user1",
"email": "user1@example.com",
"profile_location": null,
"profile_bio": null,
"profile_url": "https://coinbase.com/user1",
"avatar_url": "https://images.coinbase.com/avatar?h=vR%2FY8igBoPwuwGren5JMwvDNGpURAY%2F0nRIOgH%2FY2Qh%2BQ6nomR3qusA%2Bh6o2%0Af9rH&s=128",
"resource": "user",
"resource_path": "/v2/user"
}""",
)
def get_expected_to_str(self):
return "user1"
| CoinbaseTests |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_bits.py | {
"start": 12053,
"end": 12718
} | class ____:
"""
Wrapper around Jinja's TemplateExpression for converting MarkerError back into Marker.
This is needed to make expression error handling consistent with templates, since Jinja does not support a custom type for Environment.compile_expression.
"""
def __init__(self, template_expression: TemplateExpression) -> None:
self._template_expression = template_expression
def __call__(self, jinja_vars: c.Mapping[str, t.Any]) -> t.Any:
try:
return self._template_expression(ArgSmuggler.package_jinja_vars(jinja_vars))
except MarkerError as ex:
return ex.source
| AnsibleTemplateExpression |
python | ray-project__ray | python/ray/tune/search/optuna/optuna_search.py | {
"start": 1285,
"end": 2336
} | class ____:
"""Utility to capture returned values from Optuna's suggest_ methods.
This will wrap around the ``optuna.Trial` object and decorate all
`suggest_` callables with a function capturing the returned value,
which will be saved in the ``captured_values`` dict.
"""
def __init__(self, ot_trial: OptunaTrial) -> None:
self.ot_trial = ot_trial
self.captured_values: Dict[str, Any] = {}
def _get_wrapper(self, func: Callable) -> Callable:
@functools.wraps(func)
def wrapper(*args, **kwargs):
# name is always the first arg for suggest_ methods
name = kwargs.get("name", args[0])
ret = func(*args, **kwargs)
self.captured_values[name] = ret
return ret
return wrapper
def __getattr__(self, item_name: str) -> Any:
item = getattr(self.ot_trial, item_name)
if item_name.startswith("suggest_") and callable(item):
return self._get_wrapper(item)
return item
| _OptunaTrialSuggestCaptor |
python | django__django | django/contrib/postgres/fields/ranges.py | {
"start": 5648,
"end": 5867
} | class ____(ContinuousRangeField):
base_field = models.DecimalField
range_type = NumericRange
form_field = forms.DecimalRangeField
def db_type(self, connection):
return "numrange"
| DecimalRangeField |
python | kamyu104__LeetCode-Solutions | Python/vowels-game-in-a-string.py | {
"start": 36,
"end": 201
} | class ____(object):
def doesAliceWin(self, s):
"""
:type s: str
:rtype: bool
"""
return any(x in "aeiou" for x in s)
| Solution |
python | huggingface__transformers | tests/models/pix2struct/test_processing_pix2struct.py | {
"start": 930,
"end": 8911
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Pix2StructProcessor
text_input_name = "decoder_input_ids"
images_input_name = "flattened_patches"
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
return tokenizer_class.from_pretrained("google-t5/t5-small")
def test_processor_max_patches(self):
processor = self.get_processor()
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
max_patches = [512, 1024, 2048, 4096]
expected_hidden_size = [770, 770, 770, 770]
# with text
for i, max_patch in enumerate(max_patches):
inputs = processor(text=input_str, images=image_input, max_patches=max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[0], max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[1], expected_hidden_size[i])
# without text input
for i, max_patch in enumerate(max_patches):
inputs = processor(images=image_input, max_patches=max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[0], max_patch)
self.assertEqual(inputs["flattened_patches"][0].shape[1], expected_hidden_size[i])
@require_torch
@require_vision
def test_image_processor_defaults_preserved_by_image_kwargs(self):
# Rewrite as pix2struct processor return "flattened_patches" and not "pixel_values"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor", max_patches=1024, patch_size={"height": 8, "width": 8})
print("image_processor", image_processor)
tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertEqual(len(inputs["flattened_patches"][0][0]), 194)
@require_torch
@require_vision
def test_kwargs_overrides_default_image_processor_kwargs(self):
# Rewrite as pix2struct processor return "flattened_patches" and not "pixel_values"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor", max_patches=4096)
tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input, max_patches=1024)
self.assertEqual(len(inputs["flattened_patches"][0]), 1024)
@require_torch
@require_vision
def test_unstructured_kwargs(self):
# Rewrite as pix2struct processor return "decoder_input_ids" and not "input_ids"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
max_patches=1024,
padding="max_length",
max_length=76,
)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["decoder_input_ids"][0]), 76)
@require_torch
@require_vision
def test_unstructured_kwargs_batched(self):
# Rewrite as pix2struct processor return "decoder_input_ids" and not "input_ids"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2)
image_input = self.prepare_image_inputs(batch_size=2)
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
max_patches=1024,
padding="longest",
max_length=76,
)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["decoder_input_ids"][0]), 5)
@require_torch
@require_vision
def test_structured_kwargs_nested(self):
# Rewrite as pix2struct processor return "decoder_input_ids" and not "input_ids"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"max_patches": 1024},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(text=input_str, images=image_input, **all_kwargs)
self.skip_processor_without_typed_kwargs(processor)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["decoder_input_ids"][0]), 76)
@require_torch
@require_vision
def test_structured_kwargs_nested_from_dict(self):
# Rewrite as pix2struct processor return "decoder_input_ids" and not "input_ids"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"max_patches": 1024},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(text=input_str, images=image_input, **all_kwargs)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["decoder_input_ids"][0]), 76)
| Pix2StructProcessorTest |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/worker_api/datamodels_ui.py | {
"start": 1128,
"end": 1534
} | class ____(WorkerStateBody):
"""Details of the worker state sent to the scheduler."""
worker_name: Annotated[str, Field(description="Name of the worker.")]
first_online: Annotated[datetime | None, Field(description="When the worker was first online.")] = None
last_heartbeat: Annotated[
datetime | None, Field(description="When the worker last sent a heartbeat.")
] = None
| Worker |
python | scrapy__scrapy | tests/test_downloadermiddleware_httpcache.py | {
"start": 25267,
"end": 25509
} | class ____(
TestBase, StorageTestMixin, RFC2616PolicyTestMixin
):
storage_class = "scrapy.extensions.httpcache.FilesystemCacheStorage"
policy_class = "scrapy.extensions.httpcache.RFC2616Policy"
| TestFilesystemStorageWithRFC2616Policy |
python | joke2k__faker | tests/providers/test_phone_number.py | {
"start": 19986,
"end": 20075
} | class ____(TestEnPh):
"""Test tl_PH phone number provider methods"""
pass
| TestTlPh |
python | coleifer__peewee | playhouse/sqlite_ext.py | {
"start": 8448,
"end": 9748
} | class ____(Field):
def __init__(self, unindexed=False, column_name=None, **k):
if k:
raise ValueError('SearchField does not accept these keyword '
'arguments: %s.' % sorted(k))
super(SearchField, self).__init__(unindexed=unindexed,
column_name=column_name, null=True)
def match(self, term):
return match(self, term)
@property
def fts_column_index(self):
if not hasattr(self, '_fts_column_index'):
search_fields = [f.name for f in self.model._meta.sorted_fields
if isinstance(f, SearchField)]
self._fts_column_index = search_fields.index(self.name)
return self._fts_column_index
def highlight(self, left, right):
column_idx = self.fts_column_index
return fn.highlight(self.model._meta.entity, column_idx, left, right)
def snippet(self, left, right, over_length='...', max_tokens=16):
if not (0 < max_tokens < 65):
raise ValueError('max_tokens must be between 1 and 64 (inclusive)')
column_idx = self.fts_column_index
return fn.snippet(self.model._meta.entity, column_idx, left, right,
over_length, max_tokens)
| SearchField |
python | ray-project__ray | python/ray/llm/_internal/common/utils/cloud_utils.py | {
"start": 1093,
"end": 2397
} | class ____(BaseModelExtended):
"""Unified mirror config for cloud storage (S3, GCS, or Azure).
Args:
bucket_uri: URI of the bucket (s3://, gs://, abfss://, or azure://)
extra_files: Additional files to download
"""
bucket_uri: Optional[str] = None
extra_files: List[ExtraFiles] = Field(default_factory=list)
@field_validator("bucket_uri")
@classmethod
def check_uri_format(cls, value):
if value is None:
return value
if not is_remote_path(value):
raise ValueError(
f'Got invalid value "{value}" for bucket_uri. '
'Expected a URI that starts with "s3://", "gs://", "abfss://", or "azure://".'
)
return value
@property
def storage_type(self) -> str:
"""Returns the storage type ('s3', 'gcs', 'abfss', or 'azure') based on the URI prefix."""
if self.bucket_uri is None:
return None
elif self.bucket_uri.startswith("s3://"):
return "s3"
elif self.bucket_uri.startswith("gs://"):
return "gcs"
elif self.bucket_uri.startswith("abfss://"):
return "abfss"
elif self.bucket_uri.startswith("azure://"):
return "azure"
return None
| CloudMirrorConfig |
python | doocs__leetcode | solution/2400-2499/2499.Minimum Total Cost to Make Arrays Unequal/Solution.py | {
"start": 0,
"end": 651
} | class ____:
def minimumTotalCost(self, nums1: List[int], nums2: List[int]) -> int:
ans = same = 0
cnt = Counter()
for i, (a, b) in enumerate(zip(nums1, nums2)):
if a == b:
same += 1
ans += i
cnt[a] += 1
m = lead = 0
for k, v in cnt.items():
if v * 2 > same:
m = v * 2 - same
lead = k
break
for i, (a, b) in enumerate(zip(nums1, nums2)):
if m and a != b and a != lead and b != lead:
ans += i
m -= 1
return -1 if m else ans
| Solution |
python | pytorch__pytorch | torch/_inductor/autoheuristic/learnedheuristic_interface.py | {
"start": 156,
"end": 790
} | class ____:
"""
LearnedHeuristic is a base class for all learned heuristics.
"""
def __init__(self) -> None:
pass
def check_precondition(
self,
metadata: AHMetadata,
context: AHContext,
) -> bool:
return True
def get_decision(
self, context: AHContext, choices: list[Choice]
) -> Optional[Choice]:
return None
def get_confidence_threshold(self) -> float:
return 1.0
def get_name(self) -> str:
return ""
def get_decisions_ranked(self, context: AHContext) -> Optional[list[str]]:
return None
| LearnedHeuristic |
python | Lightning-AI__lightning | examples/pytorch/domain_templates/imagenet.py | {
"start": 1846,
"end": 7286
} | class ____(LightningModule):
"""
>>> ImageNetLightningModel(data_path='missing') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ImageNetLightningModel(
(model): ResNet(...)
)
"""
def __init__(
self,
data_path: str,
arch: str = "resnet18",
weights: Optional[str] = None,
lr: float = 0.1,
momentum: float = 0.9,
weight_decay: float = 1e-4,
batch_size: int = 256,
workers: int = 4,
):
super().__init__()
self.arch = arch
self.weights = weights
self.lr = lr
self.momentum = momentum
self.weight_decay = weight_decay
self.data_path = data_path
self.batch_size = batch_size
self.workers = workers
self.model = get_torchvision_model(self.arch, weights=self.weights)
self.train_dataset: Optional[Dataset] = None
self.eval_dataset: Optional[Dataset] = None
# ToDo: this number of classes hall be parsed when the dataset is loaded from folder
self.train_acc1 = Accuracy(task="multiclass", num_classes=1000, top_k=1)
self.train_acc5 = Accuracy(task="multiclass", num_classes=1000, top_k=5)
self.eval_acc1 = Accuracy(task="multiclass", num_classes=1000, top_k=1)
self.eval_acc5 = Accuracy(task="multiclass", num_classes=1000, top_k=5)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
images, target = batch
output = self.model(images)
loss_train = F.cross_entropy(output, target)
self.log("train_loss", loss_train)
# update metrics
self.train_acc1(output, target)
self.train_acc5(output, target)
self.log("train_acc1", self.train_acc1, prog_bar=True)
self.log("train_acc5", self.train_acc5, prog_bar=True)
return loss_train
def eval_step(self, batch, batch_idx, prefix: str):
images, target = batch
output = self.model(images)
loss_val = F.cross_entropy(output, target)
self.log(f"{prefix}_loss", loss_val)
# update metrics
self.eval_acc1(output, target)
self.eval_acc5(output, target)
self.log(f"{prefix}_acc1", self.eval_acc1, prog_bar=True)
self.log(f"{prefix}_acc5", self.eval_acc5, prog_bar=True)
return loss_val
def validation_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, "val")
def test_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, "test")
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay)
scheduler = lr_scheduler.LambdaLR(optimizer, lambda epoch: 0.1 ** (epoch // 30))
return [optimizer], [scheduler]
def setup(self, stage: str):
if isinstance(self.trainer.strategy, ParallelStrategy):
# When using a single GPU per process and per `DistributedDataParallel`, we need to divide the batch size
# ourselves based on the total number of GPUs we have
num_processes = max(1, self.trainer.strategy.num_processes)
self.batch_size = int(self.batch_size / num_processes)
self.workers = int(self.workers / num_processes)
if stage == "fit":
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_dir = os.path.join(self.data_path, "train")
self.train_dataset = datasets.ImageFolder(
train_dir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]),
)
# all stages will use the eval dataset
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
val_dir = os.path.join(self.data_path, "val")
self.eval_dataset = datasets.ImageFolder(
val_dir,
transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]),
)
def train_dataloader(self):
return torch.utils.data.DataLoader(
dataset=self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.workers,
pin_memory=True,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.eval_dataset, batch_size=self.batch_size, num_workers=self.workers, pin_memory=True
)
def test_dataloader(self):
return self.val_dataloader()
if __name__ == "__main__":
LightningCLI(
ImageNetLightningModel,
trainer_defaults={
"max_epochs": 90,
"accelerator": "auto",
"devices": 1,
"logger": False,
"benchmark": True,
"callbacks": [
# the PyTorch example refreshes every 10 batches
TQDMProgressBar(refresh_rate=10),
# save when the validation top1 accuracy improves
ModelCheckpoint(monitor="val_acc1", mode="max"),
],
},
seed_everything_default=42,
save_config_kwargs={"overwrite": True},
)
| ImageNetLightningModel |
python | django__django | django/db/models/query.py | {
"start": 8182,
"end": 8977
} | class ____(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if query.selected:
names = list(query.selected)
else:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield {names[i]: row[i] for i in indexes}
| ValuesIterable |
python | tiangolo__fastapi | docs_src/cookie_param_models/tutorial002_pv1_an_py310.py | {
"start": 116,
"end": 387
} | class ____(BaseModel):
class Config:
extra = "forbid"
session_id: str
fatebook_tracker: str | None = None
googall_tracker: str | None = None
@app.get("/items/")
async def read_items(cookies: Annotated[Cookies, Cookie()]):
return cookies
| Cookies |
python | openai__openai-python | src/openai/_streaming.py | {
"start": 587,
"end": 3923
} | class ____(Generic[_T]):
"""Provides the core interface to iterate over a synchronous stream response."""
response: httpx.Response
_decoder: SSEBytesDecoder
def __init__(
self,
*,
cast_to: type[_T],
response: httpx.Response,
client: OpenAI,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
def __next__(self) -> _T:
return self._iterator.__next__()
def __iter__(self) -> Iterator[_T]:
for item in self._iterator:
yield item
def _iter_events(self) -> Iterator[ServerSentEvent]:
yield from self._decoder.iter_bytes(self.response.iter_bytes())
def __stream__(self) -> Iterator[_T]:
cast_to = cast(Any, self._cast_to)
response = self.response
process_data = self._client._process_response_data
iterator = self._iter_events()
for sse in iterator:
if sse.data.startswith("[DONE]"):
break
# we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
if sse.event and sse.event.startswith("thread."):
data = sse.json()
if sse.event == "error" and is_mapping(data) and data.get("error"):
message = None
error = data.get("error")
if is_mapping(error):
message = error.get("message")
if not message or not isinstance(message, str):
message = "An error occurred during streaming"
raise APIError(
message=message,
request=self.response.request,
body=data["error"],
)
yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
else:
data = sse.json()
if is_mapping(data) and data.get("error"):
message = None
error = data.get("error")
if is_mapping(error):
message = error.get("message")
if not message or not isinstance(message, str):
message = "An error occurred during streaming"
raise APIError(
message=message,
request=self.response.request,
body=data["error"],
)
yield process_data(data=data, cast_to=cast_to, response=response)
# As we might not fully consume the response stream, we need to close it explicitly
response.close()
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.close()
def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
self.response.close()
| Stream |
python | PyCQA__pylint | tests/functional/b/bad_reversed_sequence_py37.py | {
"start": 191,
"end": 293
} | class ____(dict):
pass
reversed(InheritDict({'a': 1, 'b': 2})) # [bad-reversed-sequence]
| InheritDict |
python | allegroai__clearml | clearml/automation/job.py | {
"start": 27530,
"end": 32064
} | class ____(ClearmlJob):
"""
Run jobs locally as a sub-process, use only when no agents are available (this will not use queues)
or for debug purposes.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(LocalClearmlJob, self).__init__(*args, **kwargs)
self._job_process = None
self._local_temp_file = None
def launch(self, queue_name: str = None) -> bool:
"""
Launch job as a subprocess, ignores "queue_name"
:param queue_name: Ignored
:return: True if successful
"""
if self._is_cached_task:
return False
# check if standalone
diff = self.task.data.script.diff
if diff and not diff.lstrip().startswith("diff "):
# standalone, we need to create if
fd, local_filename = tempfile.mkstemp(suffix=".py")
os.close(fd)
with open(local_filename, "wt") as f:
f.write(diff)
self._local_temp_file = local_filename
else:
local_filename = self.task.data.script.entry_point
cwd = os.path.join(os.getcwd(), self.task.data.script.working_dir or "")
# try to check based on current root repo + entrypoint
if Task.current_task() and not (Path(cwd) / local_filename).is_file():
working_dir = Task.current_task().data.script.working_dir or ""
working_dir = working_dir.strip(".")
levels = 0
if working_dir:
levels = 1 + sum(1 for c in working_dir if c == "/")
cwd = os.path.abspath(os.path.join(os.getcwd(), os.sep.join([".."] * levels))) if levels else os.getcwd()
cwd = os.path.join(cwd, working_dir)
python = sys.executable
env = dict(**os.environ)
env.pop("CLEARML_PROC_MASTER_ID", None)
env.pop("TRAINS_PROC_MASTER_ID", None)
env["CLEARML_TASK_ID"] = env["TRAINS_TASK_ID"] = str(self.task.id)
env["CLEARML_LOG_TASK_TO_BACKEND"] = "1"
env["CLEARML_SIMULATE_REMOTE_TASK"] = "1"
try:
if self._enable_local_imports:
current_python_path = env.get("PYTHONPATH")
env["PYTHONPATH"] = (
"{}:{}".format(current_python_path, sys.path[0]) if current_python_path else sys.path[0]
)
except Exception as e:
logger.warning("Could not append local path to PYTHONPATH: {}".format(e))
self.task.mark_started()
self._job_process = subprocess.Popen(args=[python, local_filename], cwd=cwd, env=env)
return True
def wait_for_process(self, timeout: Optional[int] = None) -> Optional[int]:
"""
Wait until Job subprocess completed/exited
:param timeout: Timeout in seconds to wait for the subprocess to finish. Default: None => infinite
:return Sub-process exit code. 0 is success, None if subprocess is not running or timeout
"""
if not self._job_process:
return None
try:
exit_code = self._job_process.wait(timeout=timeout)
except subprocess.TimeoutExpired:
return None
self._job_process = None
if self._local_temp_file:
# noinspection PyBroadException
try:
Path(self._local_temp_file).unlink()
except Exception:
pass
self._local_temp_file = None
if exit_code == 0:
self.task.mark_completed()
else:
user_aborted = False
if self.task.status == Task.TaskStatusEnum.stopped:
self.task.reload()
if str(self.task.data.status_reason).lower().startswith("user aborted"):
user_aborted = True
if not user_aborted:
self.task.mark_failed(force=True)
return exit_code
def status(self, force: bool = False) -> str:
"""
Return the Job Task current status. Options are: "created", "queued", "in_progress", "stopped", "published",
"publishing", "closed", "failed", "completed", "unknown".
:param force: Force status update, otherwise, only refresh state every 1 sec
:return: Task status Task.TaskStatusEnum in string.
"""
if self._job_process:
# refresh the task state, we need to do it manually
self.wait_for_process(timeout=0)
return super(LocalClearmlJob, self).status(force=force)
| LocalClearmlJob |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_autodoc.py | {
"start": 2418,
"end": 2918
} | class ____(ModuleLevelDocumenter):
directivetype = "bokeh-enum"
objtype = "enum"
priority = 20
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, Enumeration)
# Override the Sphinx default `Documenter.get_object_members()`
# which is deprecated, and will soon be removed.
# Ref: https://github.com/bokeh/bokeh/issues/12462
def get_object_members(self, want_all):
return False, []
| EnumDocumenter |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py | {
"start": 57171,
"end": 61044
} | class ____(GoogleCloudBaseOperator):
"""
Gets a tag template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogGetTagTemplateOperator`
:param location: Required. The location of the tag template to get.
:param tag_template: Required. The ID of the tag template to get.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"tag_template",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
location: str,
tag_template: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template = tag_template
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_tag_template(
location=self.location,
tag_template=self.tag_template,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogTagTemplateLink.persist(
context=context,
tag_template_id=self.tag_template,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return TagTemplate.to_dict(result)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogGetEntryOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
| CloudDataCatalogGetTagTemplateOperator |
python | numba__numba | numba/tests/test_func_interface.py | {
"start": 40,
"end": 1038
} | class ____(unittest.TestCase):
def test_jit_function_docstring(self):
def add(x, y):
'''Return sum of two numbers'''
return x + y
c_add = jit(add)
self.assertEqual(c_add.__doc__, 'Return sum of two numbers')
def test_jit_function_name(self):
def add(x, y):
return x + y
c_add = jit(add)
self.assertEqual(c_add.__name__, 'add')
def test_jit_function_module(self):
def add(x, y):
return x + y
c_add = jit(add)
# Expected answer depends on how you run this test.
# Compare to python function instead.
self.assertEqual(c_add.__module__, add.__module__)
def test_jit_function_code_object(self):
def add(x, y):
return x + y
c_add = jit(add)
self.assertEqual(c_add.__code__, add.__code__)
self.assertEqual(c_add.func_code, add.__code__)
if __name__ == '__main__':
unittest.main()
| TestFuncInterface |
python | doocs__leetcode | solution/0300-0399/0326.Power of Three/Solution.py | {
"start": 0,
"end": 176
} | class ____:
def isPowerOfThree(self, n: int) -> bool:
while n > 2:
if n % 3:
return False
n //= 3
return n == 1
| Solution |
python | ApeWorX__ape | src/ape/api/address.py | {
"start": 530,
"end": 9569
} | class ____(BaseInterface):
"""
A base address API class. All account-types subclass this type.
"""
@property
def _base_dir_values(self) -> list[str]:
"""
This exists because when you call ``dir(BaseAddress)``, you get the type's return
value and not the instances. This allows base-classes to make use of shared
``IPython`` ``__dir__`` values.
"""
# NOTE: mypy is confused by properties.
# https://github.com/python/typing/issues/1112
return [
str(BaseAddress.address.fget.__name__), # type: ignore[attr-defined]
str(BaseAddress.balance.fget.__name__), # type: ignore[attr-defined]
str(BaseAddress.code.fget.__name__), # type: ignore[attr-defined]
str(BaseAddress.codesize.fget.__name__), # type: ignore[attr-defined]
str(BaseAddress.nonce.fget.__name__), # type: ignore[attr-defined]
str(BaseAddress.is_contract.fget.__name__), # type: ignore[attr-defined]
"provider", # Is a class property
]
@property
@abstractmethod
def address(self) -> AddressType:
"""
The address of this account. Subclasses must override and provide this value.
"""
def __eq__(self, other: object) -> bool:
"""
Compares :class:`~ape.api.BaseAddress` or ``str`` objects by converting to
:class:`~ape.types.address.AddressType`.
Returns:
bool: comparison result
"""
convert = self.conversion_manager.convert
try:
return convert(self, AddressType) == convert(other, AddressType)
except ConversionError:
# Check other __eq__
return NotImplemented
def __dir__(self) -> list[str]:
"""
Display methods to IPython on ``a.[TAB]`` tab completion.
Overridden to lessen amount of methods shown to only those that are useful.
Returns:
list[str]: Method names that IPython uses for tab completion.
"""
return self._base_dir_values
@log_instead_of_fail(default="<BaseAddress>")
def __repr__(self) -> str:
cls_name = getattr(type(self), "__name__", BaseAddress.__name__)
return f"<{cls_name} {self.address}>"
def __str__(self) -> str:
"""
Convert this class to a ``str`` address.
Returns:
str: The stringified address.
"""
return self.address
def __hash__(self) -> int:
"""Return consistent hash for all address representations with the same value."""
return hash(int(self.address, base=16))
def __call__(self, **kwargs) -> "ReceiptAPI":
"""
Call this address directly. For contracts, this may mean invoking their
default handler.
Args:
**kwargs: Transaction arguments, such as ``sender`` or ``data``.
Returns:
:class:`~ape.api.transactions.ReceiptAPI`
"""
txn = self.as_transaction(**kwargs)
if "sender" in kwargs:
if hasattr(kwargs["sender"], "call"):
# AccountAPI
sender = kwargs["sender"]
return sender.call(txn, **kwargs)
elif hasattr(kwargs["sender"], "prepare_transaction"):
# BaseAddress (likely, a ContractInstance)
prepare_transaction = kwargs["sender"].prepare_transaction(txn)
return self.provider.send_transaction(prepare_transaction)
elif "sender" not in kwargs and self.account_manager.default_sender is not None:
return self.account_manager.default_sender.call(txn, **kwargs)
return self.provider.send_transaction(txn)
@property
def nonce(self) -> int:
"""
The number of transactions associated with the address.
"""
return self.provider.get_nonce(self.address)
@property
def balance(self) -> int:
"""
The total balance of the account.
"""
bal = self.provider.get_balance(self.address)
# By using CurrencyValue, we can compare with
# strings like "1 ether".
return CurrencyValue(bal)
# @balance.setter
# NOTE: commented out because of failure noted within `__setattr__`
def _set_balance_(self, value: Any):
if isinstance(value, str):
value = self.conversion_manager.convert(value, int)
self.provider.set_balance(self.address, value)
def __setattr__(self, attr: str, value: Any) -> None:
# NOTE: Need to do this until https://github.com/pydantic/pydantic/pull/2625 is figured out
if attr == "balance":
self._set_balance_(value)
else:
super().__setattr__(attr, value)
@property
def code(self) -> "ContractCode":
"""
The raw bytes of the smart-contract code at the address.
"""
# NOTE: Chain manager handles code caching.
return self.chain_manager.get_code(self.address)
@property
def codesize(self) -> int:
"""
The number of bytes in the smart contract.
"""
code = self.code
return len(code) if isinstance(code, bytes) else len(bytes.fromhex(code.lstrip("0x")))
@property
def is_contract(self) -> bool:
"""
``True`` when there is code associated with the address.
"""
return self.codesize > 0
@property
def delegate(self) -> Optional["BaseAddress"]:
"""
Check and see if Account has a "delegate" contract, which is a contract that this account
delegates functionality to. This could be from many contexts, such as a Smart Wallet like
Safe (https://github.com/ApeWorX/ape-safe) which has a Singleton class it forwards to, or
an EOA using an EIP7702-style delegate. Returning ``None`` means that the account does not
have a delegate.
The default behavior is to use `:class:~ape.managers.ChainManager.get_delegate` to check if
the account has a proxy, such as ``SafeProxy`` for ``ape-safe`` or an EIP7702 delegate.
Returns:
Optional[`:class:~ape.contracts.ContractInstance`]:
The contract instance of the delegate contract (if available).
"""
return self.chain_manager.get_delegate(self.address)
@cached_property
def history(self) -> "AccountHistory":
"""
The list of transactions that this account has made on the current chain.
"""
return self.chain_manager.history[self.address]
def as_transaction(self, **kwargs) -> "TransactionAPI":
sign = kwargs.pop("sign", False)
converted_kwargs = self.conversion_manager.convert_method_kwargs(kwargs)
tx = self.provider.network.ecosystem.create_transaction(
receiver=self.address, **converted_kwargs
)
if sender := kwargs.get("sender"):
if hasattr(sender, "prepare_transaction"):
prepared = sender.prepare_transaction(tx)
return (sender.sign_transaction(prepared) or prepared) if sign else prepared
return tx
def estimate_gas_cost(self, **kwargs) -> int:
txn = self.as_transaction(**kwargs)
return self.provider.estimate_gas_cost(txn)
def prepare_transaction(self, txn: "TransactionAPI", **kwargs) -> "TransactionAPI":
"""
Set default values on a transaction.
Raises:
:class:`~ape.exceptions.AccountsError`: When the account cannot afford the transaction
or the nonce is invalid.
:class:`~ape.exceptions.TransactionError`: When given negative required confirmations.
Args:
txn (:class:`~ape.api.transactions.TransactionAPI`): The transaction to prepare.
**kwargs: Sub-classes, such as :class:`~ape.api.accounts.AccountAPI`, use additional kwargs.
Returns:
:class:`~ape.api.transactions.TransactionAPI`
"""
# NOTE: Allow overriding nonce, assume user understands what this does
if txn.nonce is None:
txn.nonce = self.nonce
elif txn.nonce < self.nonce:
raise AccountsError("Invalid nonce, will not publish.")
txn = self.provider.prepare_transaction(txn)
if (
txn.sender not in self.account_manager.test_accounts._impersonated_accounts
and txn.total_transfer_value > self.balance
):
raise AccountsError(
f"Transfer value meets or exceeds account balance "
f"for account '{self.address}' on chain '{self.chain_manager.chain_id}' "
f"using provider '{self.provider.name}'.\n"
"Are you using the correct account / chain / provider combination?\n"
f"(transfer_value={txn.total_transfer_value}, balance={self.balance})."
)
txn.sender = txn.sender or self.address
return txn
| BaseAddress |
python | pandas-dev__pandas | pandas/tests/io/formats/test_format.py | {
"start": 66944,
"end": 68054
} | class ____:
def test_1d_array(self):
# _GenericArrayFormatter is used on types for which there isn't a dedicated
# formatter. np.bool_ is one of those types.
obj = fmt._GenericArrayFormatter(np.array([True, False]))
res = obj.get_result()
assert len(res) == 2
# Results should be right-justified.
assert res[0] == " True"
assert res[1] == " False"
def test_2d_array(self):
obj = fmt._GenericArrayFormatter(np.array([[True, False], [False, True]]))
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [True, False]"
assert res[1] == " [False, True]"
def test_3d_array(self):
obj = fmt._GenericArrayFormatter(
np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
)
res = obj.get_result()
assert len(res) == 2
assert res[0] == " [[True, True], [False, False]]"
assert res[1] == " [[False, True], [True, False]]"
def _three_digit_exp():
return f"{1.7e8:.4g}" == "1.7e+008"
| TestGenericArrayFormatter |
python | apache__thrift | test/py/TestServer.py | {
"start": 7336,
"end": 16123
} | class ____(TProtocol.TProtocolFactory):
def __init__(self, encapsulated):
super(TPedanticSequenceIdProtocolFactory, self).__init__()
self.encapsulated = encapsulated
def getProtocol(self, trans):
return make_pedantic(self.encapsulated.getProtocol(trans))
def main(options):
# common header allowed client types
allowed_client_types = [
THeaderTransport.THeaderClientType.HEADERS,
THeaderTransport.THeaderClientType.FRAMED_BINARY,
THeaderTransport.THeaderClientType.UNFRAMED_BINARY,
THeaderTransport.THeaderClientType.FRAMED_COMPACT,
THeaderTransport.THeaderClientType.UNFRAMED_COMPACT,
]
# set up the protocol factory form the --protocol option
prot_factories = {
'accel': TBinaryProtocol.TBinaryProtocolAcceleratedFactory(),
'multia': TBinaryProtocol.TBinaryProtocolAcceleratedFactory(),
'accelc': TCompactProtocol.TCompactProtocolAcceleratedFactory(),
'multiac': TCompactProtocol.TCompactProtocolAcceleratedFactory(),
'binary': TPedanticSequenceIdProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory()),
'multi': TPedanticSequenceIdProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory()),
'compact': TCompactProtocol.TCompactProtocolFactory(),
'multic': TCompactProtocol.TCompactProtocolFactory(),
'header': THeaderProtocol.THeaderProtocolFactory(allowed_client_types),
'multih': THeaderProtocol.THeaderProtocolFactory(allowed_client_types),
'json': TJSONProtocol.TJSONProtocolFactory(),
'multij': TJSONProtocol.TJSONProtocolFactory(),
}
pfactory = prot_factories.get(options.proto, None)
if pfactory is None:
raise AssertionError('Unknown --protocol option: %s' % options.proto)
try:
pfactory.string_length_limit = options.string_limit
pfactory.container_length_limit = options.container_limit
except Exception:
# Ignore errors for those protocols that does not support length limit
pass
# get the server type (TSimpleServer, TNonblockingServer, etc...)
if len(args) > 1:
raise AssertionError('Only one server type may be specified, not multiple types.')
server_type = args[0]
if options.trans == 'http':
server_type = 'THttpServer'
# Set up the handler and processor objects
handler = TestHandler(options)
processor = ThriftTest.Processor(handler)
if options.proto.startswith('multi'):
secondHandler = SecondHandler()
secondProcessor = SecondService.Processor(secondHandler)
multiplexedProcessor = TMultiplexedProcessor()
multiplexedProcessor.registerDefault(processor)
multiplexedProcessor.registerProcessor('ThriftTest', processor)
multiplexedProcessor.registerProcessor('SecondService', secondProcessor)
processor = multiplexedProcessor
global server
# Handle THttpServer as a special case
if server_type == 'THttpServer':
if options.ssl:
__certfile = os.path.join(os.path.dirname(SCRIPT_DIR), "keys", "server.crt")
__keyfile = os.path.join(os.path.dirname(SCRIPT_DIR), "keys", "server.key")
server = THttpServer.THttpServer(processor, ('', options.port), pfactory, cert_file=__certfile, key_file=__keyfile)
else:
server = THttpServer.THttpServer(processor, ('', options.port), pfactory)
server.serve()
sys.exit(0)
# set up server transport and transport factory
abs_key_path = os.path.join(os.path.dirname(SCRIPT_DIR), 'keys', 'server.pem')
host = None
if options.ssl:
from thrift.transport import TSSLSocket
transport = TSSLSocket.TSSLServerSocket(host, options.port, certfile=abs_key_path)
else:
transport = TSocket.TServerSocket(host, options.port, options.domain_socket)
tfactory = TTransport.TBufferedTransportFactory()
if options.trans == 'buffered':
tfactory = TTransport.TBufferedTransportFactory()
elif options.trans == 'framed':
tfactory = TTransport.TFramedTransportFactory()
elif options.trans == '':
raise AssertionError('Unknown --transport option: %s' % options.trans)
else:
tfactory = TTransport.TBufferedTransportFactory()
# if --zlib, then wrap server transport, and use a different transport factory
if options.zlib:
transport = TZlibTransport.TZlibTransport(transport) # wrap with zlib
tfactory = TZlibTransport.TZlibTransportFactory()
# do server-specific setup here:
if server_type == "TNonblockingServer":
server = TNonblockingServer.TNonblockingServer(processor, transport, inputProtocolFactory=pfactory)
elif server_type == "TProcessPoolServer":
import signal
from thrift.server import TProcessPoolServer
server = TProcessPoolServer.TProcessPoolServer(processor, transport, tfactory, pfactory)
server.setNumWorkers(5)
def set_alarm():
def clean_shutdown(signum, frame):
for worker in server.workers:
if options.verbose > 0:
logging.info('Terminating worker: %s' % worker)
worker.terminate()
if options.verbose > 0:
logging.info('Requesting server to stop()')
try:
server.stop()
except Exception:
pass
signal.signal(signal.SIGALRM, clean_shutdown)
signal.alarm(4)
set_alarm()
else:
# look up server class dynamically to instantiate server
ServerClass = getattr(TServer, server_type)
server = ServerClass(processor, transport, tfactory, pfactory)
# enter server main loop
server.serve()
def exit_gracefully(signum, frame):
print("SIGINT received\n")
server.shutdown() # doesn't work properly, yet
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, exit_gracefully)
parser = OptionParser()
parser.add_option('--libpydir', type='string', dest='libpydir',
help='include this directory to sys.path for locating library code')
parser.add_option('--genpydir', type='string', dest='genpydir',
default='gen-py',
help='include this directory to sys.path for locating generated code')
parser.add_option("--port", type="int", dest="port",
help="port number for server to listen on")
parser.add_option("--zlib", action="store_true", dest="zlib",
help="use zlib wrapper for compressed transport")
parser.add_option("--ssl", action="store_true", dest="ssl",
help="use SSL for encrypted transport")
parser.add_option('-v', '--verbose', action="store_const",
dest="verbose", const=2,
help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
dest="verbose", const=0,
help="minimal output")
parser.add_option('--protocol', dest="proto", type="string",
help="protocol to use, one of: accel, accelc, binary, compact, json, multi, multia, multiac, multic, multih, multij")
parser.add_option('--transport', dest="trans", type="string",
help="transport to use, one of: buffered, framed, http")
parser.add_option('--domain-socket', dest="domain_socket", type="string",
help="Unix domain socket path")
parser.add_option('--container-limit', dest='container_limit', type='int', default=None)
parser.add_option('--string-limit', dest='string_limit', type='int', default=None)
parser.set_defaults(port=9090, verbose=1, proto='binary', transport='buffered')
options, args = parser.parse_args()
# Print TServer log to stdout so that the test-runner can redirect it to log files
logging.basicConfig(level=options.verbose)
sys.path.insert(0, os.path.join(SCRIPT_DIR, options.genpydir))
from ThriftTest import ThriftTest, SecondService
from thrift.Thrift import TException
from thrift.TMultiplexedProcessor import TMultiplexedProcessor
from thrift.transport import THeaderTransport
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import TZlibTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
from thrift.protocol import TJSONProtocol
from thrift.server import TServer, TNonblockingServer, THttpServer
sys.exit(main(options))
| TPedanticSequenceIdProtocolFactory |
python | django__django | tests/model_formsets_regress/models.py | {
"start": 154,
"end": 291
} | class ____(models.Model):
user = models.ForeignKey(User, models.CASCADE, to_field="username")
data = models.IntegerField()
| UserSite |
python | pytorch__pytorch | test/quantization/pt2e/test_quantize_pt2e_qat.py | {
"start": 33593,
"end": 33859
} | class ____(TestQuantizePT2EQAT_ConvBn_Base):
dim = 1
example_inputs = (torch.randn(1, 3, 5),)
conv_class = torch.nn.Conv1d
conv_transpose_class = torch.nn.ConvTranspose1d
bn_class = torch.nn.BatchNorm1d
@skipIfNoQNNPACK
| TestQuantizePT2EQAT_ConvBn1d |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type_field.py | {
"start": 2134,
"end": 2846
} | class ____(object):
"""Sentinel value that's not equal (w/ `is`) to any user value."""
def __init__(self, name):
self._name = name
def __repr__(self):
return self._name
_NoneType = type(None)
def _issubclass(cls, clsinfo):
"""Internal issubclass that doesn't raise TypeError."""
try:
return issubclass(cls, clsinfo)
except TypeError:
# issubclass with GenericAlias instances raises TypeError. For example,
# `issubclass(tuple[int], composite_tensor.CompositeTensor)`.
return False
# ==============================================================================
# ExtensionTypeField
# ==============================================================================
| Sentinel |
python | realpython__materials | python-protocol/shapes_v2.py | {
"start": 155,
"end": 384
} | class ____:
def __init__(self, radius) -> None:
self.radius = radius
def get_area(self) -> float:
return pi * self.radius**2
def get_perimeter(self) -> float:
return 2 * pi * self.radius
| Circle |
python | doocs__leetcode | solution/1300-1399/1313.Decompress Run-Length Encoded List/Solution.py | {
"start": 0,
"end": 164
} | class ____:
def decompressRLElist(self, nums: List[int]) -> List[int]:
return [nums[i + 1] for i in range(0, len(nums), 2) for _ in range(nums[i])]
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 70938,
"end": 71126
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
id: Optional[str] = None
| OnFailureItem |
python | openai__openai-python | src/openai/resources/beta/chatkit/sessions.py | {
"start": 1249,
"end": 5857
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> SessionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return SessionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> SessionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return SessionsWithStreamingResponse(self)
def create(
self,
*,
user: str,
workflow: ChatSessionWorkflowParam,
chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
expires_after: ChatSessionExpiresAfterParam | Omit = omit,
rate_limits: ChatSessionRateLimitsParam | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
Create a ChatKit session
Args:
user: A free-form string that identifies your end user; ensures this Session can
access other objects that have the same `user` scope.
workflow: Workflow that powers the session.
chatkit_configuration: Optional overrides for ChatKit runtime configuration features
expires_after: Optional override for session expiration timing in seconds from creation.
Defaults to 10 minutes.
rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return self._post(
"/chatkit/sessions",
body=maybe_transform(
{
"user": user,
"workflow": workflow,
"chatkit_configuration": chatkit_configuration,
"expires_after": expires_after,
"rate_limits": rate_limits,
},
session_create_params.SessionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatSession,
)
def cancel(
self,
session_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatSession:
"""
Cancel a ChatKit session
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not session_id:
raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
return self._post(
f"/chatkit/sessions/{session_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatSession,
)
| Sessions |
python | sympy__sympy | sympy/polys/polyerrors.py | {
"start": 3571,
"end": 3882
} | class ____(BasePolynomialError):
def __init__(self, func, nargs, exc):
self.func = func
self.nargs = nargs
self.exc = exc
def __str__(self):
return "%s(%s) failed without generators" % (self.func, ', '.join(map(str, self.exc.exprs[:self.nargs])))
@public
| ComputationFailed |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/dataset.py | {
"start": 1814,
"end": 18079
} | class ____(GoogleBaseHook, OperationHelper):
"""Hook for Google Cloud Vertex AI Dataset APIs."""
def get_dataset_service_client(self, region: str | None = None) -> DatasetServiceClient:
"""Return DatasetServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return DatasetServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
@staticmethod
def extract_dataset_id(obj: dict) -> str:
"""Return unique id of the dataset."""
return obj["name"].rpartition("/")[-1]
@GoogleBaseHook.fallback_to_default_project_id
def create_dataset(
self,
project_id: str,
region: str,
dataset: Dataset | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The Dataset to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_dataset(
request={
"parent": parent,
"dataset": dataset,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_dataset(
self,
project_id: str,
region: str,
dataset: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.delete_dataset(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def export_data(
self,
project_id: str,
region: str,
dataset: str,
export_config: ExportDataConfig | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Export data from a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to export.
:param export_config: Required. The desired output location.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.export_data(
request={
"name": name,
"export_config": export_config,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_annotation_spec(
self,
project_id: str,
region: str,
dataset: str,
annotation_spec: str,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AnnotationSpec:
"""
Get an AnnotationSpec.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset.
:param annotation_spec: The ID of the AnnotationSpec resource.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.annotation_spec_path(project_id, region, dataset, annotation_spec)
result = client.get_annotation_spec(
request={
"name": name,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset(
self,
project_id: str,
region: str,
dataset: str,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Dataset:
"""
Get a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to export.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.get_dataset(
request={
"name": name,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def import_data(
self,
project_id: str,
region: str,
dataset: str,
import_configs: Sequence[ImportDataConfig],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Import data into a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to import.
:param import_configs: Required. The desired input locations. The contents of all input locations
will be imported in one batch.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.import_data(
request={
"name": name,
"import_configs": import_configs,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_annotations(
self,
project_id: str,
region: str,
dataset: str,
data_item: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListAnnotationsPager:
"""
List Annotations belongs to a data item.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset.
:param data_item: Required. The ID of the DataItem to list Annotations from.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.data_item_path(project_id, region, dataset, data_item)
result = client.list_annotations(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_data_items(
self,
project_id: str,
region: str,
dataset: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListDataItemsPager:
"""
List DataItems in a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.dataset_path(project_id, region, dataset)
result = client.list_data_items(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_datasets(
self,
project_id: str,
region: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListDatasetsPager:
"""
List Datasets in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_datasets(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
def update_dataset(
self,
project_id: str,
region: str,
dataset_id: str,
dataset: Dataset | dict,
update_mask: FieldMask | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Dataset:
"""
Update a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset_id: Required. The ID of the Dataset.
:param dataset: Required. The Dataset which replaces the resource on the server.
:param update_mask: Required. The update mask applies to the resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
dataset["name"] = client.dataset_path(project_id, region, dataset_id)
result = client.update_dataset(
request={
"dataset": dataset,
"update_mask": update_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
| DatasetHook |
python | pytest-dev__pytest | testing/test_pathlib.py | {
"start": 2271,
"end": 4215
} | class ____:
"""Test our port of py.common.FNMatcher (fnmatch_ex)."""
if sys.platform == "win32":
drv1 = "c:"
drv2 = "d:"
else:
drv1 = "/c"
drv2 = "/d"
@pytest.mark.parametrize(
"pattern, path",
[
("*.py", "foo.py"),
("*.py", "bar/foo.py"),
("test_*.py", "foo/test_foo.py"),
("tests/*.py", "tests/foo.py"),
(f"{drv1}/*.py", f"{drv1}/foo.py"),
(f"{drv1}/foo/*.py", f"{drv1}/foo/foo.py"),
("tests/**/test*.py", "tests/foo/test_foo.py"),
("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py"),
("tests/**/doc/**/test*.py", "tests/foo/doc/bar/test_foo.py"),
],
)
def test_matching(self, pattern: str, path: str) -> None:
assert fnmatch_ex(pattern, path)
def test_matching_abspath(self) -> None:
abspath = os.path.abspath(os.path.join("tests/foo.py"))
assert fnmatch_ex("tests/foo.py", abspath)
@pytest.mark.parametrize(
"pattern, path",
[
("*.py", "foo.pyc"),
("*.py", "foo/foo.pyc"),
("tests/*.py", "foo/foo.py"),
(f"{drv1}/*.py", f"{drv2}/foo.py"),
(f"{drv1}/foo/*.py", f"{drv2}/foo/foo.py"),
("tests/**/test*.py", "tests/foo.py"),
("tests/**/test*.py", "foo/test_foo.py"),
("tests/**/doc/test*.py", "tests/foo/bar/doc/foo.py"),
("tests/**/doc/test*.py", "tests/foo/bar/test_foo.py"),
],
)
def test_not_matching(self, pattern: str, path: str) -> None:
assert not fnmatch_ex(pattern, path)
@pytest.fixture(params=[True, False])
def ns_param(request: pytest.FixtureRequest) -> bool:
"""
Simple parametrized fixture for tests which call import_path() with consider_namespace_packages
using True and False.
"""
return bool(request.param)
| TestFNMatcherPort |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 585197,
"end": 586047
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"column",
"kind",
"line",
"message",
"path",
"source",
"suggestion",
)
column = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="column")
kind = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="kind")
line = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="line")
message = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="message")
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
source = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="source")
suggestion = sgqlc.types.Field(String, graphql_name="suggestion")
| RepositoryCodeownersError |
python | ray-project__ray | python/ray/train/lightgbm/lightgbm_predictor.py | {
"start": 559,
"end": 5532
} | class ____(Predictor):
"""A predictor for LightGBM models.
Args:
model: The LightGBM booster to use for predictions.
preprocessor: A preprocessor used to transform data batches prior
to prediction.
"""
def __init__(
self, model: lightgbm.Booster, preprocessor: Optional["Preprocessor"] = None
):
self.model = model
super().__init__(preprocessor)
def __repr__(self):
return (
f"{self.__class__.__name__}(model={self.model!r}, "
f"preprocessor={self._preprocessor!r})"
)
@classmethod
def from_checkpoint(cls, checkpoint: LightGBMCheckpoint) -> "LightGBMPredictor":
"""Instantiate the predictor from a LightGBMCheckpoint.
Args:
checkpoint: The checkpoint to load the model and preprocessor from.
"""
model = checkpoint.get_model()
preprocessor = checkpoint.get_preprocessor()
return cls(model=model, preprocessor=preprocessor)
def predict(
self,
data: DataBatchType,
feature_columns: Optional[Union[List[str], List[int]]] = None,
**predict_kwargs,
) -> DataBatchType:
"""Run inference on data batch.
Args:
data: A batch of input data.
feature_columns: The names or indices of the columns in the
data to use as features to predict on. If None, then use
all columns in ``data``.
**predict_kwargs: Keyword arguments passed to
``lightgbm.Booster.predict``.
Examples:
.. testcode::
import numpy as np
import lightgbm as lgbm
from ray.train.lightgbm import LightGBMPredictor
train_X = np.array([[1, 2], [3, 4]])
train_y = np.array([0, 1])
model = lgbm.LGBMClassifier().fit(train_X, train_y)
predictor = LightGBMPredictor(model=model.booster_)
data = np.array([[1, 2], [3, 4]])
predictions = predictor.predict(data)
# Only use first and second column as the feature
data = np.array([[1, 2, 8], [3, 4, 9]])
predictions = predictor.predict(data, feature_columns=[0, 1])
import pandas as pd
import lightgbm as lgbm
from ray.train.lightgbm import LightGBMPredictor
train_X = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
train_y = pd.Series([0, 1])
model = lgbm.LGBMClassifier().fit(train_X, train_y)
predictor = LightGBMPredictor(model=model.booster_)
# Pandas dataframe.
data = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
predictions = predictor.predict(data)
# Only use first and second column as the feature
data = pd.DataFrame([[1, 2, 8], [3, 4, 9]], columns=["A", "B", "C"])
predictions = predictor.predict(data, feature_columns=["A", "B"])
Returns:
Prediction result.
"""
return Predictor.predict(
self, data, feature_columns=feature_columns, **predict_kwargs
)
def _predict_pandas(
self,
data: "pd.DataFrame",
feature_columns: Optional[Union[List[str], List[int]]] = None,
**predict_kwargs,
) -> pd.DataFrame:
feature_names = None
if TENSOR_COLUMN_NAME in data:
data = data[TENSOR_COLUMN_NAME].to_numpy()
data = _unwrap_ndarray_object_type_if_needed(data)
if feature_columns:
# In this case feature_columns is a list of integers
data = data[:, feature_columns]
# Turn into dataframe to make dtype resolution easy
data = pd.DataFrame(data, columns=feature_names)
data = data.infer_objects()
# Pandas does not detect categorical dtypes. Any remaining object
# dtypes are probably categories, so convert them.
# This will fail if we have a category composed entirely of
# integers, but this is the best we can do here.
update_dtypes = {}
for column in data.columns:
dtype = data.dtypes[column]
if is_object_dtype(dtype):
update_dtypes[column] = pd.CategoricalDtype()
if update_dtypes:
data = data.astype(update_dtypes, copy=False)
elif feature_columns:
# feature_columns is a list of integers or strings
data = data[feature_columns]
df = pd.DataFrame(self.model.predict(data, **predict_kwargs))
df.columns = (
["predictions"]
if len(df.columns) == 1
else [f"predictions_{i}" for i in range(len(df.columns))]
)
return df
| LightGBMPredictor |
python | pennersr__django-allauth | allauth/socialaccount/providers/openid/models.py | {
"start": 357,
"end": 636
} | class ____(models.Model):
server_url = models.CharField(max_length=255)
timestamp = models.IntegerField()
salt = models.CharField(max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.server_url
| OpenIDNonce |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1402666,
"end": 1402878
} | class ____(MultiTimeUnit):
"""LocalMultiTimeUnit schema wrapper."""
_schema = {"$ref": "#/definitions/LocalMultiTimeUnit"}
def __init__(self, *args):
super().__init__(*args)
| LocalMultiTimeUnit |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_bytes.py | {
"start": 485,
"end": 633
} | class ____:
def __bytes__(self):
return return_bytes() # [invalid-bytes-return]
# These testcases should NOT raise errors
| ComplexReturn |
python | doocs__leetcode | solution/3000-3099/3019.Number of Changing Keys/Solution.py | {
"start": 0,
"end": 121
} | class ____:
def countKeyChanges(self, s: str) -> int:
return sum(a != b for a, b in pairwise(s.lower()))
| Solution |
python | coleifer__peewee | tests/expressions.py | {
"start": 1618,
"end": 6949
} | class ____(ModelTestCase):
"""
Test the conversion of field values using a field's db_value() function.
It is possible that a field's `db_value()` function may returns a Node
subclass (e.g. a SQL function). These tests verify and document how such
conversions are applied in various parts of the query.
"""
database = get_in_memory_db()
requires = [UpperModel]
def test_value_conversion(self):
# Ensure value is converted on INSERT.
insert = UpperModel.insert({UpperModel.name: 'huey'})
self.assertSQL(insert, (
'INSERT INTO "upper_model" ("name") VALUES (UPPER(?))'), ['huey'])
uid = insert.execute()
obj = UpperModel.get(UpperModel.id == uid)
self.assertEqual(obj.name, 'HUEY')
# Ensure value is converted on UPDATE.
update = (UpperModel
.update({UpperModel.name: 'zaizee'})
.where(UpperModel.id == uid))
self.assertSQL(update, (
'UPDATE "upper_model" SET "name" = UPPER(?) '
'WHERE ("upper_model"."id" = ?)'),
['zaizee', uid])
update.execute()
# Ensure it works with SELECT (or more generally, WHERE expressions).
select = UpperModel.select().where(UpperModel.name == 'zaizee')
self.assertSQL(select, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'WHERE ("t1"."name" = UPPER(?))'), ['zaizee'])
obj = select.get()
self.assertEqual(obj.name, 'ZAIZEE')
# Ensure it works with DELETE.
delete = UpperModel.delete().where(UpperModel.name == 'zaizee')
self.assertSQL(delete, (
'DELETE FROM "upper_model" '
'WHERE ("upper_model"."name" = UPPER(?))'), ['zaizee'])
self.assertEqual(delete.execute(), 1)
def test_value_conversion_mixed(self):
um = UpperModel.create(name='huey')
# If we apply a function to the field, the conversion is not applied.
sq = UpperModel.select().where(fn.SUBSTR(UpperModel.name, 1, 1) == 'h')
self.assertSQL(sq, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'WHERE (SUBSTR("t1"."name", ?, ?) = ?)'), [1, 1, 'h'])
self.assertRaises(UpperModel.DoesNotExist, sq.get)
# If we encapsulate the object as a value, the conversion is applied.
sq = UpperModel.select().where(UpperModel.name == Value('huey'))
self.assertSQL(sq, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'WHERE ("t1"."name" = UPPER(?))'), ['huey'])
self.assertEqual(sq.get().id, um.id)
# Unless we explicitly pass converter=False.
sq = UpperModel.select().where(UpperModel.name == Value('huey', False))
self.assertSQL(sq, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'WHERE ("t1"."name" = ?)'), ['huey'])
self.assertRaises(UpperModel.DoesNotExist, sq.get)
# If we specify explicit SQL on the rhs, the conversion is not applied.
sq = UpperModel.select().where(UpperModel.name == SQL('?', ['huey']))
self.assertSQL(sq, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'WHERE ("t1"."name" = ?)'), ['huey'])
self.assertRaises(UpperModel.DoesNotExist, sq.get)
# Function arguments are not coerced.
sq = UpperModel.select().where(UpperModel.name == fn.LOWER('huey'))
self.assertSQL(sq, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'WHERE ("t1"."name" = LOWER(?))'), ['huey'])
self.assertRaises(UpperModel.DoesNotExist, sq.get)
def test_value_conversion_query(self):
um = UpperModel.create(name='huey')
UM = UpperModel.alias()
subq = UM.select(UM.name).where(UM.name == 'huey')
# Select from WHERE ... IN <subquery>.
query = UpperModel.select().where(UpperModel.name.in_(subq))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'WHERE ("t1"."name" IN ('
'SELECT "t2"."name" FROM "upper_model" AS "t2" '
'WHERE ("t2"."name" = UPPER(?))))'), ['huey'])
self.assertEqual(query.get().id, um.id)
# Join on sub-query.
query = (UpperModel
.select()
.join(subq, on=(UpperModel.name == subq.c.name)))
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."name" FROM "upper_model" AS "t1" '
'INNER JOIN (SELECT "t2"."name" FROM "upper_model" AS "t2" '
'WHERE ("t2"."name" = UPPER(?))) AS "t3" '
'ON ("t1"."name" = "t3"."name")'), ['huey'])
row = query.tuples().get()
self.assertEqual(row, (um.id, 'HUEY'))
def test_having_clause(self):
query = (UpperModel
.select(UpperModel.name, fn.COUNT(UpperModel.id).alias('ct'))
.group_by(UpperModel.name)
.having(UpperModel.name == 'huey'))
self.assertSQL(query, (
'SELECT "t1"."name", COUNT("t1"."id") AS "ct" '
'FROM "upper_model" AS "t1" '
'GROUP BY "t1"."name" '
'HAVING ("t1"."name" = UPPER(?))'), ['huey'])
| TestValueConversion |
python | milvus-io__pymilvus | pymilvus/client/abstract.py | {
"start": 14134,
"end": 14917
} | class ____(BaseRanker):
def __init__(self, *nums, norm_score: bool = True):
self._strategy = RANKER_TYPE_WEIGHTED
weights = []
for num in nums:
# isinstance(True, int) is True, thus we need to check bool first
if isinstance(num, bool) or not isinstance(num, (int, float)):
error_msg = f"Weight must be a number, got {type(num)}"
raise TypeError(error_msg)
weights.append(num)
self._weights = weights
self._norm_score = norm_score
def dict(self):
params = {
"weights": self._weights,
"norm_score": self._norm_score,
}
return {
"strategy": self._strategy,
"params": params,
}
| WeightedRanker |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_results.py | {
"start": 3601,
"end": 9030
} | class ____(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.tables.denormalize_table = Table(
"denormalize_table",
metadata,
Column("id", Integer, primary_key=True),
Column("all_lowercase", Integer),
Column("ALL_UPPERCASE", Integer),
Column("MixedCase", Integer),
Column(quoted_name("all_lowercase_quoted", quote=True), Integer),
Column(quoted_name("ALL_UPPERCASE_QUOTED", quote=True), Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.denormalize_table.insert(),
{
"id": 1,
"all_lowercase": 5,
"ALL_UPPERCASE": 6,
"MixedCase": 7,
"all_lowercase_quoted": 8,
"ALL_UPPERCASE_QUOTED": 9,
},
)
def _assert_row_mapping(self, row, mapping, include_cols=None):
eq_(row._mapping, mapping)
for k in mapping:
eq_(row._mapping[k], mapping[k])
eq_(getattr(row, k), mapping[k])
for idx, k in enumerate(mapping):
eq_(row[idx], mapping[k])
if include_cols:
for col, (idx, k) in zip(include_cols, enumerate(mapping)):
eq_(row._mapping[col], mapping[k])
@testing.variation(
"stmt_type", ["driver_sql", "text_star", "core_select", "text_cols"]
)
@testing.variation("use_driver_cols", [True, False])
def test_cols_driver_cols(self, connection, stmt_type, use_driver_cols):
if stmt_type.driver_sql or stmt_type.text_star or stmt_type.text_cols:
stmt = select("*").select_from(self.tables.denormalize_table)
text_stmt = str(stmt.compile(connection))
if stmt_type.text_star or stmt_type.text_cols:
stmt = text(text_stmt)
if stmt_type.text_cols:
stmt = stmt.columns(*self.tables.denormalize_table.c)
elif stmt_type.core_select:
stmt = select(self.tables.denormalize_table)
else:
stmt_type.fail()
if use_driver_cols:
execution_options = {"driver_column_names": True}
else:
execution_options = {}
if stmt_type.driver_sql:
row = connection.exec_driver_sql(
text_stmt, execution_options=execution_options
).one()
else:
row = connection.execute(
stmt,
execution_options=execution_options,
).one()
if (
stmt_type.core_select and not use_driver_cols
) or not testing.requires.denormalized_names.enabled:
self._assert_row_mapping(
row,
{
"id": 1,
"all_lowercase": 5,
"ALL_UPPERCASE": 6,
"MixedCase": 7,
"all_lowercase_quoted": 8,
"ALL_UPPERCASE_QUOTED": 9,
},
)
if testing.requires.denormalized_names.enabled:
# with driver column names, raw cursor.description
# is used. this is clearly not useful for non-quoted names.
if use_driver_cols:
self._assert_row_mapping(
row,
{
"ID": 1,
"ALL_LOWERCASE": 5,
"ALL_UPPERCASE": 6,
"MixedCase": 7,
"all_lowercase_quoted": 8,
"ALL_UPPERCASE_QUOTED": 9,
},
)
else:
if stmt_type.core_select or stmt_type.text_cols:
self._assert_row_mapping(
row,
{
"id": 1,
"all_lowercase": 5,
"ALL_UPPERCASE": 6,
"MixedCase": 7,
"all_lowercase_quoted": 8,
"ALL_UPPERCASE_QUOTED": 9,
},
include_cols=self.tables.denormalize_table.c,
)
else:
self._assert_row_mapping(
row,
{
"id": 1,
"all_lowercase": 5,
"all_uppercase": 6,
"MixedCase": 7,
"all_lowercase_quoted": 8,
"all_uppercase_quoted": 9,
},
include_cols=None,
)
else:
self._assert_row_mapping(
row,
{
"id": 1,
"all_lowercase": 5,
"ALL_UPPERCASE": 6,
"MixedCase": 7,
"all_lowercase_quoted": 8,
"ALL_UPPERCASE_QUOTED": 9,
},
include_cols=(
self.tables.denormalize_table.c
if stmt_type.core_select or stmt_type.text_cols
else None
),
)
| NameDenormalizeTest |
python | scipy__scipy | scipy/signal/tests/test_windows.py | {
"start": 21345,
"end": 22545
} | class ____:
def test_basic(self, xp):
xp_assert_close(windows.hann(6, sym=False, xp=xp),
xp.asarray([0, 0.25, 0.75, 1.0, 0.75, 0.25], dtype=xp.float64),
rtol=1e-15, atol=1e-15)
xp_assert_close(windows.hann(7, sym=False, xp=xp),
xp.asarray([0, 0.1882550990706332, 0.6112604669781572,
0.9504844339512095, 0.9504844339512095,
0.6112604669781572, 0.1882550990706332],
dtype=xp.float64),
rtol=1e-15, atol=1e-15)
xp_assert_close(windows.hann(6, True, xp=xp),
xp.asarray([0, 0.3454915028125263, 0.9045084971874737,
0.9045084971874737, 0.3454915028125263, 0],
dtype=xp.float64),
rtol=1e-15, atol=1e-15)
xp_assert_close(windows.hann(7, xp=xp),
xp.asarray([0, 0.25, 0.75, 1.0, 0.75, 0.25, 0],
dtype=xp.float64),
rtol=1e-15, atol=1e-15)
@make_xp_test_case(windows.kaiser)
| TestHann |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1257726,
"end": 1258537
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.unblock_user"""
__schema__ = github_schema
__field_names__ = ("blocked_user", "blocked_user_name", "blocked_user_resource_path", "blocked_user_url")
blocked_user = sgqlc.types.Field("User", graphql_name="blockedUser")
"""The user being unblocked by the organization."""
blocked_user_name = sgqlc.types.Field(String, graphql_name="blockedUserName")
"""The username of the blocked user."""
blocked_user_resource_path = sgqlc.types.Field(URI, graphql_name="blockedUserResourcePath")
"""The HTTP path for the blocked user."""
blocked_user_url = sgqlc.types.Field(URI, graphql_name="blockedUserUrl")
"""The HTTP URL for the blocked user."""
| OrgUnblockUserAuditEntry |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.