language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 646302,
"end": 646392
} | class ____(RuntimeError):
"""Raised if file does not exist."""
pass
| FileNotFoundError |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py | {
"start": 11259,
"end": 14831
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Qwen3VLMoeTextConfig, layer_idx: int):
super().__init__()
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.q_norm = Qwen3VLMoeTextRMSNorm(
self.head_dim, eps=config.rms_norm_eps
) # unlike olmo, only on the head dim!
self.k_norm = Qwen3VLMoeTextRMSNorm(
self.head_dim, eps=config.rms_norm_eps
) # thus post q_norm does not need reshape
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Qwen3VLMoeTextAttention |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_exceptions.py | {
"start": 69827,
"end": 71204
} | class ____(__TestCase):
def test_attributes(self):
# Setting 'attr' should not be a problem.
exc = AttributeError('Ouch!')
self.assertIsNone(exc.name)
self.assertIsNone(exc.obj)
sentinel = object()
exc = AttributeError('Ouch', name='carry', obj=sentinel)
self.assertEqual(exc.name, 'carry')
self.assertIs(exc.obj, sentinel)
def test_getattr_has_name_and_obj(self):
with torch._dynamo.error_on_graph_break(False):
class A:
blech = None
obj = A()
try:
obj.bluch
except AttributeError as exc:
self.assertEqual("bluch", exc.name)
self.assertEqual(obj, exc.obj)
try:
object.__getattribute__(obj, "bluch")
except AttributeError as exc:
self.assertEqual("bluch", exc.name)
self.assertEqual(obj, exc.obj)
def test_getattr_has_name_and_obj_for_method(self):
with torch._dynamo.error_on_graph_break(False):
class A:
def blech(self):
return
obj = A()
try:
obj.bluch()
except AttributeError as exc:
self.assertEqual("bluch", exc.name)
self.assertEqual(obj, exc.obj)
# Note: name suggestion tests live in `test_traceback`.
| AttributeErrorTests |
python | getsentry__sentry | tests/sentry/sentry_apps/api/bases/test_sentryapps.py | {
"start": 773,
"end": 3870
} | class ____(TestCase):
def setUp(self) -> None:
self.permission = SentryAppPermission()
self.sentry_app = self.create_sentry_app(name="foo", organization=self.organization)
self.request = drf_request_from_request(self.make_request(user=self.user, method="GET"))
self.superuser = self.create_user(is_superuser=True)
def test_request_user_is_app_owner_succeeds(self) -> None:
assert self.permission.has_object_permission(self.request, APIView(), self.sentry_app)
def test_request_user_is_not_app_owner_fails(self) -> None:
non_owner = self.create_user()
self.request = drf_request_from_request(self.make_request(user=non_owner, method="GET"))
with pytest.raises(SentryAppError):
self.permission.has_object_permission(self.request, APIView(), self.sentry_app)
def test_has_permission(self) -> None:
from sentry.models.apitoken import ApiToken
token: ApiToken = ApiToken.objects.create(
user=self.user, scope_list=["event:read", "org:read"]
)
request = self.make_request(user=None, auth=token, method="GET")
# Need to set token here, else UserAuthTokenAuthentication won't be able to find it & fail auth
request.META["HTTP_AUTHORIZATION"] = f"Bearer {token.plaintext_token}"
self.request = drf_request_from_request(request)
assert self.permission.has_permission(self.request, APIView())
def test_superuser_has_permission(self) -> None:
request = drf_request_from_request(
self.make_request(user=self.superuser, method="GET", is_superuser=True)
)
assert self.permission.has_object_permission(request, APIView(), self.sentry_app)
request._request.method = "POST"
assert self.permission.has_object_permission(request, APIView(), self.sentry_app)
@override_options({"superuser.read-write.ga-rollout": True})
@override_settings(SENTRY_SELF_HOSTED=False)
def test_superuser_has_permission_read_only(self) -> None:
request = drf_request_from_request(
self.make_request(user=self.superuser, method="GET", is_superuser=True)
)
assert self.permission.has_object_permission(request, APIView(), self.sentry_app)
request._request.method = "POST"
with pytest.raises(SentryAppError):
self.permission.has_object_permission(request, APIView(), self.sentry_app)
@override_options({"superuser.read-write.ga-rollout": True})
@override_settings(SENTRY_SELF_HOSTED=False)
def test_superuser_has_permission_write(self) -> None:
self.add_user_permission(self.superuser, "superuser.write")
request = drf_request_from_request(
self.make_request(user=self.superuser, method="GET", is_superuser=True)
)
assert self.permission.has_object_permission(request, APIView(), self.sentry_app)
self.request._request.method = "POST"
self.permission.has_object_permission(request, APIView(), self.sentry_app)
@control_silo_test
| SentryAppPermissionTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1586797,
"end": 1587073
} | class ____(sgqlc.types.Union):
"""Represents either a repository the viewer can access or a
restricted contribution.
"""
__schema__ = github_schema
__types__ = (CreatedRepositoryContribution, RestrictedContribution)
| CreatedRepositoryOrRestrictedContribution |
python | kamyu104__LeetCode-Solutions | Python/count-integers-in-intervals.py | {
"start": 155,
"end": 1054
} | class ____(object):
def __init__(self):
self.__sl = SortedList()
self.__cnt = 0
def add(self, left, right):
"""
:type left: int
:type right: int
:rtype: None
"""
i = self.__sl.bisect_right((left,))
if i-1 >= 0 and self.__sl[i-1][1]+1 >= left:
i -= 1
left = self.__sl[i][0]
to_remove = []
for i in xrange(i, len(self.__sl)):
if not (right+1 >= self.__sl[i][0]):
break
right = max(right, self.__sl[i][1])
self.__cnt -= self.__sl[i][1]-self.__sl[i][0]+1
to_remove.append(i)
while to_remove:
del self.__sl[to_remove.pop()]
self.__sl.add((left, right))
self.__cnt += right-left+1
def count(self):
"""
:rtype: int
"""
return self.__cnt
| CountIntervals |
python | streamlit__streamlit | lib/streamlit/elements/widgets/multiselect.py | {
"start": 2283,
"end": 5559
} | class ____(Generic[T]):
options: Sequence[T]
formatted_options: list[str]
formatted_option_to_option_index: dict[str, int]
default_options_indices: list[int]
def __init__(
self,
options: Sequence[T],
*,
formatted_options: list[str],
formatted_option_to_option_index: dict[str, int],
default_options_indices: list[int] | None = None,
) -> None:
"""Initialize the MultiSelectSerde.
We do not store an option_to_formatted_option mapping because the generic
options might not be hashable, which would raise a RuntimeError. So we do
two lookups: option -> index -> formatted_option[index].
Parameters
----------
options : Sequence[T]
The sequence of selectable options.
formatted_options : list[str]
The string representations of each option. The formatted_options correspond
to the options sequence by index.
formatted_option_to_option_index : dict[str, int]
A mapping from formatted option strings to their corresponding indices in
the options sequence.
default_option_index : int or None, optional
The index of the default option to use when no selection is made.
If None, no default option is selected.
"""
self.options = options
self.formatted_options = formatted_options
self.formatted_option_to_option_index = formatted_option_to_option_index
self.default_options_indices = default_options_indices or []
def serialize(self, value: list[T | str] | list[T]) -> list[str]:
converted_value = convert_anything_to_list(value)
values: list[str] = []
for v in converted_value:
try:
option_index = self.options.index(v)
values.append(self.formatted_options[option_index])
except ValueError: # noqa: PERF203
# at this point we know that v is a string, otherwise
# it would have been found in the options
values.append(cast("str", v))
return values
def deserialize(self, ui_value: list[str] | None) -> list[T | str] | list[T]:
if ui_value is None:
return [self.options[i] for i in self.default_options_indices]
values: list[T | str] = []
for v in ui_value:
try:
option_index = self.formatted_options.index(v)
values.append(self.options[option_index])
except ValueError: # noqa: PERF203
values.append(v)
return values
def _get_default_count(default: Sequence[Any] | Any | None) -> int:
if default is None:
return 0
if not is_iterable(default):
return 1
return len(cast("Sequence[Any]", default))
def _check_max_selections(
selections: Sequence[Any] | Any | None, max_selections: int | None
) -> None:
if max_selections is None:
return
default_count = _get_default_count(selections)
if default_count > max_selections:
raise StreamlitSelectionCountExceedsMaxError(
current_selections_count=default_count, max_selections_count=max_selections
)
| MultiSelectSerde |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_mutating_admission_policy_list.py | {
"start": 383,
"end": 7317
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1MutatingAdmissionPolicy]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1MutatingAdmissionPolicyList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1MutatingAdmissionPolicyList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
List of ValidatingAdmissionPolicy. # noqa: E501
:return: The items of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:rtype: list[V1beta1MutatingAdmissionPolicy]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1MutatingAdmissionPolicyList.
List of ValidatingAdmissionPolicy. # noqa: E501
:param items: The items of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:type: list[V1beta1MutatingAdmissionPolicy]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1MutatingAdmissionPolicyList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:return: The metadata of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1MutatingAdmissionPolicyList.
:param metadata: The metadata of this V1beta1MutatingAdmissionPolicyList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1MutatingAdmissionPolicyList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1MutatingAdmissionPolicyList):
return True
return self.to_dict() != other.to_dict()
| V1beta1MutatingAdmissionPolicyList |
python | pypa__warehouse | warehouse/accounts/utils.py | {
"start": 437,
"end": 1581
} | class ____:
"""
This class supports `MacaroonSecurityPolicy` in
`warehouse.macaroons.security_policy`.
It is a wrapper containing both a user associated with an authenticated request
and an optional corresponding Macaroon, if the authentication was via API token.
If the request was authenticated via login session, `macaroon` should be None.
"""
user: User
"""
The associated user.
"""
macaroon: Macaroon | None
"""
The Macaroon associated to the API token used to authenticate, if token
authentication was used.
"""
def __principals__(self) -> list[str]:
return self.user.__principals__()
def update_email_domain_status(email: Email, request: Request) -> None:
"""
Update the domain status of the given email address.
"""
domain_status_service = request.find_service(IDomainStatusService)
if domain_status := domain_status_service.get_domain_status(email.domain):
email.domain_last_checked = datetime.datetime.now(datetime.UTC)
email.domain_last_status = domain_status
request.db.add(email)
return None
| UserContext |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 192973,
"end": 200002
} | class ____(Sm90ATestCase, jtu.JaxTestCase):
@parameterized.product(
swizzle=tuple(mgpu_dialect.SwizzlingMode),
transpose_lhs=(False, True),
transpose_rhs=(False, True),
lhs_in_registers=(False, True),
)
def test_wgmma_kernel_with_tma(
self, swizzle, transpose_lhs, transpose_rhs, lhs_in_registers
):
if swizzle == mgpu_dialect.SwizzlingMode.kNoSwizzle:
self.skipTest("No swizzle is not supported by wgmma")
if transpose_lhs and lhs_in_registers:
self.skipTest("The A operand can only be transposed if it is in SMEM.")
swizzle_elems = swizzle // np.dtype(jnp.bfloat16).itemsize
tiling_m, tiling_n, tiling_k = 64, swizzle_elems, swizzle_elems
groups_m, groups_n, groups_k = 4, 1, 1
m, n, k = groups_m * tiling_m, groups_n * tiling_n, groups_k * tiling_k
lhs_shape = (k, m) if transpose_lhs else (m, k)
rhs_shape = (n, k) if transpose_rhs else (k, n)
out_shape = (m, n)
def matmul(
ctx: launch_context.LaunchContext,
lhs_gmem_ref: ir.Value,
rhs_gmem_ref: ir.Value,
result_gmem_ref: ir.Value,
smem: list[ir.Value],
):
del ctx
lhs_smem_ref, rhs_smem_ref, result_smem_ref, tma_barrier = smem
operand_elt_type = ir.MemRefType(lhs_gmem_ref.type).element_type
bytes_a = utils.bytewidth(operand_elt_type) * math.prod(lhs_shape)
bytes_b = utils.bytewidth(operand_elt_type) * math.prod(rhs_shape)
# GMEM -> SMEM
zero_i32 = arith.constant(ir.IntegerType.get_signless(32), 0)
tma_barrier.arrive_expect_tx(bytes_a + bytes_b)
mgpu_dialect.async_load(
source=lhs_gmem_ref,
destination=lhs_smem_ref,
barrier=tma_barrier.as_barrier_memref(),
indices=[zero_i32] * len(lhs_shape),
slice_lengths=lhs_shape,
collective=ir.ArrayAttr.get([]),
)
mgpu_dialect.async_load(
source=rhs_gmem_ref,
destination=rhs_smem_ref,
barrier=tma_barrier.as_barrier_memref(),
indices=[zero_i32] * len(rhs_shape),
slice_lengths=rhs_shape,
collective=ir.ArrayAttr.get([]),
)
tma_barrier.wait()
# Computation
shape_result = ir.MemRefType(result_gmem_ref.type).shape
result_elt_type = ir.MemRefType(result_gmem_ref.type).element_type
acc_elt_type = ir.F32Type.get()
acc_type = ir.VectorType.get(shape_result, acc_elt_type)
zero_acc = arith.constant(
result_elt_type, ir.FloatAttr.get(acc_elt_type, 0.0)
)
accumulator = vector.broadcast(acc_type, zero_acc)
if transpose_lhs:
lhs_smem_ref = utils.memref_transpose(lhs_smem_ref, (1, 0))
if transpose_rhs:
rhs_smem_ref = utils.memref_transpose(rhs_smem_ref, (1, 0))
if lhs_in_registers:
# SMEM -> Registers
lhs_operand = mgpu_dialect.vector_load(lhs_smem_ref)
else:
lhs_operand = lhs_smem_ref
result = mgpu_dialect.wgmma(
accumulator,
lhs_operand,
rhs_smem_ref,
)
nvvm.wgmma_commit_group_sync_aligned()
nvvm.wgmma_wait_group_sync_aligned(0)
# Registers -> SMEM
mgpu_dialect.vector_store(result, result_smem_ref)
# SMEM -> GMEM
mgpu_dialect.async_store(
source=result_smem_ref,
destination=result_gmem_ref,
indices=[zero_i32, zero_i32],
slice_lengths=shape_result,
)
nvvm.cp_async_bulk_wait_group(0)
operand_type = jnp.bfloat16
acctype = jnp.float32
lhs_jax_shape = jax.ShapeDtypeStruct(lhs_shape, operand_type)
rhs_jax_shape = jax.ShapeDtypeStruct(rhs_shape, operand_type)
result_jax_shape = jax.ShapeDtypeStruct(out_shape, acctype)
kernel = mgpu.as_gpu_kernel(
matmul,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(lhs_jax_shape, rhs_jax_shape),
out_shape=result_jax_shape,
smem_scratch_shape=[
lhs_jax_shape,
rhs_jax_shape,
result_jax_shape,
core.TMABarrier(1),
],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
prng_key = jax.random.key(1234)
k0, k1 = jax.random.split(prng_key, 2)
x = jax.random.randint(k0, lhs_shape, 0, 2).astype(operand_type)
y = jax.random.randint(k1, rhs_shape, 0, 2).astype(operand_type)
transpose = lambda x, t: x.T if t else x
self.assertArraysAllClose(
kernel(x, y),
np.matmul(transpose(x, transpose_lhs), transpose(y, transpose_rhs)),
atol=0,
rtol=0,
)
@parameterized.product(
dtype=(jnp.int8, jnp.uint8),
lhs_in_smem=(False, True),
)
def test_integer_wgmma(self, dtype, lhs_in_smem):
m, k, n = 64, 128, 64
def body(ctx, lhs_gmem, rhs_gmem, result_gmem, scratch):
del ctx
lhs_smem, rhs_smem, tma_barrier = scratch
i32 = ir.IntegerType.get_signless(32)
zero = arith.constant(i32, 0)
tma_barrier.arrive_expect_tx(m * k + k * n)
mgpu_dialect.async_load(
source=lhs_gmem,
destination=lhs_smem,
barrier=tma_barrier.as_barrier_memref(),
indices=[zero, zero],
slice_lengths=lhs_smem.type.shape,
collective=ir.ArrayAttr.get([]),
)
mgpu_dialect.async_load(
source=rhs_gmem,
destination=rhs_smem,
barrier=tma_barrier.as_barrier_memref(),
indices=[zero, zero],
slice_lengths=rhs_smem.type.shape,
collective=ir.ArrayAttr.get([]),
)
tma_barrier.wait()
acc_type = ir.VectorType.get((m, n), i32)
acc = vector.broadcast(acc_type, zero)
lhs = lhs_smem if lhs_in_smem else mgpu_dialect.vector_load(lhs_smem)
# Only f16 WGMMA supports transposes
rhs_smem = utils.memref_transpose(rhs_smem, (1, 0))
result = mgpu_dialect.wgmma(acc, lhs, rhs_smem)
nvvm.wgmma_commit_group_sync_aligned()
nvvm.wgmma_wait_group_sync_aligned(0)
mgpu_dialect.vector_store(result, result_gmem)
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(
jax.ShapeDtypeStruct((m, k), dtype),
jax.ShapeDtypeStruct((n, k), dtype),
),
out_shape=jax.ShapeDtypeStruct((m, n), jnp.int32),
smem_scratch_shape=[
jax.ShapeDtypeStruct((m, k), dtype),
jax.ShapeDtypeStruct((n, k), dtype),
core.TMABarrier(1),
],
thread_semantics=mgpu.LoweringSemantics.Warpgroup,
)
# Use small values to avoid overflow, [0, 8) for u8 and (-8, 8) for s8.
is_signed = jnp.issubdtype(dtype, jnp.signedinteger)
low, high = (-8, 8) if is_signed else (0, 8)
lhs = self.prng.uniform(low, high, (m, k)).astype(dtype)
rhs = self.prng.uniform(low, high, (n, k)).astype(dtype)
self.assertArraysEqual(
kernel(lhs, rhs),
np.matmul(lhs.astype(jnp.int32), rhs.astype(jnp.int32).T),
)
| MosaicGpuDialectSm90ATest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataflow.py | {
"start": 46647,
"end": 50078
} | class ____(GoogleCloudBaseOperator):
"""
Runs a Dataflow Data Pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowRunPipelineOperator`
:param pipeline_name: The display name of the pipeline. In example
projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID it would be the PIPELINE_ID.
:param project_id: The ID of the GCP project that owns the job.
:param location: The location to direct the Data Pipelines instance to (for example us-central1).
:param gcp_conn_id: The connection ID to connect to the Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
Returns the created Job in JSON representation.
"""
operator_extra_links = (DataflowJobLink(),)
def __init__(
self,
pipeline_name: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.dataflow_hook: DataflowHook | None = None
def execute(self, context: Context):
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
if self.pipeline_name is None:
raise AirflowException("Data Pipeline name not given; cannot run unspecified pipeline.")
if self.project_id is None:
raise AirflowException("Data Pipeline Project ID not given; cannot run pipeline.")
if self.location is None:
raise AirflowException("Data Pipeline location not given; cannot run pipeline.")
try:
self.job = self.dataflow_hook.run_data_pipeline(
pipeline_name=self.pipeline_name,
project_id=self.project_id,
location=self.location,
)["job"]
job_id = self.dataflow_hook.extract_job_id(self.job)
context["task_instance"].xcom_push(key="job_id", value=job_id)
DataflowJobLink.persist(
context=context, project_id=self.project_id, region=self.location, job_id=job_id
)
except HttpError as e:
if e.resp.status == 404:
raise AirflowException("Pipeline with given name was not found.")
except Exception as exc:
raise AirflowException("Error occurred when running Pipeline: %s", exc)
return self.job
| DataflowRunPipelineOperator |
python | charliermarsh__ruff | crates/ruff_server/resources/test/fixtures/pandas_html.py | {
"start": 16735,
"end": 20405
} | class ____(_HtmlFrameParser):
"""
HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
--------
pandas.io.html._HtmlFrameParser
pandas.io.html._LxmlFrameParser
Notes
-----
Documentation strings for this class are in the base class
:class:`pandas.io.html._HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
from bs4 import SoupStrainer
self._strainer = SoupStrainer("table")
def _parse_tables(self, document, match, attrs):
element_name = self._strainer.name
tables = document.find_all(element_name, attrs=attrs)
if not tables:
raise ValueError("No tables found")
result = []
unique_tables = set()
tables = self._handle_hidden_tables(tables, "attrs")
for table in tables:
if self.displayed_only:
for elem in table.find_all("style"):
elem.decompose()
for elem in table.find_all(style=re.compile(r"display:\s*none")):
elem.decompose()
if table not in unique_tables and table.find(string=match) is not None:
result.append(table)
unique_tables.add(table)
if not result:
raise ValueError(f"No tables found matching pattern {match.pattern!r}")
return result
def _href_getter(self, obj) -> str | None:
a = obj.find("a", href=True)
return None if not a else a["href"]
def _text_getter(self, obj):
return obj.text
def _equals_tag(self, obj, tag) -> bool:
return obj.name == tag
def _parse_td(self, row):
return row.find_all(("td", "th"), recursive=False)
def _parse_thead_tr(self, table):
return table.select("thead tr")
def _parse_tbody_tr(self, table):
from_tbody = table.select("tbody tr")
from_root = table.find_all("tr", recursive=False)
# HTML spec: at most one of these lists has content
return from_tbody + from_root
def _parse_tfoot_tr(self, table):
return table.select("tfoot tr")
def _setup_build_doc(self):
raw_text = _read(self.io, self.encoding, self.storage_options)
if not raw_text:
raise ValueError(f"No text parsed from document: {self.io}")
return raw_text
def _build_doc(self):
from bs4 import BeautifulSoup
bdoc = self._setup_build_doc()
if isinstance(bdoc, bytes) and self.encoding is not None:
udoc = bdoc.decode(self.encoding)
from_encoding = None
else:
udoc = bdoc
from_encoding = self.encoding
soup = BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding)
for br in soup.find_all("br"):
br.replace_with("\n" + br.text)
return soup
def _build_xpath_expr(attrs) -> str:
"""
Build an xpath expression to simulate bs4's ability to pass in kwargs to
search for attributes when using the lxml parser.
Parameters
----------
attrs : dict
A dict of HTML attributes. These are NOT checked for validity.
Returns
-------
expr : unicode
An XPath expression that checks for the given HTML attributes.
"""
# give class attribute as class_ because class is a python keyword
if "class_" in attrs:
attrs["class"] = attrs.pop("class_")
s = " and ".join([f"@{k}={v!r}" for k, v in attrs.items()])
return f"[{s}]"
_re_namespace = {"re": "http://exslt.org/regular-expressions"}
| _BeautifulSoupHtml5LibFrameParser |
python | scipy__scipy | scipy/optimize/tests/test_chandrupatla.py | {
"start": 5280,
"end": 20348
} | class ____:
def f(self, x, loc):
xp = array_namespace(x, loc)
res = -xp.exp(-1/2 * (x-loc)**2) / (2*xp.pi)**0.5
return xp.asarray(res, dtype=x.dtype)[()]
@pytest.mark.parametrize('dtype', ('float32', 'float64'))
@pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)])
def test_basic(self, loc, xp, dtype):
# Find mode of normal distribution. Compare mode against location
# parameter and value of pdf at mode against expected pdf.
rtol = {'float32': 5e-3, 'float64': 5e-7}[dtype]
dtype = getattr(xp, dtype)
bracket = (xp.asarray(xi, dtype=dtype) for xi in (-5, 0, 5))
loc = xp.asarray(loc, dtype=dtype)
fun = xp.broadcast_to(xp.asarray(-stats.norm.pdf(0), dtype=dtype), loc.shape)
res = _chandrupatla_minimize(self.f, *bracket, args=(loc,))
xp_assert_close(res.x, loc, rtol=rtol)
xp_assert_equal(res.fun, fun)
@pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
def test_vectorization(self, shape, xp):
# Test for correct functionality, output shapes, and dtypes for various
# input shapes.
loc = xp.linspace(-0.05, 1.05, 12).reshape(shape) if shape else xp.asarray(0.6)
args = (loc,)
bracket = xp.asarray(-5.), xp.asarray(0.), xp.asarray(5.)
@_vectorize(xp)
def chandrupatla_single(loc_single):
return _chandrupatla_minimize(self.f, *bracket, args=(loc_single,))
def f(*args, **kwargs):
f.f_evals += 1
return self.f(*args, **kwargs)
f.f_evals = 0
res = _chandrupatla_minimize(f, *bracket, args=args)
refs = chandrupatla_single(loc)
attrs = ['x', 'fun', 'success', 'status', 'nfev', 'nit',
'xl', 'xm', 'xr', 'fl', 'fm', 'fr']
for attr in attrs:
ref_attr = xp.stack([getattr(ref, attr) for ref in refs])
res_attr = xp_ravel(getattr(res, attr))
xp_assert_equal(res_attr, ref_attr)
assert getattr(res, attr).shape == shape
xp_assert_equal(res.fun, self.f(res.x, *args))
xp_assert_equal(res.fl, self.f(res.xl, *args))
xp_assert_equal(res.fm, self.f(res.xm, *args))
xp_assert_equal(res.fr, self.f(res.xr, *args))
assert xp.max(res.nfev) == f.f_evals
assert xp.max(res.nit) == f.f_evals - 3
assert xp.isdtype(res.success.dtype, 'bool')
assert xp.isdtype(res.status.dtype, 'integral')
assert xp.isdtype(res.nfev.dtype, 'integral')
assert xp.isdtype(res.nit.dtype, 'integral')
def test_flags(self, xp):
# Test cases that should produce different status flags; show that all
# can be produced simultaneously.
def f(xs, js):
funcs = [lambda x: (x - 2.5) ** 2,
lambda x: x - 10,
lambda x: (x - 2.5) ** 4,
lambda x: xp.full_like(x, xp.asarray(xp.nan))]
res = []
for i in range(xp_size(js)):
x = xs[i, ...]
j = int(xp_ravel(js)[i])
res.append(funcs[j](x))
return xp.stack(res)
args = (xp.arange(4, dtype=xp.int64),)
bracket = (xp.asarray([0]*4, dtype=xp.float64),
xp.asarray([2]*4, dtype=xp.float64),
xp.asarray([np.pi]*4, dtype=xp.float64))
res = _chandrupatla_minimize(f, *bracket, args=args, maxiter=10)
ref_flags = xp.asarray([eim._ECONVERGED, eim._ESIGNERR, eim._ECONVERR,
eim._EVALUEERR], dtype=xp.int32)
xp_assert_equal(res.status, ref_flags)
def test_convergence(self, xp):
# Test that the convergence tolerances behave as expected
rng = np.random.default_rng(2585255913088665241)
p = xp.asarray(rng.random(size=3))
bracket = (xp.asarray(-5, dtype=xp.float64), xp.asarray(0), xp.asarray(5))
args = (p,)
kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)
kwargs = kwargs0.copy()
kwargs['xatol'] = 1e-3
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
j1 = xp.abs(res1.xr - res1.xl)
tol = xp.asarray(4*kwargs['xatol'], dtype=p.dtype)
xp_assert_less(j1, xp.full((3,), tol, dtype=p.dtype))
kwargs['xatol'] = 1e-6
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
j2 = xp.abs(res2.xr - res2.xl)
tol = xp.asarray(4*kwargs['xatol'], dtype=p.dtype)
xp_assert_less(j2, xp.full((3,), tol, dtype=p.dtype))
xp_assert_less(j2, j1)
kwargs = kwargs0.copy()
kwargs['xrtol'] = 1e-3
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
j1 = xp.abs(res1.xr - res1.xl)
tol = xp.asarray(4*kwargs['xrtol']*xp.abs(res1.x), dtype=p.dtype)
xp_assert_less(j1, tol)
kwargs['xrtol'] = 1e-6
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
j2 = xp.abs(res2.xr - res2.xl)
tol = xp.asarray(4*kwargs['xrtol']*xp.abs(res2.x), dtype=p.dtype)
xp_assert_less(j2, tol)
xp_assert_less(j2, j1)
kwargs = kwargs0.copy()
kwargs['fatol'] = 1e-3
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
h1 = xp.abs(res1.fl - 2 * res1.fm + res1.fr)
tol = xp.asarray(2*kwargs['fatol'], dtype=p.dtype)
xp_assert_less(h1, xp.full((3,), tol, dtype=p.dtype))
kwargs['fatol'] = 1e-6
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
h2 = xp.abs(res2.fl - 2 * res2.fm + res2.fr)
tol = xp.asarray(2*kwargs['fatol'], dtype=p.dtype)
xp_assert_less(h2, xp.full((3,), tol, dtype=p.dtype))
xp_assert_less(h2, h1)
kwargs = kwargs0.copy()
kwargs['frtol'] = 1e-3
res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
h1 = xp.abs(res1.fl - 2 * res1.fm + res1.fr)
tol = xp.asarray(2*kwargs['frtol']*xp.abs(res1.fun), dtype=p.dtype)
xp_assert_less(h1, tol)
kwargs['frtol'] = 1e-6
res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
h2 = xp.abs(res2.fl - 2 * res2.fm + res2.fr)
tol = xp.asarray(2*kwargs['frtol']*abs(res2.fun), dtype=p.dtype)
xp_assert_less(h2, tol)
xp_assert_less(h2, h1)
def test_maxiter_callback(self, xp):
# Test behavior of `maxiter` parameter and `callback` interface
loc = xp.asarray(0.612814)
bracket = (xp.asarray(-5), xp.asarray(0), xp.asarray(5))
maxiter = 5
res = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
maxiter=maxiter)
assert not xp.any(res.success)
assert xp.all(res.nfev == maxiter+3)
assert xp.all(res.nit == maxiter)
def callback(res):
callback.iter += 1
callback.res = res
assert hasattr(res, 'x')
if callback.iter == 0:
# callback is called once with initial bracket
assert (res.xl, res.xm, res.xr) == bracket
else:
changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr)
changed_xl = (res.xl != callback.xl) & (res.xr == callback.xr)
assert xp.all(changed_xr | changed_xl)
callback.xl = res.xl
callback.xr = res.xr
assert res.status == eim._EINPROGRESS
xp_assert_equal(self.f(res.xl, loc), res.fl)
xp_assert_equal(self.f(res.xm, loc), res.fm)
xp_assert_equal(self.f(res.xr, loc), res.fr)
xp_assert_equal(self.f(res.x, loc), res.fun)
if callback.iter == maxiter:
raise StopIteration
callback.xl = xp.nan
callback.xr = xp.nan
callback.iter = -1 # callback called once before first iteration
callback.res = None
res2 = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
callback=callback)
# terminating with callback is identical to terminating due to maxiter
# (except for `status`)
for key in res.keys():
if key == 'status':
assert res[key] == eim._ECONVERR
# assert callback.res[key] == eim._EINPROGRESS
assert res2[key] == eim._ECALLBACK
else:
assert res2[key] == callback.res[key] == res[key]
@pytest.mark.parametrize('case', cases)
def test_nit_expected(self, case, xp):
# Test that `_chandrupatla` implements Chandrupatla's algorithm:
# in all 55 test cases, the number of iterations performed
# matches the number reported in the original paper.
func, x1, nit = case
# Find bracket using the algorithm in the paper
step = 0.2
x2 = x1 + step
x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2)
# Use tolerances from original paper
xatol = 0.0001
fatol = 0.000001
xrtol = 1e-16
frtol = 1e-16
bracket = xp.asarray(x1), xp.asarray(x2), xp.asarray(x3, dtype=xp.float64)
res = _chandrupatla_minimize(func, *bracket, xatol=xatol,
fatol=fatol, xrtol=xrtol, frtol=frtol)
xp_assert_equal(res.nit, xp.asarray(nit, dtype=xp.int32))
@pytest.mark.parametrize("loc", (0.65, [0.65, 0.7]))
@pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64'))
def test_dtype(self, loc, dtype, xp):
# Test that dtypes are preserved
dtype = getattr(xp, dtype)
loc = xp.asarray(loc, dtype=dtype)
bracket = (xp.asarray(-3, dtype=dtype),
xp.asarray(1, dtype=dtype),
xp.asarray(5, dtype=dtype))
def f(x, loc):
assert x.dtype == dtype
return xp.astype((x - loc)**2, dtype)
res = _chandrupatla_minimize(f, *bracket, args=(loc,))
assert res.x.dtype == dtype
xp_assert_close(res.x, loc, rtol=math.sqrt(xp.finfo(dtype).eps))
def test_input_validation(self, xp):
# Test input validation for appropriate error messages
message = '`func` must be callable.'
bracket = xp.asarray(-4), xp.asarray(0), xp.asarray(4)
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(None, *bracket)
message = 'Abscissae and function output must be real numbers.'
bracket = xp.asarray(-4 + 1j), xp.asarray(0), xp.asarray(4)
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket)
message = "...be broadcast..."
bracket = xp.asarray([-2, -3]), xp.asarray([0, 0]), xp.asarray([3, 4, 5])
# raised by `np.broadcast, but the traceback is readable IMO
with pytest.raises((ValueError, RuntimeError), match=message):
_chandrupatla_minimize(lambda x: x, *bracket)
message = "The shape of the array returned by `func` must be the same"
bracket = xp.asarray([-3, -3]), xp.asarray([0, 0]), xp.asarray([5, 5])
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: [x[0, ...], x[1, ...], x[1, ...]],
*bracket)
message = 'Tolerances must be non-negative scalars.'
bracket = xp.asarray(-4), xp.asarray(0), xp.asarray(4)
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket, xatol=-1)
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket, xrtol=xp.nan)
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket, fatol='ekki')
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket, frtol=xp.nan)
message = '`maxiter` must be a non-negative integer.'
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket, maxiter=1.5)
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket, maxiter=-1)
message = '`callback` must be callable.'
with pytest.raises(ValueError, match=message):
_chandrupatla_minimize(lambda x: x, *bracket, callback='shrubbery')
def test_bracket_order(self, xp):
# Confirm that order of points in bracket doesn't
loc = xp.linspace(-1, 1, 6)[:, xp.newaxis]
brackets = xp.asarray(list(permutations([-5, 0, 5]))).T
res = _chandrupatla_minimize(self.f, *brackets, args=(loc,))
assert xp.all(xpx.isclose(res.x, loc) | (res.fun == self.f(loc, loc)))
ref = res.x[:, 0] # all columns should be the same
xp_assert_close(*xp.broadcast_arrays(res.x.T, ref), rtol=1e-15)
def test_special_cases(self, xp):
# Test edge cases and other special cases
# Test that integers are not passed to `f`
def f(x):
assert xp.isdtype(x.dtype, "real floating")
return (x - 1)**2
bracket = xp.asarray(-7), xp.asarray(0), xp.asarray(8)
with np.errstate(invalid='ignore'):
res = _chandrupatla_minimize(f, *bracket, fatol=0, frtol=0)
assert res.success
xp_assert_close(res.x, xp.asarray(1.), rtol=1e-3)
xp_assert_close(res.fun, xp.asarray(0.), atol=1e-200)
# Test that if all elements of bracket equal minimizer, algorithm
# reports convergence
def f(x):
return (x-1)**2
bracket = xp.asarray(1), xp.asarray(1), xp.asarray(1)
res = _chandrupatla_minimize(f, *bracket)
assert res.success
xp_assert_equal(res.x, xp.asarray(1.))
# Test maxiter = 0. Should do nothing to bracket.
def f(x):
return (x-1)**2
bracket = xp.asarray(-3), xp.asarray(1.1), xp.asarray(5)
res = _chandrupatla_minimize(f, *bracket, maxiter=0)
assert res.xl, res.xr == bracket
assert res.nit == 0
assert res.nfev == 3
assert res.status == -2
assert res.x == 1.1 # best so far
# Test scalar `args` (not in tuple)
def f(x, c):
return (x-c)**2 - 1
bracket = xp.asarray(-1), xp.asarray(0), xp.asarray(1)
c = xp.asarray(1/3)
res = _chandrupatla_minimize(f, *bracket, args=(c,))
xp_assert_close(res.x, c)
# Test zero tolerances
def f(x):
return -xp.sin(x)
bracket = xp.asarray(0), xp.asarray(1), xp.asarray(xp.pi)
res = _chandrupatla_minimize(f, *bracket, xatol=0, xrtol=0, fatol=0, frtol=0)
assert res.success
# found a minimum exactly (according to floating point arithmetic)
assert res.xl < res.xm < res.xr
assert f(res.xl) == f(res.xm) == f(res.xr)
@make_xp_test_case(find_root)
| TestChandrupatlaMinimize |
python | openai__openai-python | src/openai/_module_client.py | {
"start": 3072,
"end": 3215
} | class ____(LazyProxy["Containers"]):
@override
def __load__(self) -> Containers:
return _load_client().containers
| ContainersProxy |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 133324,
"end": 133920
} | class ____(nn.Module):
def __init__(self, codec_num_embeds, codec_dim, repeats):
super().__init__()
self.repeats = repeats
self.codec_embed = nn.Embedding(codec_num_embeds + 1, codec_dim)
def forward(self, code, drop_code=False):
if drop_code:
code = torch.zeros_like(code)
code_embed = self.codec_embed(code)
code_embed = torch.repeat_interleave(code_embed, repeats=self.repeats, dim=1)
return code_embed
# AdaLayerNormZero
# return with modulated x for attn input, and params for later mlp modulation
| DiTCodecEmbedding |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 13563,
"end": 14485
} | class ____(StringIORewind):
params = ["c", "python"]
param_names = ["engine"]
def setup(self, engine):
data = """{},19:00:00,18:56:00,0.8100,2.8100,7.2000,0.0000,280.0000\n
{},20:00:00,19:56:00,0.0100,2.2100,7.2000,0.0000,260.0000\n
{},21:00:00,20:56:00,-0.5900,2.2100,5.7000,0.0000,280.0000\n
{},21:00:00,21:18:00,-0.9900,2.0100,3.6000,0.0000,270.0000\n
{},22:00:00,21:56:00,-0.5900,1.7100,5.1000,0.0000,290.0000\n
"""
two_cols = ["KORD,19990127"] * 5
data = data.format(*two_cols)
self.StringIO_input = StringIO(data)
def time_baseline(self, engine):
read_csv(
self.data(self.StringIO_input),
engine=engine,
sep=",",
header=None,
parse_dates=[1],
names=list(string.digits[:9]),
)
| ReadCSVParseDates |
python | doocs__leetcode | solution/1700-1799/1799.Maximize Score After N Operations/Solution.py | {
"start": 0,
"end": 735
} | class ____:
def maxScore(self, nums: List[int]) -> int:
m = len(nums)
f = [0] * (1 << m)
g = [[0] * m for _ in range(m)]
for i in range(m):
for j in range(i + 1, m):
g[i][j] = gcd(nums[i], nums[j])
for k in range(1 << m):
if (cnt := k.bit_count()) % 2 == 0:
for i in range(m):
if k >> i & 1:
for j in range(i + 1, m):
if k >> j & 1:
f[k] = max(
f[k],
f[k ^ (1 << i) ^ (1 << j)] + cnt // 2 * g[i][j],
)
return f[-1]
| Solution |
python | pola-rs__polars | py-polars/src/polars/io/iceberg/_utils.py | {
"start": 17751,
"end": 18680
} | class ____(abc.ABC):
def __init__(self, polars_dtype: pl.DataType) -> None:
self.polars_dtype = polars_dtype
@staticmethod
def init_for_field_type(
current_field_type: IcebergType,
# All types that this field ID has been set to across schema changes.
all_field_types: set[IcebergType],
field_polars_dtype: pl.DataType,
) -> LoadFromBytesImpl | None:
if (v := _bytes_loader_lookup().get(type(current_field_type))) is None:
return None
loader_impl, allowed_field_types = v
return (
loader_impl(field_polars_dtype)
if all(isinstance(x, allowed_field_types) for x in all_field_types) # type: ignore[arg-type]
else None
)
@abc.abstractmethod
def load_from_bytes(self, byte_values: list[bytes | None]) -> pl.Series:
"""`bytes_values` should be of binary type."""
| LoadFromBytesImpl |
python | numba__numba | numba/core/caching.py | {
"start": 25899,
"end": 26855
} | class ____(Cache):
"""
Implements Cache that saves and loads CompileResult objects.
"""
_impl_class = CompileResultCacheImpl
# Remember used cache filename prefixes.
_lib_cache_prefixes = set([''])
def make_library_cache(prefix):
"""
Create a Cache class for additional compilation features to cache their
result for reuse. The cache is saved in filename pattern like
in ``FunctionCache`` but with additional *prefix* as specified.
"""
# avoid cache prefix reuse
assert prefix not in _lib_cache_prefixes
_lib_cache_prefixes.add(prefix)
class CustomCodeLibraryCacheImpl(CodeLibraryCacheImpl):
_filename_prefix = prefix
class LibraryCache(Cache):
"""
Implements Cache that saves and loads CodeLibrary objects for additional
feature for the specified python function.
"""
_impl_class = CustomCodeLibraryCacheImpl
return LibraryCache
| FunctionCache |
python | readthedocs__readthedocs.org | readthedocs/subscriptions/views.py | {
"start": 6812,
"end": 8273
} | class ____(OrganizationMixin, GenericView):
"""Create a stripe billing portal session for the user to manage their subscription."""
http_method_names = ["post"]
def get_success_url(self):
return reverse(
"subscription_detail",
args=[self.get_organization().slug],
)
def post(self, request, *args, **kwargs):
"""Redirect the user to the Stripe billing portal."""
stripe_client = get_stripe_client()
organization = self.get_organization()
stripe_customer = organization.stripe_customer
return_url = request.build_absolute_uri(self.get_success_url())
try:
billing_portal = stripe_client.billing_portal.sessions.create(
params={
"customer": stripe_customer.id,
"return_url": return_url,
}
)
return HttpResponseRedirect(billing_portal.url)
except: # noqa
log.exception(
"There was an error connecting to Stripe to create the billing portal session.",
stripe_customer=stripe_customer.id,
organization_slug=organization.slug,
)
messages.error(
request,
_("There was an error connecting to Stripe, please try again in a few minutes"),
)
return HttpResponseRedirect(self.get_success_url())
| StripeCustomerPortal |
python | getsentry__sentry | src/sentry/notifications/platform/target.py | {
"start": 730,
"end": 865
} | class ____(StrEnum):
GENERIC = "generic"
INTEGRATION = "integration"
@dataclass(kw_only=True, frozen=True)
| NotificationTargetType |
python | getsentry__sentry-python | sentry_sdk/utils.py | {
"start": 7865,
"end": 7927
} | class ____(ValueError):
"""Raised on invalid DSNs."""
| BadDsn |
python | doocs__leetcode | solution/3600-3699/3644.Maximum K to Sort a Permutation/Solution.py | {
"start": 0,
"end": 200
} | class ____:
def sortPermutation(self, nums: List[int]) -> int:
ans = -1
for i, x in enumerate(nums):
if i != x:
ans &= x
return max(ans, 0)
| Solution |
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 9395,
"end": 12964
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = encoder_hidden_states.shape[1]
q_input_shape = (bsz, tgt_len, -1, self.attention_head_size)
kv_input_shape = (bsz, src_len, -1, self.attention_head_size)
# get query proj
query_layer = self.query(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False
if past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].keys
value_layer = past_key_values.cross_attention_cache.layers[self.layer_idx].values
else:
key_layer = self.key(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2)
value_layer = self.value(encoder_hidden_states).view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all states to the cache
key_layer, value_layer = past_key_values.cross_attention_cache.update(
key_layer, value_layer, self.layer_idx
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
| ElectraCrossAttention |
python | has2k1__plotnine | plotnine/themes/theme_gray.py | {
"start": 5226,
"end": 5265
} | class ____(theme_gray):
pass
| theme_grey |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/closure.py | {
"start": 6143,
"end": 7375
} | class ____(Generic[T]):
...
V = List[GenericClass[str]]
P = ParamSpec('P')
def decorator(function: Callable[P, Awaitable[V]]) -> Callable[P, Awaitable[V]]:
async def wrapper(*args: P.args, **kwargs: P.kwargs) -> V:
return await function(*args, **kwargs)
return wrapper
def ignored_decorator(function: Callable[P, Awaitable[V]]) -> Callable[P, Awaitable[V]]:
async def wrapper(*args: P.args, **kwargs: P.kwargs) -> V:
return await function(*args, **kwargs)
return wrapper
async def async_tito(function: Callable[[], Awaitable[T]]) -> T:
return await function()
async def async_nonlocal_closure_tito_with_decorator(x: GenericClass[str], y: GenericClass[str]):
z = _test_source()
# TODO(T171117938): Decorator support for tito closure
@decorator
async def inner() -> List[GenericClass[str]]:
return [x, y, z]
result = await async_tito(inner)
_test_sink(result)
async def async_nonlocal_closure_tito_ignore_decorator(x: GenericClass[str], y: GenericClass[str]):
z = _test_source()
@ignored_decorator
async def inner() -> List[GenericClass[str]]:
return [x, y, z]
result = await async_tito(inner)
_test_sink(result)
| GenericClass |
python | run-llama__llama_index | llama-index-core/tests/tools/test_eval_query_engine_tool.py | {
"start": 1190,
"end": 2995
} | class ____(IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.mock_evaluator = MockEvaluator()
self.mock_evaluator.aevaluate = AsyncMock()
self.mock_evaluator.aevaluate.return_value = EvaluationResult(passing=True)
tool_name = "nice_tool"
self.tool_input = "hello world"
self.expected_content = f"custom_{self.tool_input}"
self.expected_tool_output = ToolOutput(
content=self.expected_content,
raw_input={"input": self.tool_input},
raw_output=Response(
response=self.expected_content,
source_nodes=[],
),
tool_name=tool_name,
)
self.eval_query_engine_tool = EvalQueryEngineTool.from_defaults(
MockQueryEngine(), evaluator=self.mock_evaluator, name=tool_name
)
def test_eval_query_engine_tool_with_eval_passing(self) -> None:
"""Test eval query engine tool with evaluation passing."""
tool_output = self.eval_query_engine_tool(self.tool_input)
self.assertEqual(self.expected_tool_output, tool_output)
def test_eval_query_engine_tool_with_eval_failing(self) -> None:
"""Test eval query engine tool with evaluation failing."""
evaluation_feedback = "The context does not provide a relevant answer."
self.mock_evaluator.aevaluate.return_value = EvaluationResult(
passing=False, feedback=evaluation_feedback
)
self.expected_tool_output.content = (
"Could not use tool nice_tool because it failed evaluation.\n"
f"Reason: {evaluation_feedback}"
)
tool_output = self.eval_query_engine_tool(self.tool_input)
self.assertEqual(self.expected_tool_output, tool_output)
| TestEvalQueryEngineTool |
python | conda__conda | conda/models/environment.py | {
"start": 6996,
"end": 21527
} | class ____:
"""
**Experimental** While experimental, expect both major and minor changes across minor releases.
Data model for a conda environment.
"""
#: The platform this environment may be installed on (required)
platform: str
#: Environment level configuration, eg. channels, solver options, etc.
#: TODO: may need to think more about the type of this field and how
#: conda should be merging configs between environments
config: EnvironmentConfig = field(default_factory=EnvironmentConfig)
#: Map of other package types that conda can install. For example pypi packages.
external_packages: dict[str, list[str]] = field(default_factory=dict)
#: The complete list of specs for the environment.
#: eg. after a solve, or from an explicit environment spec
explicit_packages: list[PackageRecord] = field(default_factory=list)
#: Environment name
name: str | None = None
#: Prefix the environment is installed into.
prefix: str | None = None
#: User requested specs for this environment.
requested_packages: list[MatchSpec] = field(default_factory=list)
#: Environment variables to be applied to the environment.
variables: dict[str, str] = field(default_factory=dict)
# Virtual packages for the environment. Either the default ones provided by
# the virtual_packages plugins or the overrides captured by CONDA_OVERRIDE_*.
virtual_packages: list[PackageRecord] = field(default_factory=list)
def __post_init__(self):
# an environment must have a platform
if not self.platform:
raise CondaValueError("'Environment' needs a 'platform'.")
# ensure the platform is valid
if self.platform not in PLATFORMS:
raise CondaValueError(
f"Invalid platform '{self.platform}'. Valid platforms are {PLATFORMS}."
)
# ensure there are no duplicate packages in explicit_packages
if len(self.explicit_packages) > 1 and len(
set(pkg.name for pkg in self.explicit_packages)
) != len(self.explicit_packages):
raise CondaValueError("Duplicate packages found in 'explicit_packages'.")
# ensure requested_packages matches one (and only one) explicit package
if len(self.requested_packages) > 0 and len(self.explicit_packages) > 0:
explicit_package_names = set(pkg.name for pkg in self.explicit_packages)
for requested_package in self.requested_packages:
if requested_package.name not in explicit_package_names:
raise CondaValueError(
f"Requested package '{requested_package}' is not found in 'explicit_packages'."
)
@classmethod
def merge(cls, *environments):
"""
**Experimental** While experimental, expect both major and minor changes across minor releases.
Merges multiple environments into a single environment following the rules:
* Keeps first name and/or prefix.
* Concatenates and deduplicates requirements.
* Reduces configuration and variables (last key wins).
"""
name = None
prefix = None
platform = None
names = [env.name for env in environments if env.name]
prefixes = [env.prefix for env in environments if env.prefix]
if names:
name = names[0]
if len(names) > 1:
log.debug("Several names passed %s. Picking first one %s", names, name)
if prefixes:
prefix = prefixes[0]
if len(prefixes) > 1:
log.debug(
"Several prefixes passed %s. Picking first one %s", prefixes, prefix
)
platforms = [env.platform for env in environments if env.platform]
# Ensure that all environments have the same platform
if len(set(platforms)) == 1:
platform = platforms[0]
else:
raise CondaValueError(
"Conda can not merge environments of different platforms. "
f"Received environments with platforms: {platforms}"
)
requested_packages = list(
dict.fromkeys(
requirement
for env in environments
for requirement in env.requested_packages
)
)
explicit_packages = list(
dict.fromkeys(
requirement
for env in environments
for requirement in env.explicit_packages
)
)
virtual_packages = list(
dict.fromkeys(
virtual_package
for env in environments
for virtual_package in env.virtual_packages
)
)
variables = {k: v for env in environments for (k, v) in env.variables.items()}
external_packages = {}
for env in environments:
# External packages map values are always lists of strings. So,
# we'll want to concatenate each list.
for k, v in env.external_packages.items():
if k in external_packages:
for val in v:
if val not in external_packages[k]:
external_packages[k].append(val)
elif isinstance(v, list):
external_packages[k] = v
config = EnvironmentConfig.merge(
*[env.config for env in environments if env.config is not None]
)
return cls(
config=config,
external_packages=external_packages,
explicit_packages=explicit_packages,
name=name,
platform=platform,
prefix=prefix,
requested_packages=requested_packages,
variables=variables,
virtual_packages=virtual_packages,
)
@classmethod
def from_prefix(
cls,
prefix: str,
name: str,
platform: str,
*,
from_history: bool = False,
no_builds: bool = False,
ignore_channels: bool = False,
channels: list[str] | None = None,
) -> Environment:
"""
Create an Environment model from an existing conda prefix.
This method analyzes an installed conda environment and creates
an Environment model that can be used for exporting or other operations.
:param prefix: Path to the conda environment prefix
:param name: Name for the environment
:param platform: Target platform (e.g., 'linux-64', 'osx-64')
:param from_history: Use explicit specs from history instead of installed packages
:param no_builds: Exclude build strings from package specs
:param ignore_channels: Don't include channel information in package specs
:return: Environment model representing the prefix
"""
prefix_data = PrefixData(prefix, interoperability=True)
variables = prefix_data.get_environment_env_vars()
# Build requested packages and external packages
requested_packages = []
external_packages = {}
# Handle --from-history case
if from_history:
requested_packages = cls.from_history(prefix)
conda_precs = [] # No conda packages to process for channel extraction
else:
# Use PrefixData's package extraction methods
conda_precs = prefix_data.get_conda_packages()
python_precs = prefix_data.get_python_packages()
# Create MatchSpecs for conda packages
for conda_prec in conda_precs:
spec_str = conda_prec.spec_no_build if no_builds else conda_prec.spec
if (
not ignore_channels
and conda_prec.channel
and conda_prec.channel.name
):
spec_str = f"{conda_prec.channel.name}::{spec_str}"
requested_packages.append(MatchSpec(spec_str))
# Add pip dependencies to external_packages if any exist
if python_precs:
# Create pip dependencies list matching current conda format
python_deps = [
f"{python_prec.name}=={python_prec.version}"
for python_prec in python_precs
]
external_packages["pip"] = python_deps
# Always populate explicit_packages from prefix data (for explicit export format)
explicit_packages = list(prefix_data.iter_records())
# Build channels tuple
environment_channels = tuple(channels or ())
# Inject channels from installed conda packages (unless ignoring channels)
# This applies regardless of override_channels setting
if not ignore_channels:
environment_channels = (
*(
canonical_name
# Reuse conda_precs instead of calling get_conda_packages() again
for conda_package in conda_precs
if (canonical_name := conda_package.channel.canonical_name)
!= UNKNOWN_CHANNEL
),
*environment_channels,
)
# Channels tuple is a unique ordered sequence
environment_channels = tuple(dict.fromkeys(environment_channels))
# Create environment config with comprehensive context settings
config = EnvironmentConfig.from_context()
# Override/set channels with those extracted from installed packages if any were found
config = replace(config, channels=environment_channels)
return cls(
prefix=prefix,
platform=platform,
name=name,
config=config,
variables=variables,
external_packages=external_packages,
requested_packages=requested_packages,
explicit_packages=explicit_packages,
)
@classmethod
def from_cli(
cls,
args: Namespace,
add_default_packages: bool = False,
) -> Environment:
"""
Create an Environment model from command-line arguments.
This method will parse command-line arguments and create an
Environment object. This includes: reading files provided as
cli arguments, and pulling EnvironmentConfig from the context.
:param args: argparse Namespace containing command-line arguments
:return: An Environment object representing the cli
"""
specs = [package.strip("\"'") for package in args.packages]
requested_packages = []
fetch_explicit_packages = []
# extract specs from files
# TODO: This should be replaced with reading files using the
# environment spec plugin. The core conda cli commands are not
# ready for that yet. So, use this old way of reading specs from
# files.
for fpath in args.file:
try:
specs.extend(
[spec for spec in specs_from_url(fpath) if spec != EXPLICIT_MARKER]
)
except UnicodeError:
raise CondaError(
"Error reading file, file should be a text file containing packages\n"
"See `conda create --help` for details."
)
# Add default packages if required. If the default package is already
# present in the list of specs, don't add it (this will override any
# version constraint from the default package).
if add_default_packages:
names = {MatchSpec(spec).name for spec in specs}
for default_package in context.create_default_packages:
if MatchSpec(default_package).name not in names:
specs.append(default_package)
for spec in specs:
if (match_spec := MatchSpec(spec)).get("url"):
fetch_explicit_packages.append(spec)
else:
requested_packages.append(match_spec)
# transform explicit packages into package records
explicit_packages = []
if fetch_explicit_packages:
if len(fetch_explicit_packages) == len(specs):
explicit_packages = get_package_records_from_explicit(
fetch_explicit_packages
)
else:
raise CondaValueError(
"Cannot mix specifications with conda package filenames"
)
return Environment(
name=args.name,
prefix=context.target_prefix,
platform=context.subdir,
requested_packages=requested_packages,
explicit_packages=explicit_packages,
config=EnvironmentConfig.from_context(),
)
@staticmethod
def from_history(prefix: PathType) -> list[MatchSpec]:
history = History(prefix)
spec_map = history.get_requested_specs_map()
# Get MatchSpec objects from history; they'll be serialized to bracket format later
return list(spec_map.values())
    def extrapolate(self, platform: str) -> Environment:
        """
        Given the current environment, extrapolate the environment for the given platform.

        Re-solves the specs historically requested for this prefix against
        the target ``platform`` (plus ``noarch``) and returns a new
        ``Environment`` whose explicit packages come from that solve. If
        ``platform`` already matches this environment's platform, ``self``
        is returned unchanged.
        """
        if platform == self.platform:
            # Nothing to translate; reuse the existing environment object.
            return self
        # Imported here rather than at module level — presumably to avoid a
        # circular import with the CLI package; confirm before moving.
        from ..cli.install import Repodatas
        solver_backend = context.plugin_manager.get_cached_solver_backend()
        # Specs the user originally asked for, pulled from the prefix history.
        requested_packages = self.from_history(self.prefix)
        # NOTE(review): each loop iteration overwrites explicit_packages, so
        # only the solve against the last repodata filename is kept, and a
        # failing solve aborts the whole method (no per-repodata fallback
        # here) — confirm this is intended.
        for repodata_manager in Repodatas(self.config.repodata_fns, {}):
            with repodata_manager as repodata_fn:
                solver = solver_backend(
                    # Hypothetical solve: no real prefix is involved.
                    prefix="/env/does/not/exist",
                    channels=context.channels,
                    subdirs=(platform, "noarch"),
                    specs_to_add=requested_packages,
                    repodata_fn=repodata_fn,
                    command="create",
                )
                explicit_packages = solver.solve_final_state()
        return Environment(
            prefix=self.prefix,
            name=self.name,
            platform=platform,
            config=EnvironmentConfig.from_context(),
            requested_packages=requested_packages,
            explicit_packages=explicit_packages,
            external_packages=self.external_packages,
        )
| Environment |
python | django__django | tests/admin_filters/tests.py | {
"start": 9883,
"end": 83510
} | class ____(TestCase):
request_factory = RequestFactory()
    @classmethod
    def setUpTestData(cls):
        """
        Create the shared fixture: reference dates, three users, two
        departments with one employee each, and four books whose field
        values vary so each list-filter test has matching and non-matching
        rows.
        """
        cls.today = datetime.date.today()
        cls.tomorrow = cls.today + datetime.timedelta(days=1)
        cls.one_week_ago = cls.today - datetime.timedelta(days=7)
        # First day of the month after "today"; roll the year in December.
        if cls.today.month == 12:
            cls.next_month = cls.today.replace(year=cls.today.year + 1, month=1, day=1)
        else:
            cls.next_month = cls.today.replace(month=cls.today.month + 1, day=1)
        # January 1st of next year.
        cls.next_year = cls.today.replace(year=cls.today.year + 1, month=1, day=1)
        # Users
        cls.alfred = User.objects.create_superuser(
            "alfred", "alfred@example.com", "password"
        )
        cls.bob = User.objects.create_user("bob", "bob@example.com")
        cls.lisa = User.objects.create_user("lisa", "lisa@example.com")
        # Departments
        cls.dev = Department.objects.create(code="DEV", description="Development")
        cls.design = Department.objects.create(code="DSN", description="Design")
        # Employees
        cls.john = Employee.objects.create(name="John Blue", department=cls.dev)
        cls.jack = Employee.objects.create(name="Jack Red", department=cls.design)
        # Books
        cls.djangonaut_book = Book.objects.create(
            title="Djangonaut: an art of living",
            year=2009,
            author=cls.alfred,
            is_best_seller=True,
            date_registered=cls.today,
            availability=True,
            category="non-fiction",
            employee=cls.john,
        )
        # Only book without a date_registered (exercises date isnull lookups).
        cls.bio_book = Book.objects.create(
            title="Django: a biography",
            year=1999,
            author=cls.alfred,
            is_best_seller=False,
            no=207,
            availability=False,
            category="fiction",
            employee=cls.john,
        )
        # year/is_best_seller left NULL (exercises null filter choices).
        cls.django_book = Book.objects.create(
            title="The Django Book",
            year=None,
            author=cls.bob,
            is_best_seller=None,
            date_registered=cls.today,
            no=103,
            availability=True,
            employee=cls.jack,
        )
        # No author/employee; the only book with contributors set below.
        cls.guitar_book = Book.objects.create(
            title="Guitar for dummies",
            year=2002,
            is_best_seller=True,
            date_registered=cls.one_week_ago,
            availability=None,
            category="",
        )
        cls.guitar_book.contributors.set([cls.bob, cls.lisa])
def assertChoicesDisplay(self, choices, expected_displays):
for choice, expected_display in zip(choices, expected_displays, strict=True):
self.assertEqual(choice["display"], expected_display)
def test_choicesfieldlistfilter_has_none_choice(self):
"""
The last choice is for the None value.
"""
class BookmarkChoicesAdmin(ModelAdmin):
list_display = ["none_or_null"]
list_filter = ["none_or_null"]
modeladmin = BookmarkChoicesAdmin(Bookmark, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]["display"], "None")
self.assertEqual(choices[-1]["query_string"], "?none_or_null__isnull=True")
    def test_datefieldlistfilter(self):
        """
        DateFieldListFilter: each preset range (Today, This month, This
        year, Past 7 days) and the null/not-null lookups filter the
        queryset and mark the matching choice as selected.
        """
        modeladmin = BookAdmin(Book, site)
        # NOTE(review): this changelist is overwritten immediately below and
        # never asserted against — looks like leftover setup.
        request = self.request_factory.get("/")
        request.user = self.alfred
        changelist = modeladmin.get_changelist(request)
        # "Today" range: [today, tomorrow).
        request = self.request_factory.get(
            "/",
            {"date_registered__gte": self.today, "date_registered__lt": self.tomorrow},
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, "date registered")
        choice = select_by(filterspec.choices(changelist), "display", "Today")
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"],
            "?date_registered__gte=%s&date_registered__lt=%s"
            % (
                self.today,
                self.tomorrow,
            ),
        )
        # "This month" range: [first of this month, first of next month).
        request = self.request_factory.get(
            "/",
            {
                "date_registered__gte": self.today.replace(day=1),
                "date_registered__lt": self.next_month,
            },
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if (self.today.year, self.today.month) == (
            self.one_week_ago.year,
            self.one_week_ago.month,
        ):
            # In case one week ago is in the same month.
            self.assertEqual(
                list(queryset),
                [self.guitar_book, self.django_book, self.djangonaut_book],
            )
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, "date registered")
        choice = select_by(filterspec.choices(changelist), "display", "This month")
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"],
            "?date_registered__gte=%s&date_registered__lt=%s"
            % (
                self.today.replace(day=1),
                self.next_month,
            ),
        )
        # "This year" range: [Jan 1, Jan 1 of next year).
        request = self.request_factory.get(
            "/",
            {
                "date_registered__gte": self.today.replace(month=1, day=1),
                "date_registered__lt": self.next_year,
            },
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if self.today.year == self.one_week_ago.year:
            # In case one week ago is in the same year.
            self.assertEqual(
                list(queryset),
                [self.guitar_book, self.django_book, self.djangonaut_book],
            )
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, "date registered")
        choice = select_by(filterspec.choices(changelist), "display", "This year")
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"],
            "?date_registered__gte=%s&date_registered__lt=%s"
            % (
                self.today.replace(month=1, day=1),
                self.next_year,
            ),
        )
        # "Past 7 days" range: [one week ago, tomorrow), passed as strings.
        request = self.request_factory.get(
            "/",
            {
                "date_registered__gte": str(self.one_week_ago),
                "date_registered__lt": str(self.tomorrow),
            },
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(
            list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book]
        )
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, "date registered")
        choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"],
            "?date_registered__gte=%s&date_registered__lt=%s"
            % (
                str(self.one_week_ago),
                str(self.tomorrow),
            ),
        )
        # Null/not null queries
        request = self.request_factory.get("/", {"date_registered__isnull": "True"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 1)
        self.assertEqual(queryset[0], self.bio_book)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, "date registered")
        choice = select_by(filterspec.choices(changelist), "display", "No date")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?date_registered__isnull=True")
        request = self.request_factory.get("/", {"date_registered__isnull": "False"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 3)
        self.assertEqual(
            list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book]
        )
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, "date registered")
        choice = select_by(filterspec.choices(changelist), "display", "Has date")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?date_registered__isnull=False")
    @unittest.skipIf(
        sys.platform == "win32",
        "Windows doesn't support setting a timezone that differs from the "
        "system timezone.",
    )
    @override_settings(USE_TZ=True)
    def test_datefieldlistfilter_with_time_zone_support(self):
        # Regression for #17830: date filters must behave identically when
        # USE_TZ is enabled, so rerun the full date-filter test under TZ
        # support.
        self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get("/", {"year__isnull": "True"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "year")
choices = list(filterspec.choices(changelist))
self.assertIs(choices[-1]["selected"], True)
self.assertEqual(choices[-1]["query_string"], "?year__isnull=True")
request = self.request_factory.get("/", {"year": "2002"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "year")
choices = list(filterspec.choices(changelist))
self.assertIs(choices[2]["selected"], True)
self.assertEqual(choices[2]["query_string"], "?year=2002")
def test_allvaluesfieldlistfilter_custom_qs(self):
# Make sure that correct filters are returned with custom querysets
modeladmin = BookAdminWithCustomQueryset(self.alfred, Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
choices = list(filterspec.choices(changelist))
# Should have 'All', 1999 and 2009 options i.e. the subset of years of
# books written by alfred (which is the filtering criteria set by
# BookAdminWithCustomQueryset.get_queryset())
self.assertEqual(3, len(choices))
self.assertEqual(choices[0]["query_string"], "?")
self.assertEqual(choices[1]["query_string"], "?year=1999")
self.assertEqual(choices[2]["query_string"], "?year=2009")
    def test_relatedfieldlistfilter_foreignkey(self):
        """
        RelatedFieldListFilter on the ``author`` FK: all users are offered
        as choices, isnull matches authorless books, and an exact pk lookup
        selects the matching choice.
        """
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get("/")
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure that all users are present in the author's list filter
        filterspec = changelist.get_filters(request)[0][1]
        expected = [
            (self.alfred.pk, "alfred"),
            (self.bob.pk, "bob"),
            (self.lisa.pk, "lisa"),
        ]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
        request = self.request_factory.get("/", {"author__isnull": "True"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, "Verbose Author")
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]["selected"], True)
        self.assertEqual(choices[-1]["query_string"], "?author__isnull=True")
        request = self.request_factory.get("/", {"author__id__exact": self.alfred.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, "Verbose Author")
        # order of choices depends on User model, which has no order
        choice = select_by(filterspec.choices(changelist), "display", "alfred")
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"], "?author__id__exact=%d" % self.alfred.pk
        )
def test_relatedfieldlistfilter_foreignkey_ordering(self):
"""RelatedFieldListFilter ordering respects ModelAdmin.ordering."""
class EmployeeAdminWithOrdering(ModelAdmin):
ordering = ("name",)
class BookAdmin(ModelAdmin):
list_filter = ("employee",)
site.register(Employee, EmployeeAdminWithOrdering)
self.addCleanup(lambda: site.unregister(Employee))
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
expected = [(self.jack.pk, "Jack Red"), (self.john.pk, "John Blue")]
self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedfieldlistfilter_foreignkey_ordering_reverse(self):
class EmployeeAdminWithOrdering(ModelAdmin):
ordering = ("-name",)
class BookAdmin(ModelAdmin):
list_filter = ("employee",)
site.register(Employee, EmployeeAdminWithOrdering)
self.addCleanup(lambda: site.unregister(Employee))
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
expected = [(self.john.pk, "John Blue"), (self.jack.pk, "Jack Red")]
self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedfieldlistfilter_foreignkey_default_ordering(self):
"""RelatedFieldListFilter ordering respects Model.ordering."""
class BookAdmin(ModelAdmin):
list_filter = ("employee",)
self.addCleanup(setattr, Employee._meta, "ordering", Employee._meta.ordering)
Employee._meta.ordering = ("name",)
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
expected = [(self.jack.pk, "Jack Red"), (self.john.pk, "John Blue")]
self.assertEqual(filterspec.lookup_choices, expected)
    def test_relatedfieldlistfilter_manytomany(self):
        """
        RelatedFieldListFilter on the ``contributors`` M2M: all users appear
        as choices, isnull matches books without contributors, and an exact
        pk lookup selects the matching choice.
        """
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get("/")
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure that all users are present in the contrib's list filter
        filterspec = changelist.get_filters(request)[0][2]
        expected = [
            (self.alfred.pk, "alfred"),
            (self.bob.pk, "bob"),
            (self.lisa.pk, "lisa"),
        ]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
        request = self.request_factory.get("/", {"contributors__isnull": "True"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(
            list(queryset), [self.django_book, self.bio_book, self.djangonaut_book]
        )
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(filterspec.title, "Verbose Contributors")
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]["selected"], True)
        self.assertEqual(choices[-1]["query_string"], "?contributors__isnull=True")
        request = self.request_factory.get(
            "/", {"contributors__id__exact": self.bob.pk}
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(filterspec.title, "Verbose Contributors")
        choice = select_by(filterspec.choices(changelist), "display", "bob")
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"], "?contributors__id__exact=%d" % self.bob.pk
        )
    def test_relatedfieldlistfilter_reverse_relationships(self):
        """
        RelatedFieldListFilter on reverse FK (``books_authored``) and
        reverse M2M (``books_contributed``) relations: isnull and exact
        lookups work, and the filters disappear once no related objects
        remain.
        """
        modeladmin = CustomUserAdmin(User, site)
        # FK relationship -----
        request = self.request_factory.get("/", {"books_authored__isnull": "True"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.lisa])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, "book")
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]["selected"], True)
        self.assertEqual(choices[-1]["query_string"], "?books_authored__isnull=True")
        request = self.request_factory.get(
            "/", {"books_authored__id__exact": self.bio_book.pk}
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, "book")
        choice = select_by(
            filterspec.choices(changelist), "display", self.bio_book.title
        )
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"], "?books_authored__id__exact=%d" % self.bio_book.pk
        )
        # M2M relationship -----
        request = self.request_factory.get("/", {"books_contributed__isnull": "True"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.alfred])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, "book")
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]["selected"], True)
        self.assertEqual(choices[-1]["query_string"], "?books_contributed__isnull=True")
        request = self.request_factory.get(
            "/", {"books_contributed__id__exact": self.django_book.pk}
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, "book")
        choice = select_by(
            filterspec.choices(changelist), "display", self.django_book.title
        )
        self.assertIs(choice["selected"], True)
        self.assertEqual(
            choice["query_string"],
            "?books_contributed__id__exact=%d" % self.django_book.pk,
        )
        # With one book, the list filter should appear because there is also a
        # (None) option.
        Book.objects.exclude(pk=self.djangonaut_book.pk).delete()
        filterspec = changelist.get_filters(request)[0]
        self.assertEqual(len(filterspec), 2)
        # With no books remaining, no list filters should appear.
        Book.objects.all().delete()
        filterspec = changelist.get_filters(request)[0]
        self.assertEqual(len(filterspec), 0)
def test_relatedfieldlistfilter_reverse_relationships_default_ordering(self):
self.addCleanup(setattr, Book._meta, "ordering", Book._meta.ordering)
Book._meta.ordering = ("title",)
modeladmin = CustomUserAdmin(User, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
expected = [
(self.bio_book.pk, "Django: a biography"),
(self.djangonaut_book.pk, "Djangonaut: an art of living"),
(self.guitar_book.pk, "Guitar for dummies"),
(self.django_book.pk, "The Django Book"),
]
self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedonlyfieldlistfilter_foreignkey(self):
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure that only actual authors are present in author's list
# filter
filterspec = changelist.get_filters(request)[0][4]
expected = [(self.alfred.pk, "alfred"), (self.bob.pk, "bob")]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_relatedonlyfieldlistfilter_foreignkey_reverse_relationships(self):
class EmployeeAdminReverseRelationship(ModelAdmin):
list_filter = (("book", RelatedOnlyFieldListFilter),)
self.djangonaut_book.employee = self.john
self.djangonaut_book.save()
self.django_book.employee = self.jack
self.django_book.save()
modeladmin = EmployeeAdminReverseRelationship(Employee, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
self.assertCountEqual(
filterspec.lookup_choices,
[
(self.djangonaut_book.pk, "Djangonaut: an art of living"),
(self.bio_book.pk, "Django: a biography"),
(self.django_book.pk, "The Django Book"),
],
)
def test_relatedonlyfieldlistfilter_manytomany_reverse_relationships(self):
class UserAdminReverseRelationship(ModelAdmin):
list_filter = (("books_contributed", RelatedOnlyFieldListFilter),)
modeladmin = UserAdminReverseRelationship(User, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(
filterspec.lookup_choices,
[(self.guitar_book.pk, "Guitar for dummies")],
)
def test_relatedonlyfieldlistfilter_foreignkey_ordering(self):
"""RelatedOnlyFieldListFilter ordering respects ModelAdmin.ordering."""
class EmployeeAdminWithOrdering(ModelAdmin):
ordering = ("name",)
class BookAdmin(ModelAdmin):
list_filter = (("employee", RelatedOnlyFieldListFilter),)
albert = Employee.objects.create(name="Albert Green", department=self.dev)
self.djangonaut_book.employee = albert
self.djangonaut_book.save()
self.bio_book.employee = self.jack
self.bio_book.save()
site.register(Employee, EmployeeAdminWithOrdering)
self.addCleanup(lambda: site.unregister(Employee))
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
expected = [(albert.pk, "Albert Green"), (self.jack.pk, "Jack Red")]
self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedonlyfieldlistfilter_foreignkey_default_ordering(self):
"""RelatedOnlyFieldListFilter ordering respects Meta.ordering."""
class BookAdmin(ModelAdmin):
list_filter = (("employee", RelatedOnlyFieldListFilter),)
albert = Employee.objects.create(name="Albert Green", department=self.dev)
self.djangonaut_book.employee = albert
self.djangonaut_book.save()
self.bio_book.employee = self.jack
self.bio_book.save()
self.addCleanup(setattr, Employee._meta, "ordering", Employee._meta.ordering)
Employee._meta.ordering = ("name",)
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
expected = [(albert.pk, "Albert Green"), (self.jack.pk, "Jack Red")]
self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedonlyfieldlistfilter_underscorelookup_foreignkey(self):
Department.objects.create(code="TEST", description="Testing")
self.djangonaut_book.employee = self.john
self.djangonaut_book.save()
self.bio_book.employee = self.jack
self.bio_book.save()
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Only actual departments should be present in employee__department's
# list filter.
filterspec = changelist.get_filters(request)[0][6]
expected = [
(self.dev.code, str(self.dev)),
(self.design.code, str(self.design)),
]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_relatedonlyfieldlistfilter_manytomany(self):
modeladmin = BookAdminRelatedOnlyFilter(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure that only actual contributors are present in contrib's list
# filter
filterspec = changelist.get_filters(request)[0][5]
expected = [(self.bob.pk, "bob"), (self.lisa.pk, "lisa")]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_listfilter_genericrelation(self):
django_bookmark = Bookmark.objects.create(url="https://www.djangoproject.com/")
python_bookmark = Bookmark.objects.create(url="https://www.python.org/")
kernel_bookmark = Bookmark.objects.create(url="https://www.kernel.org/")
TaggedItem.objects.create(content_object=django_bookmark, tag="python")
TaggedItem.objects.create(content_object=python_bookmark, tag="python")
TaggedItem.objects.create(content_object=kernel_bookmark, tag="linux")
modeladmin = BookmarkAdminGenericRelation(Bookmark, site)
request = self.request_factory.get("/", {"tags__tag": "python"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
expected = [python_bookmark, django_bookmark]
self.assertEqual(list(queryset), expected)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
    def verify_booleanfieldlistfilter(self, modeladmin):
        """
        Shared assertions for BooleanFieldListFilter on ``is_best_seller``:
        exact 0/1 lookups and the isnull lookup each narrow the queryset
        and select the matching choice.
        """
        # NOTE(review): this first changelist is overwritten right away and
        # never asserted against — looks like leftover setup.
        request = self.request_factory.get("/")
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        request = self.request_factory.get("/", {"is_best_seller__exact": 0})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, "is best seller")
        choice = select_by(filterspec.choices(changelist), "display", "No")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?is_best_seller__exact=0")
        request = self.request_factory.get("/", {"is_best_seller__exact": 1})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, "is best seller")
        choice = select_by(filterspec.choices(changelist), "display", "Yes")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?is_best_seller__exact=1")
        request = self.request_factory.get("/", {"is_best_seller__isnull": "True"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(filterspec.title, "is best seller")
        choice = select_by(filterspec.choices(changelist), "display", "Unknown")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?is_best_seller__isnull=True")
def test_booleanfieldlistfilter_choices(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter_choices(modeladmin)
def test_booleanfieldlistfilter_tuple_choices(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter_choices(modeladmin)
    def verify_booleanfieldlistfilter_choices(self, modeladmin):
        """
        Shared assertions for a boolean filter with custom choice labels on
        ``availability``: Paid/Free/Obscure/All map to False/True/NULL/no
        filtering respectively.
        """
        # False.
        request = self.request_factory.get("/", {"availability__exact": 0})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])
        filterspec = changelist.get_filters(request)[0][6]
        self.assertEqual(filterspec.title, "availability")
        choice = select_by(filterspec.choices(changelist), "display", "Paid")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?availability__exact=0")
        # True.
        request = self.request_factory.get("/", {"availability__exact": 1})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        filterspec = changelist.get_filters(request)[0][6]
        self.assertEqual(filterspec.title, "availability")
        choice = select_by(filterspec.choices(changelist), "display", "Free")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?availability__exact=1")
        # None.
        request = self.request_factory.get("/", {"availability__isnull": "True"})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book])
        filterspec = changelist.get_filters(request)[0][6]
        self.assertEqual(filterspec.title, "availability")
        choice = select_by(filterspec.choices(changelist), "display", "Obscure")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?availability__isnull=True")
        # All.
        request = self.request_factory.get("/")
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)
        self.assertEqual(
            list(queryset),
            [self.guitar_book, self.django_book, self.bio_book, self.djangonaut_book],
        )
        filterspec = changelist.get_filters(request)[0][6]
        self.assertEqual(filterspec.title, "availability")
        choice = select_by(filterspec.choices(changelist), "display", "All")
        self.assertIs(choice["selected"], True)
        self.assertEqual(choice["query_string"], "?")
def test_fieldlistfilter_underscorelookup_tuple(self):
"""
Ensure ('fieldpath', ClassName ) lookups pass lookup_allowed checks
when fieldpath contains double underscore in value (#19182).
"""
modeladmin = BookAdminWithUnderscoreLookupAndTuple(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
request = self.request_factory.get("/", {"author__email": "alfred@example.com"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book, self.djangonaut_book])
def test_fieldlistfilter_invalid_lookup_parameters(self):
"""Filtering by an invalid value."""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get(
"/", {"author__id__exact": "StringNotInteger!"}
)
request.user = self.alfred
with self.assertRaises(IncorrectLookupParameters):
modeladmin.get_changelist_instance(request)
def test_fieldlistfilter_multiple_invalid_lookup_parameters(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get(
"/", {"author__id__exact": f"{self.alfred.pk},{self.bob.pk}"}
)
request.user = self.alfred
with self.assertRaises(IncorrectLookupParameters):
modeladmin.get_changelist_instance(request)
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get("/", {})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), list(Book.objects.order_by("-id")))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]["display"], "All")
self.assertIs(choices[0]["selected"], True)
self.assertEqual(choices[0]["query_string"], "?")
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get("/", {"publication-decade": "the 80s"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]["display"], "the 1980's")
self.assertIs(choices[1]["selected"], True)
self.assertEqual(choices[1]["query_string"], "?publication-decade=the+80s")
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get("/", {"publication-decade": "the 90s"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]["display"], "the 1990's")
self.assertIs(choices[2]["selected"], True)
self.assertEqual(choices[2]["query_string"], "?publication-decade=the+90s")
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get("/", {"publication-decade": "the 00s"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]["display"], "the 2000's")
self.assertIs(choices[3]["selected"], True)
self.assertEqual(choices[3]["query_string"], "?publication-decade=the+00s")
# Combine multiple filters -------------------------------------------
request = self.request_factory.get(
"/", {"publication-decade": "the 00s", "author__id__exact": self.alfred.pk}
)
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]["display"], "the 2000's")
self.assertIs(choices[3]["selected"], True)
self.assertEqual(
choices[3]["query_string"],
"?author__id__exact=%s&publication-decade=the+00s" % self.alfred.pk,
)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "Verbose Author")
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertIs(choice["selected"], True)
self.assertEqual(
choice["query_string"],
"?author__id__exact=%s&publication-decade=the+00s" % self.alfred.pk,
)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
msg = (
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
modeladmin.get_changelist_instance(request)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
msg = (
"The list filter 'DecadeListFilterWithoutParameter' does not specify a "
"'parameter_name'."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
modeladmin.get_changelist_instance(request)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
A SimpleListFilter lookups method can return None but disables the
filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
When a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed (#17828).
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
with self.assertRaises(ZeroDivisionError):
modeladmin.get_changelist_instance(request)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]["display"], "All")
self.assertIs(choices[0]["selected"], True)
self.assertEqual(choices[0]["query_string"], "?")
self.assertEqual(choices[1]["display"], "the 1990's")
self.assertIs(choices[1]["selected"], False)
self.assertEqual(choices[1]["query_string"], "?publication-decade=the+90s")
self.assertEqual(choices[2]["display"], "the 2000's")
self.assertIs(choices[2]["selected"], False)
self.assertEqual(choices[2]["query_string"], "?publication-decade=the+00s")
    def _test_facets(self, modeladmin, request, query_string=None):
        """
        Shared assertions for facet counts.

        Checks that every filter's choices display the expected per-choice
        object counts and, when ``query_string`` is given, that each choice
        URL preserves it.
        """
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)
        self.assertSequenceEqual(queryset, list(Book.objects.order_by("-id")))
        filters = changelist.get_filters(request)[0]
        # Filters for DateFieldListFilter.
        expected_date_filters = ["Any date (4)", "Today (2)", "Past 7 days (3)"]
        # The "This month"/"This year" counts depend on whether one_week_ago
        # falls in the same month/year as today.
        if (
            self.today.month == self.one_week_ago.month
            and self.today.year == self.one_week_ago.year
        ):
            expected_date_filters.extend(["This month (3)", "This year (3)"])
        elif self.today.year == self.one_week_ago.year:
            expected_date_filters.extend(["This month (2)", "This year (3)"])
        else:
            expected_date_filters.extend(["This month (2)", "This year (2)"])
        expected_date_filters.extend(["No date (1)", "Has date (3)"])
        # Backends that store empty strings as NULL (e.g. Oracle) count one
        # extra object for the empty-string choice.
        empty_choice_count = (
            2 if connection.features.interprets_empty_strings_as_nulls else 1
        )
        # One expected-display list per filter, in list_filter order.
        tests = [
            # RelatedFieldListFilter.
            ["All", "alfred (2)", "bob (1)", "lisa (0)", "??? (1)"],
            # SimpleListFilter.
            [
                "All",
                "the 1980's (0)",
                "the 1990's (1)",
                "the 2000's (2)",
                "other decades (-)",
            ],
            # BooleanFieldListFilter.
            ["All", "Yes (2)", "No (1)", "Unknown (1)"],
            # ChoicesFieldListFilter.
            [
                "All",
                "Non-Fictional (1)",
                "Fictional (1)",
                f"We don't know ({empty_choice_count})",
                f"Not categorized ({empty_choice_count})",
            ],
            # DateFieldListFilter.
            expected_date_filters,
            # AllValuesFieldListFilter.
            [
                "All",
                "alfred@example.com (2)",
                "bob@example.com (1)",
                "lisa@example.com (0)",
            ],
            # RelatedOnlyFieldListFilter.
            ["All", "bob (1)", "lisa (1)", "??? (3)"],
            # EmptyFieldListFilter.
            ["All", "Empty (2)", "Not empty (2)"],
            # SimpleListFilter with join relations.
            ["All", "Owned by Dev Department (2)", "Other (2)"],
        ]
        for filterspec, expected_displays in zip(filters, tests, strict=True):
            with self.subTest(filterspec.__class__.__name__):
                choices = list(filterspec.choices(changelist))
                self.assertChoicesDisplay(choices, expected_displays)
                if query_string:
                    for choice in choices:
                        self.assertIn(query_string, choice["query_string"])
def test_facets_always(self):
modeladmin = DecadeFilterBookAdminWithAlwaysFacets(Book, site)
request = self.request_factory.get("/")
self._test_facets(modeladmin, request)
def test_facets_no_filter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
request = self.request_factory.get("/?_facets")
self._test_facets(modeladmin, request, query_string="_facets")
    def test_facets_filter(self):
        """
        Facet counts are computed against the currently filtered queryset:
        with author=alfred applied, each filter only counts alfred's books.
        """
        modeladmin = DecadeFilterBookAdmin(Book, site)
        request = self.request_factory.get(
            "/", {"author__id__exact": self.alfred.pk, "_facets": ""}
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)
        self.assertSequenceEqual(
            queryset,
            list(Book.objects.filter(author=self.alfred).order_by("-id")),
        )
        filters = changelist.get_filters(request)[0]
        # One expected-display list per filter, in list_filter order.
        tests = [
            # RelatedFieldListFilter.
            ["All", "alfred (2)", "bob (1)", "lisa (0)", "??? (1)"],
            # SimpleListFilter.
            [
                "All",
                "the 1980's (0)",
                "the 1990's (1)",
                "the 2000's (1)",
                "other decades (-)",
            ],
            # BooleanFieldListFilter.
            ["All", "Yes (1)", "No (1)", "Unknown (0)"],
            # ChoicesFieldListFilter.
            [
                "All",
                "Non-Fictional (1)",
                "Fictional (1)",
                "We don't know (0)",
                "Not categorized (0)",
            ],
            # DateFieldListFilter.
            [
                "Any date (2)",
                "Today (1)",
                "Past 7 days (1)",
                "This month (1)",
                "This year (1)",
                "No date (1)",
                "Has date (1)",
            ],
            # AllValuesFieldListFilter.
            [
                "All",
                "alfred@example.com (2)",
                "bob@example.com (0)",
                "lisa@example.com (0)",
            ],
            # RelatedOnlyFieldListFilter.
            ["All", "bob (0)", "lisa (0)", "??? (2)"],
            # EmptyFieldListFilter.
            ["All", "Empty (0)", "Not empty (2)"],
            # SimpleListFilter with join relations.
            ["All", "Owned by Dev Department (2)", "Other (0)"],
        ]
        for filterspec, expected_displays in zip(filters, tests, strict=True):
            with self.subTest(filterspec.__class__.__name__):
                choices = list(filterspec.choices(changelist))
                self.assertChoicesDisplay(choices, expected_displays)
                # Every choice URL must carry the _facets flag forward.
                for choice in choices:
                    self.assertIn("_facets", choice["query_string"])
    def test_facets_disallowed(self):
        """
        With facets disallowed, no counts are shown even when ?_facets is
        present in the URL.
        """
        modeladmin = DecadeFilterBookAdminDisallowFacets(Book, site)
        # Facets are not visible even when in the url query.
        request = self.request_factory.get("/?_facets")
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        queryset = changelist.get_queryset(request)
        self.assertSequenceEqual(queryset, list(Book.objects.order_by("-id")))
        filters = changelist.get_filters(request)[0]
        # Plain displays, without "(n)" count suffixes, per filter.
        tests = [
            # RelatedFieldListFilter.
            ["All", "alfred", "bob", "lisa", "???"],
            # SimpleListFilter.
            ["All", "the 1980's", "the 1990's", "the 2000's", "other decades"],
            # BooleanFieldListFilter.
            ["All", "Yes", "No", "Unknown"],
            # ChoicesFieldListFilter.
            ["All", "Non-Fictional", "Fictional", "We don't know", "Not categorized"],
            # DateFieldListFilter.
            [
                "Any date",
                "Today",
                "Past 7 days",
                "This month",
                "This year",
                "No date",
                "Has date",
            ],
            # AllValuesFieldListFilter.
            ["All", "alfred@example.com", "bob@example.com", "lisa@example.com"],
            # RelatedOnlyFieldListFilter.
            ["All", "bob", "lisa", "???"],
            # EmptyFieldListFilter.
            ["All", "Empty", "Not empty"],
            # SimpleListFilter with join relations.
            ["All", "Owned by Dev Department", "Other"],
        ]
        for filterspec, expected_displays in zip(filters, tests, strict=True):
            with self.subTest(filterspec.__class__.__name__):
                self.assertChoicesDisplay(
                    filterspec.choices(changelist),
                    expected_displays,
                )
def test_multi_related_field_filter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
request = self.request_factory.get(
"/",
[("author__id__exact", self.alfred.pk), ("author__id__exact", self.bob.pk)],
)
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
self.assertSequenceEqual(
queryset,
list(
Book.objects.filter(
author__pk__in=[self.alfred.pk, self.bob.pk]
).order_by("-id")
),
)
filterspec = changelist.get_filters(request)[0][0]
choices = list(filterspec.choices(changelist))
expected_choice_values = [
("All", False, "?"),
("alfred", True, f"?author__id__exact={self.alfred.pk}"),
("bob", True, f"?author__id__exact={self.bob.pk}"),
("lisa", False, f"?author__id__exact={self.lisa.pk}"),
]
for i, (display, selected, query_string) in enumerate(expected_choice_values):
self.assertEqual(choices[i]["display"], display)
self.assertIs(choices[i]["selected"], selected)
self.assertEqual(choices[i]["query_string"], query_string)
def test_multi_choice_field_filter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
request = self.request_factory.get(
"/",
[("category__exact", "non-fiction"), ("category__exact", "fiction")],
)
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
self.assertSequenceEqual(
queryset,
list(
Book.objects.filter(category__in=["non-fiction", "fiction"]).order_by(
"-id"
)
),
)
filterspec = changelist.get_filters(request)[0][3]
choices = list(filterspec.choices(changelist))
expected_choice_values = [
("All", False, "?"),
("Non-Fictional", True, "?category__exact=non-fiction"),
("Fictional", True, "?category__exact=fiction"),
("We don't know", False, "?category__exact="),
("Not categorized", False, "?category__isnull=True"),
]
for i, (display, selected, query_string) in enumerate(expected_choice_values):
self.assertEqual(choices[i]["display"], display)
self.assertIs(choices[i]["selected"], selected)
self.assertEqual(choices[i]["query_string"], query_string)
def test_multi_all_values_field_filter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
request = self.request_factory.get(
"/",
[
("author__email", "bob@example.com"),
("author__email", "lisa@example.com"),
],
)
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
self.assertSequenceEqual(
queryset,
list(
Book.objects.filter(
author__email__in=["bob@example.com", "lisa@example.com"]
).order_by("-id")
),
)
filterspec = changelist.get_filters(request)[0][5]
choices = list(filterspec.choices(changelist))
expected_choice_values = [
("All", False, "?"),
("alfred@example.com", False, "?author__email=alfred%40example.com"),
("bob@example.com", True, "?author__email=bob%40example.com"),
("lisa@example.com", True, "?author__email=lisa%40example.com"),
]
for i, (display, selected, query_string) in enumerate(expected_choice_values):
self.assertEqual(choices[i]["display"], display)
self.assertIs(choices[i]["selected"], selected)
self.assertEqual(choices[i]["query_string"], query_string)
def test_two_characters_long_field(self):
"""
list_filter works with two-characters long field names (#16080).
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get("/", {"no": "207"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][5]
self.assertEqual(filterspec.title, "number")
choices = list(filterspec.choices(changelist))
self.assertIs(choices[2]["selected"], True)
self.assertEqual(choices[2]["query_string"], "?no=207")
def test_parameter_ends_with__in__or__isnull(self):
"""
A SimpleListFilter's parameter name is not mistaken for a model field
if it ends with '__isnull' or '__in' (#17091).
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get("/", {"decade__in": "the 90s"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]["display"], "the 1990's")
self.assertIs(choices[2]["selected"], True)
self.assertEqual(choices[2]["query_string"], "?decade__in=the+90s")
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get("/", {"decade__isnull": "the 90s"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "publication decade")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]["display"], "the 1990's")
self.assertIs(choices[2]["selected"], True)
self.assertEqual(choices[2]["query_string"], "?decade__isnull=the+90s")
def test_lookup_with_non_string_value(self):
"""
Ensure choices are set the selected class when using non-string values
for lookups in SimpleListFilters (#19318).
"""
modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
request = self.request_factory.get("/", {"department": self.john.department.pk})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(filterspec.title, "department")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]["display"], "DEV")
self.assertIs(choices[1]["selected"], True)
self.assertEqual(
choices[1]["query_string"], "?department=%s" % self.john.department.pk
)
def test_lookup_with_non_string_value_underscored(self):
"""
Ensure SimpleListFilter lookups pass lookup_allowed checks when
parameter_name attribute contains double-underscore value (#19182).
"""
modeladmin = DepartmentFilterUnderscoredEmployeeAdmin(Employee, site)
request = self.request_factory.get(
"/", {"department__whatever": self.john.department.pk}
)
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(filterspec.title, "department")
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]["display"], "DEV")
self.assertIs(choices[1]["selected"], True)
self.assertEqual(
choices[1]["query_string"],
"?department__whatever=%s" % self.john.department.pk,
)
def test_fk_with_to_field(self):
"""
A filter on a FK respects the FK's to_field attribute (#17972).
"""
modeladmin = EmployeeAdmin(Employee, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.jack, self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(filterspec.title, "department")
choices = [
(choice["display"], choice["selected"], choice["query_string"])
for choice in filterspec.choices(changelist)
]
self.assertCountEqual(
choices,
[
("All", True, "?"),
("Development", False, "?department__code__exact=DEV"),
("Design", False, "?department__code__exact=DSN"),
],
)
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get("/", {"department__code__exact": "DEV"})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(filterspec.title, "department")
choices = [
(choice["display"], choice["selected"], choice["query_string"])
for choice in filterspec.choices(changelist)
]
self.assertCountEqual(
choices,
[
("All", False, "?"),
("Development", True, "?department__code__exact=DEV"),
("Design", False, "?department__code__exact=DSN"),
],
)
def test_lookup_with_dynamic_value(self):
"""
Ensure SimpleListFilter can access self.value() inside the lookup.
"""
modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)
def _test_choices(request, expected_displays):
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "publication decade")
choices = tuple(c["display"] for c in filterspec.choices(changelist))
self.assertEqual(choices, expected_displays)
_test_choices(
self.request_factory.get("/", {}), ("All", "the 1980's", "the 1990's")
)
_test_choices(
self.request_factory.get("/", {"publication-decade": "the 80s"}),
("All", "the 1990's"),
)
_test_choices(
self.request_factory.get("/", {"publication-decade": "the 90s"}),
("All", "the 1980's"),
)
def test_list_filter_queryset_filtered_by_default(self):
"""
A list filter that filters the queryset by default gives the correct
full_result_count.
"""
modeladmin = NotNinetiesListFilterAdmin(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
changelist.get_results(request)
self.assertEqual(changelist.full_result_count, 4)
    def test_emptylistfieldfilter(self):
        """
        EmptyFieldListFilter matches NULLs and/or empty strings depending on
        what the underlying field allows.
        """
        empty_description = Department.objects.create(code="EMPT", description="")
        none_description = Department.objects.create(code="NONE", description=None)
        empty_title = Book.objects.create(title="", author=self.alfred)
        department_admin = DepartmentAdminWithEmptyFieldListFilter(Department, site)
        book_admin = BookAdminWithEmptyFieldListFilter(Book, site)
        # (modeladmin, query params, expected objects) per scenario.
        tests = [
            # Allows nulls and empty strings.
            (
                department_admin,
                {"description__isempty": "1"},
                [empty_description, none_description],
            ),
            (
                department_admin,
                {"description__isempty": "0"},
                [self.dev, self.design],
            ),
            # Allows nulls.
            (book_admin, {"author__isempty": "1"}, [self.guitar_book]),
            (
                book_admin,
                {"author__isempty": "0"},
                [self.django_book, self.bio_book, self.djangonaut_book, empty_title],
            ),
            # Allows empty strings.
            (book_admin, {"title__isempty": "1"}, [empty_title]),
            (
                book_admin,
                {"title__isempty": "0"},
                [
                    self.django_book,
                    self.bio_book,
                    self.djangonaut_book,
                    self.guitar_book,
                ],
            ),
        ]
        for modeladmin, query_string, expected_result in tests:
            with self.subTest(
                modeladmin=modeladmin.__class__.__name__,
                query_string=query_string,
            ):
                request = self.request_factory.get("/", query_string)
                request.user = self.alfred
                changelist = modeladmin.get_changelist_instance(request)
                queryset = changelist.get_queryset(request)
                self.assertCountEqual(queryset, expected_result)
    def test_emptylistfieldfilter_reverse_relationships(self):
        """
        EmptyFieldListFilter works across reverse one-to-one, foreign key,
        and many-to-many relationships.
        """
        class UserAdminReverseRelationship(UserAdmin):
            list_filter = (("books_contributed", EmptyFieldListFilter),)
        ImprovedBook.objects.create(book=self.guitar_book)
        no_employees = Department.objects.create(code="NONE", description=None)
        book_admin = BookAdminWithEmptyFieldListFilter(Book, site)
        department_admin = DepartmentAdminWithEmptyFieldListFilter(Department, site)
        user_admin = UserAdminReverseRelationship(User, site)
        # (modeladmin, query params, expected objects) per relationship kind.
        tests = [
            # Reverse one-to-one relationship.
            (
                book_admin,
                {"improvedbook__isempty": "1"},
                [self.django_book, self.bio_book, self.djangonaut_book],
            ),
            (book_admin, {"improvedbook__isempty": "0"}, [self.guitar_book]),
            # Reverse foreign key relationship.
            (department_admin, {"employee__isempty": "1"}, [no_employees]),
            (department_admin, {"employee__isempty": "0"}, [self.dev, self.design]),
            # Reverse many-to-many relationship.
            (user_admin, {"books_contributed__isempty": "1"}, [self.alfred]),
            (user_admin, {"books_contributed__isempty": "0"}, [self.bob, self.lisa]),
        ]
        for modeladmin, query_string, expected_result in tests:
            with self.subTest(
                modeladmin=modeladmin.__class__.__name__,
                query_string=query_string,
            ):
                request = self.request_factory.get("/", query_string)
                request.user = self.alfred
                changelist = modeladmin.get_changelist_instance(request)
                queryset = changelist.get_queryset(request)
                self.assertCountEqual(queryset, expected_result)
def test_emptylistfieldfilter_genericrelation(self):
class BookmarkGenericRelation(ModelAdmin):
list_filter = (("tags", EmptyFieldListFilter),)
modeladmin = BookmarkGenericRelation(Bookmark, site)
django_bookmark = Bookmark.objects.create(url="https://www.djangoproject.com/")
python_bookmark = Bookmark.objects.create(url="https://www.python.org/")
none_tags = Bookmark.objects.create(url="https://www.kernel.org/")
TaggedItem.objects.create(content_object=django_bookmark, tag="python")
TaggedItem.objects.create(content_object=python_bookmark, tag="python")
tests = [
({"tags__isempty": "1"}, [none_tags]),
({"tags__isempty": "0"}, [django_bookmark, python_bookmark]),
]
for query_string, expected_result in tests:
with self.subTest(query_string=query_string):
request = self.request_factory.get("/", query_string)
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
queryset = changelist.get_queryset(request)
self.assertCountEqual(queryset, expected_result)
def test_emptylistfieldfilter_choices(self):
modeladmin = BookAdminWithEmptyFieldListFilter(Book, site)
request = self.request_factory.get("/")
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(filterspec.title, "Verbose Author")
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]["display"], "All")
self.assertIs(choices[0]["selected"], True)
self.assertEqual(choices[0]["query_string"], "?")
self.assertEqual(choices[1]["display"], "Empty")
self.assertIs(choices[1]["selected"], False)
self.assertEqual(choices[1]["query_string"], "?author__isempty=1")
self.assertEqual(choices[2]["display"], "Not empty")
self.assertIs(choices[2]["selected"], False)
self.assertEqual(choices[2]["query_string"], "?author__isempty=0")
def test_emptylistfieldfilter_non_empty_field(self):
class EmployeeAdminWithEmptyFieldListFilter(ModelAdmin):
list_filter = [("department", EmptyFieldListFilter)]
modeladmin = EmployeeAdminWithEmptyFieldListFilter(Employee, site)
request = self.request_factory.get("/")
request.user = self.alfred
msg = (
"The list filter 'EmptyFieldListFilter' cannot be used with field "
"'department' which doesn't allow empty strings and nulls."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
modeladmin.get_changelist_instance(request)
def test_emptylistfieldfilter_invalid_lookup_parameters(self):
modeladmin = BookAdminWithEmptyFieldListFilter(Book, site)
request = self.request_factory.get("/", {"author__isempty": 42})
request.user = self.alfred
with self.assertRaises(IncorrectLookupParameters):
modeladmin.get_changelist_instance(request)
def test_lookup_using_custom_divider(self):
"""
Filter __in lookups with a custom divider.
"""
jane = Employee.objects.create(name="Jane,Green", department=self.design)
modeladmin = EmployeeCustomDividerFilterAdmin(Employee, site)
employees = [jane, self.jack]
request = self.request_factory.get(
"/", {"name__in": "|".join(e.name for e in employees)}
)
# test for lookup with custom divider
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), employees)
# test for lookup with comma in the lookup string
request = self.request_factory.get("/", {"name": jane.name})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [jane])
| ListFiltersTests |
python | ray-project__ray | rllib/callbacks/tests/test_callbacks_old_api_stack.py | {
"start": 256,
"end": 748
} | class ____(DefaultCallbacks):
def __init__(self):
super().__init__()
self.counts = Counter()
def on_episode_start(self, *args, **kwargs):
self.counts.update({"start": 1})
def on_episode_step(self, *args, **kwargs):
self.counts.update({"step": 1})
def on_episode_end(self, *args, **kwargs):
self.counts.update({"end": 1})
def on_sample_end(self, *args, **kwargs):
self.counts.update({"sample": 1})
| EpisodeAndSampleCallbacks |
python | tensorflow__tensorflow | tensorflow/python/distribute/tpu_strategy_test.py | {
"start": 41146,
"end": 45576
} | class ____(test.TestCase):
def test_prefetch_to_device_default(self):
strategy = get_tpu_strategy()
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
# Check default, should prefetch to TPU.
dataset_item = next(iter(strategy.experimental_distribute_dataset(dataset)))
dataset_location = tf_device.DeviceSpec.from_string(
dataset_item.values[0].device)
self.assertEqual(dataset_location.device_type, "TPU")
def test_prefetch_to_device_tpu(self):
strategy = get_tpu_strategy()
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
input_options = distribute_lib.InputOptions(
experimental_fetch_to_device=True)
dataset_item = next(iter(strategy.experimental_distribute_dataset(
dataset, options=input_options)))
dataset_location = tf_device.DeviceSpec.from_string(
dataset_item.values[0].device)
self.assertEqual(dataset_location.device_type, "TPU")
def test_prefetch_to_device_cpu(self):
strategy = get_tpu_strategy()
dataset = dataset_ops.Dataset.range(
strategy.num_replicas_in_sync * 2,
output_type=dtypes.float32).batch(strategy.num_replicas_in_sync)
# Should be CPU when prefetch_to_device is False.
input_options = distribute_lib.InputOptions(
experimental_fetch_to_device=False)
dataset_item = next(iter(strategy.experimental_distribute_dataset(
dataset, options=input_options)))
dataset_location = tf_device.DeviceSpec.from_string(
dataset_item.values[0].device)
self.assertEqual(dataset_location.device_type, "CPU")
def test_prefetch_to_device_sparse_dataset(self):
strategy = get_tpu_strategy()
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
sparse_tensor.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
values=[1, 2, 3],
dense_shape=[2, 2]))
dataset = dataset.repeat()
dataset = dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.experimental_distribute_dataset(dataset))
def test_prefetch_to_device_ragged_dataset(self):
strategy = get_tpu_strategy()
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
ragged_tensor.RaggedTensor.from_row_splits(
values=[1, 2, 3],
row_splits=[0, 2, 3]))
dataset = dataset.repeat()
dataset = dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.experimental_distribute_dataset(dataset))
def test_prefetch_to_device_sparse_dataset_fn(self):
strategy = get_tpu_strategy()
def dataset_fn(ctx):
del ctx
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
sparse_tensor.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
values=[1, 2, 3],
dense_shape=[2, 2]))
dataset = dataset.repeat()
return dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.distribute_datasets_from_function(dataset_fn))
def test_prefetch_to_device_ragged_dataset_fn(self):
strategy = get_tpu_strategy()
def dataset_fn(ctx):
del ctx
# Values here aren't important.
dataset = dataset_ops.Dataset.from_tensors(
ragged_tensor.RaggedTensor.from_row_splits(
values=[1, 2, 3],
row_splits=[0, 2, 3]))
dataset = dataset.repeat()
return dataset.batch(strategy.num_replicas_in_sync)
with self.assertRaisesRegex(ValueError, "TPUStrategy does not support"):
iter(strategy.distribute_datasets_from_function(dataset_fn))
def test_create_iterator_on_device(self):
@def_function.function
def create_iter():
with ops.device("/device:TPU:0"):
return gen_dataset_ops.anonymous_iterator_v3(
output_types=[dtypes.float32], output_shapes=[[]])
create_iter()
@test_util.with_eager_op_as_function
| TPUStrategyDataPrefetchTest |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 30533,
"end": 32910
} | class ____(BasePostProgressGroupMixin):
@patch("sentry.rules.processing.processor.RuleProcessor")
def test_group_inbox_regression(self, mock_processor: MagicMock) -> None:
new_event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
group = new_event.group
assert group.status == GroupStatus.UNRESOLVED
assert group.substatus == GroupSubStatus.NEW
self.call_post_process_group(
is_new=True,
is_regression=True,
is_new_group_environment=False,
event=new_event,
)
assert GroupInbox.objects.filter(group=group, reason=GroupInboxReason.NEW.value).exists()
GroupInbox.objects.filter(
group=group
).delete() # Delete so it creates the .REGRESSION entry.
group.refresh_from_db()
assert group.status == GroupStatus.UNRESOLVED
assert group.substatus == GroupSubStatus.NEW
mock_processor.assert_called_with(EventMatcher(new_event), True, True, False, False, False)
# resolve the new issue so regression actually happens
group.status = GroupStatus.RESOLVED
group.substatus = None
group.active_at = group.active_at - timedelta(minutes=1)
group.save(update_fields=["status", "substatus", "active_at"])
# trigger a transition from resolved to regressed by firing an event that groups to that issue
regressed_event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
assert regressed_event.group == new_event.group
group = regressed_event.group
group.refresh_from_db()
assert group.status == GroupStatus.UNRESOLVED
assert group.substatus == GroupSubStatus.REGRESSED
self.call_post_process_group(
is_new=False,
is_regression=True,
is_new_group_environment=False,
event=regressed_event,
)
mock_processor.assert_called_with(
EventMatcher(regressed_event), False, True, False, False, False
)
group.refresh_from_db()
assert group.status == GroupStatus.UNRESOLVED
assert group.substatus == GroupSubStatus.REGRESSED
assert GroupInbox.objects.filter(
group=group, reason=GroupInboxReason.REGRESSION.value
).exists()
| InboxTestMixin |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_distinct_values_to_equal_set.py | {
"start": 2457,
"end": 19052
} | class ____(ColumnAggregateExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnDistinctValuesToEqualSet is a \
Column Aggregate Expectation.
Column Aggregate Expectations are one of the most common types of Expectation.
They are evaluated for a single column, and produce an aggregate Metric, such as a mean, standard deviation, number of unique values, column type, etc.
If that Metric meets the conditions you set, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
value_set (set-like): \
{VALUE_SET_DESCRIPTION}
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnDistinctValuesToBeInSet](https://greatexpectations.io/expectations/expect_column_distinct_values_to_be_in_set)
[ExpectColumnDistinctValuesToContainSet](https://greatexpectations.io/expectations/expect_column_distinct_values_to_contain_set)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1 1
1 2 1
2 4 1
Code Examples:
Passing Case:
Input:
ExpectColumnDistinctValuesToEqualSet(
column="test",
value_set=[1, 2, 4]
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": [
1,
2,
4
],
"details": {{
"value_counts": [
{{
"value": 1,
"count": 1
}},
{{
"value": 2,
"count": 1
}},
{{
"value": 4,
"count": 1
}}
]
}}
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnDistinctValuesToEqualSet(
column="test2",
value_set=[3, 2, 4]
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": [
1
],
"details": {{
"value_counts": [
{{
"value": 1,
"count": 3
}}
]
}}
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
value_set: ValueSetField
# This dictionary contains metadata for display in the public gallery
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ # noqa: E501 # FIXME CoP
metric_dependencies = ("column.value_counts",)
success_keys = ("value_set",)
args_keys = (
"column",
"value_set",
)
class Config:
title = "Expect column distinct values to equal set"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnDistinctValuesToEqualSet]
) -> None:
ColumnAggregateExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@override
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("value_set", RendererValueType.ARRAY),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
template_str = ""
if params.value_set:
array_param_name = "value_set"
param_prefix = "v__"
renderer_configuration = cls._add_array_params(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
value_set_str: str = cls._get_array_string(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
template_str += f"distinct values must match this set: {value_set_str}."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@override
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer( # too complex
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
):
renderer_configuration: RendererConfiguration = RendererConfiguration(
configuration=configuration,
result=result,
runtime_configuration=runtime_configuration,
)
params = substitute_none_for_missing(
renderer_configuration.kwargs,
[
"column",
"value_set",
"row_condition",
"condition_parser",
],
)
if params["value_set"] is None or len(params["value_set"]) == 0:
values_string = "[ ]"
else:
for i, v in enumerate(params["value_set"]):
params[f"v__{i!s}"] = v
values_string = " ".join([f"$v__{i!s}" for i, v in enumerate(params["value_set"])])
template_str = f"distinct values must match this set: {values_string}."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
styling = runtime_configuration.get("styling", {}) if runtime_configuration else {}
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": template_str,
"params": params,
"styling": styling,
},
)
]
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
observed_value_counts = metrics["column.value_counts"]
observed_value_set = set(observed_value_counts.index)
value_set = self._get_success_kwargs()["value_set"]
# Try to coerce string values to match the type of observed values
if observed_value_set and value_set:
first_observed = next(iter(observed_value_set))
expected_value_set = {
parse_value_to_observed_type(first_observed, value) for value in value_set
}
else:
expected_value_set = set(value_set)
return {
"success": observed_value_set == expected_value_set,
"result": {
"observed_value": sorted(list(observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
@classmethod
@renderer(renderer_type=AtomicDiagnosticRendererType.OBSERVED_VALUE)
@override
def _atomic_diagnostic_observed_value(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
) -> RenderedAtomicContent:
renderer_configuration: RendererConfiguration = RendererConfiguration(
configuration=configuration,
result=result,
runtime_configuration=runtime_configuration,
)
expected_param_prefix = "exp__"
expected_param_name = "expected_value"
ov_param_prefix = "ov__"
ov_param_name = "observed_value"
renderer_configuration.add_param(
name=expected_param_name,
param_type=RendererValueType.ARRAY,
value=renderer_configuration.kwargs.get("value_set", []),
)
renderer_configuration = cls._add_array_params(
array_param_name=expected_param_name,
param_prefix=expected_param_prefix,
renderer_configuration=renderer_configuration,
)
renderer_configuration.add_param(
name=ov_param_name,
param_type=RendererValueType.ARRAY,
value=result.get("result", {}).get("observed_value", []) if result else [],
)
renderer_configuration = cls._add_array_params(
array_param_name=ov_param_name,
param_prefix=ov_param_prefix,
renderer_configuration=renderer_configuration,
)
observed_value_set = set(
result.get("result", {}).get("observed_value", []) if result else []
)
sample_observed_value = next(iter(observed_value_set)) if observed_value_set else None
expected_value_set = {
parse_value_to_observed_type(observed_value=sample_observed_value, value=value)
for value in renderer_configuration.kwargs.get("value_set", [])
}
observed_values = (
(name, schema)
for name, schema in renderer_configuration.params
if name.startswith(ov_param_prefix)
)
expected_values = (
(name, schema)
for name, schema in renderer_configuration.params
if name.startswith(expected_param_prefix)
)
template_str_list = []
for name, schema in observed_values:
render_state = (
ObservedValueRenderState.EXPECTED.value
if schema.value in expected_value_set
else ObservedValueRenderState.UNEXPECTED.value
)
renderer_configuration.params.__dict__[name].render_state = render_state
template_str_list.append(f"${name}")
for name, schema in expected_values:
coerced_value = parse_value_to_observed_type(
observed_value=sample_observed_value,
value=schema.value,
)
if coerced_value not in observed_value_set:
renderer_configuration.params.__dict__[
name
].render_state = ObservedValueRenderState.MISSING.value
template_str_list.append(f"${name}")
renderer_configuration.template_str = " ".join(template_str_list)
value_obj = renderedAtomicValueSchema.load(
{
"template": renderer_configuration.template_str,
"params": renderer_configuration.params.dict(),
"meta_notes": renderer_configuration.meta_notes,
"schema": {"type": "com.superconductive.rendered.string"},
}
)
return RenderedAtomicContent(
name=AtomicDiagnosticRendererType.OBSERVED_VALUE,
value=value_obj,
value_type="StringValueType",
)
| ExpectColumnDistinctValuesToEqualSet |
python | huggingface__transformers | src/transformers/models/qwen2_moe/modeling_qwen2_moe.py | {
"start": 6363,
"end": 10461
} | class ____(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Qwen2MoeMLP |
python | walkccc__LeetCode | solutions/940. Distinct Subsequences II/940.py | {
"start": 0,
"end": 279
} | class ____:
def distinctSubseqII(self, s: str) -> int:
MOD = 1_000_000_007
# endsIn[i] := the number of subsequence that end in ('a' + i)
endsIn = [0] * 26
for c in s:
endsIn[ord(c) - ord('a')] = (sum(endsIn) + 1) % MOD
return sum(endsIn) % MOD
| Solution |
python | getsentry__sentry | src/sentry/apidocs/parameters.py | {
"start": 26650,
"end": 27925
} | class ____:
PROVIDER_KEY = OpenApiParameter(
name="providerKey",
location="query",
required=False,
type=str,
description="""Specific integration provider to filter by such as `slack`. See our [Integrations Documentation](/product/integrations/) for an updated list of providers.""",
)
FEATURES = OpenApiParameter(
name="features",
location="query",
required=False,
type=str,
many=True,
description="""Integration features to filter by. See our [Integrations Documentation](/product/integrations/) for an updated list of features. Current available ones are:
- `alert-rule`
- `chat-unfurl`
- `codeowners`
- `commits`
- `data-forwarding`
- `deployment`
- `enterprise-alert-rule`
- `enterprise-incident-management`
- `incident-management`
- `issue-basic`
- `issue-sync`
- `mobile`
- `serverless`
- `session-replay`
- `stacktrace-link`
- `ticket-rules`
""",
)
INCLUDE_CONFIG = OpenApiParameter(
name="includeConfig",
location="query",
required=False,
type=bool,
description="""Specify `True` to fetch third-party integration configurations. Note that this can add several seconds to the response time.""",
)
| IntegrationParams |
python | bokeh__bokeh | src/bokeh/models/misc/group_by.py | {
"start": 2092,
"end": 2881
} | class ____(GroupBy):
""" Group models by their names (``Model.name`` property). """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# TODO GroupByCustomJS(GroupBy)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| GroupByName |
python | huggingface__transformers | tests/models/got_ocr2/test_image_processing_got_ocr2.py | {
"start": 1122,
"end": 3030
} | class ____(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| GotOcr2ImageProcessingTester |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py | {
"start": 3334,
"end": 4387
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
gradients = constant_op.constant(
value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
inputs = constant_op.constant(
value=[[1.0], [2.0], [4.0]], dtype=dtypes.float32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be equal rank|must be rank 0"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_gradient(
gradients=gradients,
inputs=inputs,
min=0.0,
max=[[1.0], [2.0], [4.0]]))
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"must be rank 0"):
self.evaluate(
array_ops.fake_quant_with_min_max_vars_gradient(
gradients=gradients,
inputs=inputs,
min=[[1.0], [2.0], [4.0]],
max=[[1.0], [2.0], [4.0]]))
| FakeQuantWithMinMaxVarsGradientOpTest |
python | huggingface__transformers | src/transformers/models/xlnet/modeling_xlnet.py | {
"start": 16690,
"end": 19576
} | class ____(nn.Module):
"""
Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config ([`XLNetConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model.
"""
def __init__(self, config: XLNetConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
The hidden states of the first tokens for the labeled span.
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
The position of the first token for the labeled span.
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Position of the CLS token for each sentence in the batch. If `None`, takes the last token.
<Tip>
One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides
`start_states`.
</Tip>
Returns:
`torch.FloatTensor`: The SQuAD 2.0 answer class.
"""
# No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
hsz = hidden_states.shape[-1]
assert start_states is not None or start_positions is not None, (
"One of start_states, start_positions should be not None"
)
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
# Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->XLNet
| XLNetPoolerAnswerClass |
python | pytorch__pytorch | test/distributed/optim/test_zero_redundancy_optimizer.py | {
"start": 1644,
"end": 1823
} | class ____(DistributedTestBase):
@property
def device(self):
return device_type
@property
def world_size(self):
return 1
| TestZeroRedundancyOptimizer |
python | getsentry__sentry | src/sentry/sentry_apps/models/sentry_app.py | {
"start": 2200,
"end": 2882
} | class ____(ParanoidManager["SentryApp"]):
def get_alertable_sentry_apps(self, organization_id: int) -> QuerySet:
return self.filter(
installations__organization_id=organization_id,
is_alertable=True,
installations__status=SentryAppInstallationStatus.INSTALLED,
installations__date_deleted=None,
).distinct()
def visible_for_user(self, request: Request) -> QuerySet["SentryApp"]:
from sentry.auth.superuser import is_active_superuser
if is_active_superuser(request):
return self.all()
return self.filter(status=SentryAppStatus.PUBLISHED)
@control_silo_model
| SentryAppManager |
python | openai__openai-python | src/openai/types/responses/response_output_item.py | {
"start": 4553,
"end": 5624
} | class ____(BaseModel):
id: str
"""The unique ID of the approval request."""
arguments: str
"""A JSON string of arguments for the tool."""
name: str
"""The name of the tool to run."""
server_label: str
"""The label of the MCP server making the request."""
type: Literal["mcp_approval_request"]
"""The type of the item. Always `mcp_approval_request`."""
ResponseOutputItem: TypeAlias = Annotated[
Union[
ResponseOutputMessage,
ResponseFileSearchToolCall,
ResponseFunctionToolCall,
ResponseFunctionWebSearch,
ResponseComputerToolCall,
ResponseReasoningItem,
ImageGenerationCall,
ResponseCodeInterpreterToolCall,
LocalShellCall,
ResponseFunctionShellToolCall,
ResponseFunctionShellToolCallOutput,
ResponseApplyPatchToolCall,
ResponseApplyPatchToolCallOutput,
McpCall,
McpListTools,
McpApprovalRequest,
ResponseCustomToolCall,
],
PropertyInfo(discriminator="type"),
]
| McpApprovalRequest |
python | astropy__astropy | astropy/io/fits/tests/test_nonstandard.py | {
"start": 150,
"end": 2325
} | class ____(FitsTestCase):
def test_create_fitshdu(self):
"""
A round trip test of creating a FitsHDU, adding a FITS file to it,
writing the FitsHDU out as part of a new FITS file, and then reading
it and recovering the original FITS file.
"""
self._test_create_fitshdu(compression=False)
def test_create_fitshdu_with_compression(self):
"""Same as test_create_fitshdu but with gzip compression enabled."""
self._test_create_fitshdu(compression=True)
def test_create_fitshdu_from_filename(self):
"""Regression test on `FitsHDU.fromfile`"""
# Build up a simple test FITS file
a = np.arange(100)
phdu = fits.PrimaryHDU(data=a)
phdu.header["TEST1"] = "A"
phdu.header["TEST2"] = "B"
imghdu = fits.ImageHDU(data=a + 1)
phdu.header["TEST3"] = "C"
phdu.header["TEST4"] = "D"
hdul = fits.HDUList([phdu, imghdu])
hdul.writeto(self.temp("test.fits"))
fitshdu = fits.FitsHDU.fromfile(self.temp("test.fits"))
hdul2 = fitshdu.hdulist
assert len(hdul2) == 2
assert fits.FITSDiff(hdul, hdul2).identical
def _test_create_fitshdu(self, compression=False):
hdul_orig = fits.open(self.data("test0.fits"), do_not_scale_image_data=True)
fitshdu = fits.FitsHDU.fromhdulist(hdul_orig, compress=compression)
# Just to be meta, let's append to the same hdulist that the fitshdu
# encapuslates
hdul_orig.append(fitshdu)
hdul_orig.writeto(self.temp("tmp.fits"), overwrite=True)
del hdul_orig[-1]
hdul = fits.open(self.temp("tmp.fits"))
assert isinstance(hdul[-1], fits.FitsHDU)
wrapped = hdul[-1].hdulist
assert isinstance(wrapped, fits.HDUList)
assert hdul_orig.info(output=False) == wrapped.info(output=False)
assert (hdul[1].data == wrapped[1].data).all()
assert (hdul[2].data == wrapped[2].data).all()
assert (hdul[3].data == wrapped[3].data).all()
assert (hdul[4].data == wrapped[4].data).all()
hdul_orig.close()
hdul.close()
| TestNonstandardHdus |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/assert_prev_test.py | {
"start": 1160,
"end": 3784
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testAssertPrev(self):
dataset = dataset_ops.Dataset.from_tensors(0).map(
lambda x: x, deterministic=True, num_parallel_calls=8).apply(
testing.assert_prev([("ParallelMapDataset",
{"deterministic", "true"})]))
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
@combinations.generate(test_base.default_test_combinations())
def testIgnoreVersionSuffix(self):
# The `batch` transformation creates a "BatchV2" dataset, but we should
# still match that with "Batch".
dataset = dataset_ops.Dataset.from_tensors(0).map(
lambda x: x, deterministic=True, num_parallel_calls=8).batch(1).apply(
testing.assert_prev([("BatchDataset", {}),
("ParallelMapDataset", {
"deterministic": "true"
})]))
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[[0]])
@combinations.generate(test_base.default_test_combinations())
def testAssertPrevInvalid(self):
dataset = dataset_ops.Dataset.from_tensors(0).apply(
testing.assert_prev([("Whoops", {})]))
self.assertDatasetProduces(
dataset,
expected_error=(errors.InvalidArgumentError,
"Asserted transformation matching 'Whoops'"))
@combinations.generate(test_base.default_test_combinations())
def testAssertPrevShort(self):
dataset = dataset_ops.Dataset.from_tensors(0).apply(
testing.assert_prev([("TensorDataset", {}), ("Whoops", {})]))
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
"Asserted previous 2 transformations but encountered only 1."))
@combinations.generate(test_base.default_test_combinations())
def testAssertBadAttributeName(self):
dataset = dataset_ops.Dataset.from_tensors(0).apply(
testing.assert_prev([("TensorDataset", {
"whoops": "true"
})]))
self.assertDatasetProduces(
dataset,
expected_error=(errors.InvalidArgumentError, "found no such attribute"))
if __name__ == "__main__":
test.main()
| AssertPrevTest |
python | facebook__pyre-check | tools/upgrade/commands/codemods.py | {
"start": 4920,
"end": 5966
} | class ____(Command):
def __init__(self, *, local_roots: Sequence[Path], repository: Repository) -> None:
super().__init__(repository)
self._local_roots = local_roots
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "SetUseBuck1":
return SetUseBuck1(local_roots=arguments.local_roots, repository=repository)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(SetUseBuck1, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument(
"local_roots",
help="Paths to directory with local configuration",
type=path_exists,
nargs="*",
)
@override
def run(self) -> None:
for local_root in self._local_roots:
configuration = Configuration(local_root / ".pyre_configuration.local")
configuration.set_use_buck1_if_possible()
configuration.write()
| SetUseBuck1 |
python | coleifer__peewee | tests/db_tests.py | {
"start": 23878,
"end": 25418
} | class ____(BaseTestCase):
def test_sort_models(self):
class A(Model):
pass
class B(Model):
a = ForeignKeyField(A)
class C(Model):
b = ForeignKeyField(B)
class D(Model):
c = ForeignKeyField(C)
class E(Model):
pass
models = [A, B, C, D, E]
for list_of_models in permutations(models):
sorted_models = sort_models(list_of_models)
self.assertEqual(sorted_models, models)
def test_sort_models_multi_fk(self):
class Inventory(Model):
pass
class Sheet(Model):
inventory = ForeignKeyField(Inventory)
class Program(Model):
inventory = ForeignKeyField(Inventory)
class ProgramSheet(Model):
program = ForeignKeyField(Program)
sheet = ForeignKeyField(Sheet)
class ProgramPart(Model):
program_sheet = ForeignKeyField(ProgramSheet)
class Offal(Model):
program_sheet = ForeignKeyField(ProgramSheet)
sheet = ForeignKeyField(Sheet)
M = [Inventory, Sheet, Program, ProgramSheet, ProgramPart, Offal]
sorted_models = sort_models(M)
self.assertEqual(sorted_models, [
Inventory,
Program,
Sheet,
ProgramSheet,
Offal,
ProgramPart,
])
for list_of_models in permutations(M):
self.assertEqual(sort_models(list_of_models), sorted_models)
| TestSortModels |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 5771,
"end": 7646
} | class ____(RegexLexer):
"""
Generic `Smarty <http://smarty.php.net/>`_ template lexer.
Just highlights smarty code between the preprocessor directives, other
data is left untouched by the lexer.
"""
name = 'Smarty'
aliases = ['smarty']
filenames = ['*.tpl']
mimetypes = ['application/x-smarty']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(r'[^{]+', Other),
(r'(\{)(\*.*?\*)(\})',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(\{php\})(.*?)(\{/php\})',
bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
Comment.Preproc)),
(r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
(r'\{', Comment.Preproc, 'smarty')
],
'smarty': [
(r'\s+', Text),
(r'\{', Comment.Preproc, '#push'),
(r'\}', Comment.Preproc, '#pop'),
(r'#[a-zA-Z_]\w*#', Name.Variable),
(r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
(r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
(r'(true|false|null)\b', Keyword.Constant),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name.Attribute)
]
}
def analyse_text(text):
rv = 0.0
if re.search('\{if\s+.*?\}.*?\{/if\}', text):
rv += 0.15
if re.search('\{include\s+file=.*?\}', text):
rv += 0.15
if re.search('\{foreach\s+.*?\}.*?\{/foreach\}', text):
rv += 0.15
if re.search('\{\$.*?\}', text):
rv += 0.01
return rv
| SmartyLexer |
python | google__pytype | pytype/tests/test_annotations.py | {
"start": 37948,
"end": 39693
} | class ____(test_base.BaseTest):
"""Tests usage of '...' to mean "inferred type".
This is an experimental feature that makes it possible to explicitly annotate
a type as inferred. See b/213607272.
"""
def test_variable(self):
ty = self.Infer("x: ... = 0")
self.assertTypesMatchPytd(ty, "x: int")
def test_function(self):
ty = self.Infer("""
def f(x: ...) -> ...:
return x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar
_T0 = TypeVar('_T0')
def f(x: _T0) -> _T0: ...
""",
)
def test_class(self):
ty = self.Infer("""
class Foo:
x: ...
def f(self):
self.x = 5
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
x: int
def f(self) -> None: ...
""",
)
def test_future(self):
ty = self.Infer("""
from __future__ import annotations
x: ...
x = 5
def f(x: ...): pass
class Foo:
x: ...
def f(self):
self.x = 5
""")
self.assertTypesMatchPytd(
ty,
"""
x: int
def f(x) -> None: ...
class Foo:
x: int
def f(self) -> None: ...
""",
)
def test_try_except_block(self):
# Regression test - the first except line puts a `STORE_NAME e` opcode in
# the next line, and the annotation on `a: int` therefore has two STORE ops
# in its line. This test confirms that the `int` annotation gets put on
# `STORE_NAME a` rather than `STORE_NAME e`
self.Check("""
try:
1
except Exception as e:
a: int = 10
try:
x = 1
except Exception as e:
pass
""")
| EllipsisTest |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pygments/formatters/html.py | {
"start": 2365,
"end": 35669
} | class ____(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed
in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option).
The ``<div>``'s CSS class can be set by the `cssclass` option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`lineanchors` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't add a ``<pre>`` and a ``<div>`` tag
around the tokens. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags (as well as line number elements)
will not use CSS classes, but inline styles. This is not recommended
for larger pieces of code since it increases output size by quite a bit
(default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
.. versionadded:: 0.9
If you select the ``'table'`` line numbers, the wrapping table will
have a CSS class of this string plus ``'table'``, the default is
accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
.. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file.
.. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
.. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted. The line numbers are always
relative to the input (i.e. the first line is line 1) and are
independent of `linenostart`.
.. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
`get_syntax_defs` method given]) (default: ``False``).
.. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks.
.. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``.
This allows easy linking to certain lines.
.. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript.
.. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
The tags file is assumed to be encoded in UTF-8.
.. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
.. versionadded:: 1.6
`filename`
A string used to generate a filename when rendering ``<pre>`` blocks,
for example if displaying source code. If `linenos` is set to
``'table'`` then the filename will be rendered in an initial row
containing a single `<th>` which spans both columns.
.. versionadded:: 2.1
`wrapcode`
Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
by the HTML5 specification.
.. versionadded:: 2.4
`debug_token_types`
Add ``title`` attributes to all token ``<span>`` tags that show the
name of the token.
.. versionadded:: 2.10
**Subclassing the HTML formatter**
.. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
If the `nowrap` option is set, the generator is the iterated over and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, *, include_div):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
self.filename = self._decodeifneeded(options.get('filename', ''))
self.wrapcode = get_bool_opt(options, 'wrapcode', False)
self.span_element_openers = {}
self.debug_token_types = get_bool_opt(options, 'debug_token_types', False)
if self.tagsfile:
if not ctags:
raise RuntimeError('The "ctags" package must to be installed '
'to be able to use the "tagsfile" feature.')
self._ctags = ctags.CTags(self.tagsfile)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.linespans = options.get('linespans', '')
self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _get_css_classes(self, ttype):
"""Return the CSS classes of this token type prefixed with the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls or ''
def _get_css_inline_styles(self, ttype):
"""Return the inline CSS styles for this token type."""
cclass = self.ttype2class.get(ttype)
while cclass is None:
ttype = ttype.parent
cclass = self.ttype2class.get(ttype)
return cclass or ''
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: {}; '.format(webify(ndef['color']))
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: {}; '.format(webify(ndef['bgcolor']))
if ndef['border']:
style += 'border: 1px solid {}; '.format(webify(ndef['border']))
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
style_lines = []
style_lines.extend(self.get_linenos_style_defs())
style_lines.extend(self.get_background_style_defs(arg))
style_lines.extend(self.get_token_style_defs(arg))
return '\n'.join(style_lines)
def get_token_style_defs(self, arg=None):
prefix = self.get_css_prefix(arg)
styles = [
(level, ttype, cls, style)
for cls, (style, ttype, level) in self.class2style.items()
if cls and style
]
styles.sort()
lines = [
f'{prefix(cls)} {{ {style} }} /* {repr(ttype)[6:]} */'
for (level, ttype, cls, style) in styles
]
return lines
def get_background_style_defs(self, arg=None):
prefix = self.get_css_prefix(arg)
bg_color = self.style.background_color
hl_color = self.style.highlight_color
lines = []
if arg and not self.nobackground and bg_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(
0, '{}{{ background: {};{} }}'.format(
prefix(''), bg_color, text_style
)
)
if hl_color is not None:
lines.insert(
0, '{} {{ background-color: {} }}'.format(prefix('hll'), hl_color)
)
return lines
def get_linenos_style_defs(self):
lines = [
f'pre {{ {self._pre_style} }}',
f'td.linenos .normal {{ {self._linenos_style} }}',
f'span.linenos {{ {self._linenos_style} }}',
f'td.linenos .special {{ {self._linenos_special_style} }}',
f'span.linenos.special {{ {self._linenos_special_style} }}',
]
return lines
def get_css_prefix(self, arg):
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, str):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
return prefix
@property
def _pre_style(self):
return 'line-height: 125%;'
@property
def _linenos_style(self):
color = self.style.line_number_color
background_color = self.style.line_number_background_color
return f'color: {color}; background-color: {background_color}; padding-left: 5px; padding-right: 5px;'
@property
def _linenos_special_style(self):
color = self.style.line_number_special_color
background_color = self.style.line_number_special_background_color
return f'color: {color}; background-color: {background_color}; padding-left: 5px; padding-right: 5px;'
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print('Note: Cannot determine output file name, '
'using current directory as base for the CSS file name',
file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
with open(cssfilename, "w", encoding="utf-8") as cf:
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
except OSError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title=self.title,
cssfile=self.cssfile,
encoding=self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title=self.title,
styledefs=self.get_style_defs('body'),
encoding=self.encoding))
yield from inner
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
anchor_name = self.lineanchors or self.linespans
aln = self.anchorlinenos
nocls = self.noclasses
lines = []
for i in range(fl, fl+lncount):
print_line = i % st == 0
special_line = sp and i % sp == 0
if print_line:
line = '%*d' % (mw, i)
if aln:
line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
else:
line = ' ' * mw
if nocls:
if special_line:
style = f' style="{self._linenos_special_style}"'
else:
style = f' style="{self._linenos_style}"'
else:
if special_line:
style = ' class="special"'
else:
style = ' class="normal"'
if style:
line = f'<span{style}>{line}</span>'
lines.append(line)
ls = '\n'.join(lines)
# If a filename was specified, we can't put it into the code table as it
# would misalign the line numbers. Hence we emit a separate row for it.
filename_tr = ""
if self.filename:
filename_tr = (
'<tr><th colspan="2" class="filename">'
'<span class="filename">' + self.filename + '</span>'
'</th></tr>')
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">')
yield 0, '<div>'
yield 0, dummyoutfile.getvalue()
yield 0, '</div>'
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
inner_lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(inner_lines) + num - 1))
anchor_name = self.lineanchors or self.linespans
aln = self.anchorlinenos
nocls = self.noclasses
for _, inner_line in inner_lines:
print_line = num % st == 0
special_line = sp and num % sp == 0
if print_line:
line = '%*d' % (mw, num)
else:
line = ' ' * mw
if nocls:
if special_line:
style = f' style="{self._linenos_special_style}"'
else:
style = f' style="{self._linenos_style}"'
else:
if special_line:
style = ' class="linenos special"'
else:
style = ' class="linenos"'
if style:
linenos = f'<span{style}>{line}</span>'
else:
linenos = line
if aln:
yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
inner_line)
else:
yield 1, linenos + inner_line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
# subtract 1 since we have to increment i *before* yielding
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append(f'background: {self.style.background_color}')
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and f' class="{self.cssclass}"') +
(style and (f' style="{style}"')) + '>')
yield from inner
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append(self._pre_style)
style = '; '.join(style)
if self.filename and self.linenos != 1:
yield 0, ('<span class="filename">' + self.filename + '</span>')
# the empty span here is to keep leading empty lines from being
# ignored by HTML parsers
yield 0, ('<pre' + (style and f' style="{style}"') + '><span></span>')
yield from inner
yield 0, '</pre>'
def _wrap_code(self, inner):
yield 0, '<code>'
yield from inner
yield 0, '</code>'
@functools.lru_cache(maxsize=100)
def _translate_parts(self, value):
"""HTML-escape a value and split it by newlines."""
return value.translate(_escape_html_table).split('\n')
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
try:
cspan = self.span_element_openers[ttype]
except KeyError:
title = ' title="{}"'.format('.'.join(ttype)) if self.debug_token_types else ''
if nocls:
css_style = self._get_css_inline_styles(ttype)
if css_style:
css_style = self.class2style[css_style][0]
cspan = f'<span style="{css_style}"{title}>'
else:
cspan = ''
else:
css_class = self._get_css_classes(ttype)
if css_class:
cspan = f'<span class="{css_class}"{title}>'
else:
cspan = ''
self.span_element_openers[ttype] = cspan
parts = self._translate_parts(value)
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
# Also check for part being non-empty, so we avoid creating
# empty <span> tags
if lspan != cspan and part:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same, or the current part was empty
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line)
def _lookup_ctag(self, token):
entry = ctags.TagEntry()
if self._ctags.find(entry, token.encode(), 0):
return entry['file'].decode(), entry['lineNumber']
else:
return None, None
def _highlight_lines(self, tokensource):
"""
Highlighted the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (f' style="background-color: {self.style.highlight_color}"')
yield 1, f'<span{style}>{value}</span>'
else:
yield 1, f'<span class="hll">{value}</span>'
else:
yield 1, value
def wrap(self, source):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
output = source
if self.wrapcode:
output = self._wrap_code(output)
output = self._wrap_pre(output)
return output
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
# As a special case, we wrap line numbers before line highlighting
# so the line numbers get wrapped in the highlighting tag.
if not self.nowrap and self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
source = self._wrap_div(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
| HtmlFormatter |
python | PrefectHQ__prefect | tests/server/orchestration/api/ui/test_task_runs.py | {
"start": 4671,
"end": 10626
} | class ____:
@pytest.fixture
def url(self) -> str:
return "/ui/task_runs/count"
@pytest.fixture
async def create_flow_runs(
self,
session: AsyncSession,
flow: orm_models.Flow,
):
await session.execute(delete(orm_models.FlowRun))
run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=core.FlowRun(
flow_id=flow.id,
state=states.Completed(),
),
)
run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=core.FlowRun(
flow_id=flow.id,
state=states.Failed(),
),
)
run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=core.FlowRun(
flow_id=flow.id,
state=states.Pending(),
),
)
await session.commit()
yield [run_1, run_2, run_3]
@pytest.fixture
async def create_task_runs(
self,
session: AsyncSession,
flow_run: orm_models.FlowRun,
create_flow_runs: list[orm_models.FlowRun],
):
await session.execute(delete(orm_models.TaskRun))
task_runs_per_flow_run = 27
now = cast(DateTime, datetime(2023, 6, 1, 18, tzinfo=timezone.utc))
for flow_run in create_flow_runs:
for i in range(task_runs_per_flow_run):
# This means that each flow run should have task runs with the following states:
# 9 completed, 9 failed, 3 scheduled, 3 running, 2 cancelled, 1 crashed, 1 paused, 1 cancelling, 1 pending
if i < 9:
state_type = states.StateType.COMPLETED
state_name = "Completed"
elif i < 15:
state_type = states.StateType.FAILED
state_name = "Failed"
elif i < 18:
state_type = states.StateType.SCHEDULED
state_name = "Scheduled"
elif i < 21:
state_type = states.StateType.RUNNING
state_name = "Running"
elif i < 23:
state_type = states.StateType.CANCELLED
state_name = "Cancelled"
elif i < 24:
state_type = states.StateType.CRASHED
state_name = "Crashed"
elif i < 25:
state_type = states.StateType.PAUSED
state_name = "Paused"
elif i < 26:
state_type = states.StateType.CANCELLING
state_name = "Cancelling"
else:
state_type = states.StateType.PENDING
state_name = "Pending"
await models.task_runs.create_task_run(
session=session,
task_run=core.TaskRun(
flow_run_id=flow_run.id,
task_key=f"task-{i}-{uuid4()}",
dynamic_key=str(uuid4()),
state_type=state_type,
state_name=state_name,
start_time=now,
end_time=now,
),
)
await session.commit()
yield
async def test_returns_all_state_types(
self,
url: str,
client: AsyncClient,
):
response = await client.post(url)
assert response.status_code == 200
counts = response.json()
assert set(counts.keys()) == set(states.StateType.__members__.keys())
async def test_none(
self,
url: str,
client: AsyncClient,
session: AsyncSession,
):
# ensure there are no task runs in the database
await session.execute(delete(orm_models.TaskRun))
await session.commit()
response = await client.post(url)
assert response.status_code == 200
counts = response.json()
assert counts == {
"COMPLETED": 0,
"FAILED": 0,
"PENDING": 0,
"RUNNING": 0,
"CANCELLED": 0,
"CRASHED": 0,
"PAUSED": 0,
"CANCELLING": 0,
"SCHEDULED": 0,
}
@pytest.mark.usefixtures("create_task_runs")
async def test_returns_counts(
self,
url: str,
client: AsyncClient,
):
response = await client.post(url)
assert response.status_code == 200
counts = response.json()
assert counts == {
"COMPLETED": 9 * 3,
"FAILED": 6 * 3,
"PENDING": 1 * 3,
"RUNNING": 3 * 3,
"CANCELLED": 2 * 3,
"CRASHED": 1 * 3,
"PAUSED": 1 * 3,
"CANCELLING": 1 * 3,
"SCHEDULED": 3 * 3,
}
@pytest.mark.usefixtures("create_task_runs")
async def test_returns_counts_with_filter(
self,
url: str,
client: AsyncClient,
):
response = await client.post(
url,
json={
"flow_runs": filters.FlowRunFilter(
state=filters.FlowRunFilterState(
type=filters.FlowRunFilterStateType(
any_=[states.StateType.COMPLETED]
)
)
).model_dump(mode="json")
},
)
assert response.status_code == 200
counts = response.json()
assert counts == {
"COMPLETED": 9,
"FAILED": 6,
"PENDING": 1,
"RUNNING": 3,
"CANCELLED": 2,
"CRASHED": 1,
"PAUSED": 1,
"CANCELLING": 1,
"SCHEDULED": 3,
}
| TestReadTaskRunCountsByState |
python | great-expectations__great_expectations | tests/integration/data_sources_and_expectations/test_expectation_conditions.py | {
"start": 17990,
"end": 19745
} | class ____:
"""Test that SQLAlchemy execution engines properly reject PassThroughCondition."""
@parameterize_batch_for_data_sources(
data_source_configs=[
PostgreSQLDatasourceTestConfig(
column_types={
"created_at": POSTGRESQL_TYPES.TIMESTAMP,
"updated_at": POSTGRESQL_TYPES.DATE,
}
),
SqliteDatasourceTestConfig(),
MySQLDatasourceTestConfig(
column_types={
"created_at": sqltypes.TIMESTAMP(timezone=True),
"updated_at": sqltypes.DATE,
}
),
],
data=DATA,
)
def test_sqlalchemy_rejects_pass_through_condition_object(
self, batch_for_datasource: Batch
) -> None:
"""Test that SQLAlchemy raises error when PassThroughCondition is used.
The error is caught during metric resolution and results in a validation
failure with an error message, rather than an uncaught exception.
"""
row_condition = PassThroughCondition(pass_through_filter="quantity > 0")
expectation = gxe.ExpectColumnMinToBeBetween(
column="amount",
min_value=0.5,
max_value=1.5,
row_condition=row_condition,
)
result = batch_for_datasource.validate(expectation)
# Validation should fail due to PassThroughCondition not being supported
assert result.success is False
exception_info_str = str(result.exception_info)
assert "PassThroughCondition" in exception_info_str
assert "not supported for SqlAlchemyExecutionEngine" in exception_info_str
| TestSqlAlchemyRejectsPassThroughCondition |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 17502,
"end": 17821
} | class ____(TypedDict):
directory_type: Required[Literal["workspace"]]
workspace: Required[DgRawWorkspaceConfig]
cli: NotRequired[DgRawCliConfig]
def is_workspace_file_config(config: "DgFileConfig") -> TypeGuard[DgWorkspaceFileConfig]:
return config["directory_type"] == "workspace"
| DgWorkspaceFileConfig |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_wmt.py | {
"start": 9707,
"end": 11323
} | class ____(nn.Module):
"""Transformer Model Encoder for sequence to sequence translation.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
shared_embedding: a shared embedding layer to use.
"""
config: TransformerConfig
shared_embedding: Any = None
@nn.compact
def __call__(self,
inputs,
inputs_positions=None,
encoder_mask=None):
"""Applies Transformer model on the inputs.
Args:
inputs: input data
inputs_positions: input subsequence positions for packed examples.
encoder_mask: decoder self-attention mask.
Returns:
output of a transformer encoder.
"""
config = self.config
assert inputs.ndim == 2 # (batch, len)
# Input Embedding
if self.shared_embedding is None:
input_embed = nn.Embed(
num_embeddings=config.vocab_size,
features=config.emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
input_embed = self.shared_embedding
x = inputs.astype('int32')
x = input_embed(x)
x = AddPositionEmbs(
config=config, decode=False, name='posembed_input')(
x, inputs_positions=inputs_positions)
x = nn.Dropout(rate=config.dropout_rate)(
x, deterministic=config.deterministic)
x = x.astype(config.dtype)
# Input Encoder
for lyr in range(config.num_layers):
x = Encoder1DBlock(
config=config, name=f'encoderblock_{lyr}')(x, encoder_mask)
encoded = nn.LayerNorm(dtype=config.dtype, name='encoder_norm')(x)
return encoded
| Encoder |
python | streamlit__streamlit | lib/streamlit/elements/widgets/multiselect.py | {
"start": 5559,
"end": 21059
} | class ____:
@overload
def multiselect(
self,
label: str,
options: OptionSequence[T],
default: Any | None = None,
format_func: Callable[[Any], str] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
max_selections: int | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
accept_new_options: Literal[False] = False,
width: WidthWithoutContent = "stretch",
) -> list[T]: ...
@overload
def multiselect(
self,
label: str,
options: OptionSequence[T],
default: Any | None = None,
format_func: Callable[[Any], str] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
max_selections: int | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
accept_new_options: Literal[True] = True,
width: WidthWithoutContent = "stretch",
) -> list[T | str]: ...
@overload
def multiselect(
self,
label: str,
options: OptionSequence[T],
default: Any | None = None,
format_func: Callable[[Any], str] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
max_selections: int | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
accept_new_options: bool = False,
width: WidthWithoutContent = "stretch",
) -> list[T] | list[T | str]: ...
@gather_metrics("multiselect")
def multiselect(
self,
label: str,
options: OptionSequence[T],
default: Any | None = None,
format_func: Callable[[Any], str] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
max_selections: int | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
accept_new_options: Literal[False, True] | bool = False,
width: WidthWithoutContent = "stretch",
) -> list[T] | list[T | str]:
r"""Display a multiselect widget.
The multiselect widget starts as empty.
Parameters
----------
label : str
A short label explaining to the user what this select widget is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
options : Iterable
Labels for the select options in an ``Iterable``. This can be a
``list``, ``set``, or anything supported by ``st.dataframe``. If
``options`` is dataframe-like, the first column will be used. Each
label will be cast to ``str`` internally by default.
default : Iterable of V, V, or None
List of default values. Can also be a single value.
format_func : function
Function to modify the display of the options. It receives
the raw option as an argument and should output the label to be
shown for that option. This has no impact on the return value of
the command.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this widget's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
max_selections : int
The max selections that can be selected at a time.
placeholder : str or None
A string to display when no options are selected.
If this is ``None`` (default), the widget displays placeholder text
based on the widget's configuration:
- "Choose options" is displayed when options are available and
``accept_new_options=False``.
- "Choose or add options" is displayed when options are available
and ``accept_new_options=True``.
- "Add options" is displayed when no options are available and
``accept_new_options=True``.
- "No options to select" is displayed when no options are available
and ``accept_new_options=False``. The widget is also disabled in
this case.
disabled : bool
An optional boolean that disables the multiselect widget if set
to ``True``. The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
accept_new_options : bool
Whether the user can add selections that aren't included in ``options``.
If this is ``False`` (default), the user can only select from the
items in ``options``. If this is ``True``, the user can enter new
items that don't exist in ``options``.
When a user enters and selects a new item, it is included in the
widget's returned list as a string. The new item is not added to
the widget's drop-down menu. Streamlit will use a case-insensitive
match from ``options`` before adding a new item, and a new item
can't be added if a case-insensitive match is already selected. The
``max_selections`` argument is still enforced.
width : "stretch" or int
The width of the multiselect widget. This can be one of the
following:
- ``"stretch"`` (default): The width of the widget matches the
width of the parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
list
A list of the selected options.
The list contains copies of the selected options, not the originals.
Examples
--------
**Example 1: Use a basic multiselect widget**
You can declare one or more initial selections with the ``default``
parameter.
>>> import streamlit as st
>>>
>>> options = st.multiselect(
... "What are your favorite colors?",
... ["Green", "Yellow", "Red", "Blue"],
... default=["Yellow", "Red"],
... )
>>>
>>> st.write("You selected:", options)
.. output::
https://doc-multiselect.streamlit.app/
height: 350px
**Example 2: Let users to add new options**
To allow users to enter and select new options that aren't included in
the ``options`` list, use the ``accept_new_options`` parameter. To
prevent users from adding an unbounded number of new options, use the
``max_selections`` parameter.
>>> import streamlit as st
>>>
>>> options = st.multiselect(
... "What are your favorite cat names?",
... ["Jellybeans", "Fish Biscuit", "Madam President"],
... max_selections=5,
... accept_new_options=True,
... )
>>>
>>> st.write("You selected:", options)
.. output::
https://doc-multiselect-accept-new-options.streamlit.app/
height: 350px
"""
# Convert empty string to single space to distinguish from None:
# - None (default) → "" → Frontend shows contextual placeholders
# - "" (explicit empty) → " " → Frontend shows empty placeholder
# - "Custom" → "Custom" → Frontend shows custom placeholder
if placeholder == "":
placeholder = " "
ctx = get_script_run_ctx()
return self._multiselect(
label=label,
options=options,
default=default,
format_func=format_func,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
max_selections=max_selections,
placeholder=placeholder,
disabled=disabled,
label_visibility=label_visibility,
accept_new_options=accept_new_options,
width=width,
ctx=ctx,
)
def _multiselect(
self,
label: str,
options: OptionSequence[T],
default: Any | None = None,
format_func: Callable[[Any], str] = str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
max_selections: int | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
accept_new_options: bool = False,
width: WidthWithoutContent = "stretch",
ctx: ScriptRunContext | None = None,
) -> list[T] | list[T | str]:
key = to_key(key)
widget_name = "multiselect"
check_widget_policies(
self.dg,
key,
on_change,
default_value=default,
)
maybe_raise_label_warnings(label, label_visibility)
indexable_options = convert_to_sequence_and_check_comparable(options)
formatted_options, formatted_option_to_option_index = create_mappings(
indexable_options, format_func
)
default_values = get_default_indices(indexable_options, default)
# Convert empty string to single space to distinguish from None:
# - None (default) → "" → Frontend shows contextual placeholders
# - "" (explicit empty) → " " → Frontend shows empty placeholder
# - "Custom" → "Custom" → Frontend shows custom placeholder
if placeholder == "":
placeholder = " "
form_id = current_form_id(self.dg)
element_id = compute_and_register_element_id(
widget_name,
user_key=key,
# Treat the provided key as the main identity. Only include
# changes to the options, accept_new_options, and max_selections
# in the identity computation as those can invalidate the
# current selection.
key_as_main_identity={
"options",
"max_selections",
"accept_new_options",
"format_func",
},
dg=self.dg,
label=label,
options=formatted_options,
default=default_values,
help=help,
max_selections=max_selections,
placeholder=placeholder,
accept_new_options=accept_new_options,
width=width,
)
proto = MultiSelectProto()
proto.id = element_id
proto.default[:] = default_values
proto.form_id = form_id
proto.disabled = disabled
proto.label = label
proto.max_selections = max_selections or 0
proto.placeholder = placeholder or ""
proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
proto.options[:] = formatted_options
if help is not None:
proto.help = dedent(help)
proto.accept_new_options = accept_new_options
serde = MultiSelectSerde(
indexable_options,
formatted_options=formatted_options,
formatted_option_to_option_index=formatted_option_to_option_index,
default_options_indices=default_values,
)
widget_state = register_widget(
proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="string_array_value",
)
_check_max_selections(widget_state.value, max_selections)
widget_state = maybe_coerce_enum_sequence(
widget_state, options, indexable_options
)
if widget_state.value_changed:
proto.raw_values[:] = serde.serialize(widget_state.value)
proto.set_value = True
validate_width(width)
layout_config = LayoutConfig(width=width)
if ctx:
save_for_app_testing(ctx, element_id, format_func)
self.dg._enqueue(widget_name, proto, layout_config=layout_config)
return widget_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| MultiSelectMixin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 5581,
"end": 5706
} | class ____:
@classmethod
def m[S](cls: "type[S]") -> "type[S]": ... # PYI019
| BadSubscriptReturnTypeWithStringTypeHints |
python | donnemartin__interactive-coding-challenges | online_judges/nim/test_can_win_nim.py | {
"start": 18,
"end": 675
} | class ____(unittest.TestCase):
def test_can_win_nim(self):
solution = Solution()
self.assertRaises(TypeError, solution.can_win_nim, None)
self.assertEqual(solution.can_win_nim(1), True)
self.assertEqual(solution.can_win_nim(2), True)
self.assertEqual(solution.can_win_nim(3), True)
self.assertEqual(solution.can_win_nim(4), False)
self.assertEqual(solution.can_win_nim(7), True)
self.assertEqual(solution.can_win_nim(40), False)
print('Success: test_can_win_nim')
def main():
test = TestSolution()
test.test_can_win_nim()
if __name__ == '__main__':
main()
| TestSolution |
python | rapidsai__cudf | docs/cudf/source/_ext/PandasCompat.py | {
"start": 1054,
"end": 2844
} | class ____(BaseAdmonition, SphinxDirective):
# this enables content in the directive
has_content = True
def run(self):
targetid = "PandasCompat-%d" % self.env.new_serialno("PandasCompat")
targetnode = nodes.target("", "", ids=[targetid])
PandasCompat_node = PandasCompat("\n".join(self.content))
PandasCompat_node += nodes.title(
get_translation_sphinx("Pandas Compatibility Note"),
get_translation_sphinx("Pandas Compatibility Note"),
)
PandasCompat_node["docname"] = self.env.docname
PandasCompat_node["target"] = targetnode
self.state.nested_parse(
self.content, self.content_offset, PandasCompat_node
)
if not hasattr(self.env, "PandasCompat_all_pandas_compat"):
self.env.PandasCompat_all_pandas_compat = []
self.env.PandasCompat_all_pandas_compat.append(
{
"docname": self.env.docname,
"PandasCompat": PandasCompat_node.deepcopy(),
"target": targetnode,
}
)
return [targetnode, PandasCompat_node]
def purge_PandasCompats(app, env, docname):
if not hasattr(env, "PandasCompat_all_pandas_compat"):
return
env.PandasCompat_all_pandas_compat = [
PandasCompat
for PandasCompat in env.PandasCompat_all_pandas_compat
if PandasCompat["docname"] != docname
]
def merge_PandasCompats(app, env, docnames, other):
if not hasattr(env, "PandasCompat_all_pandas_compat"):
env.PandasCompat_all_pandas_compat = []
if hasattr(other, "PandasCompat_all_pandas_compat"):
env.PandasCompat_all_pandas_compat.extend(
other.PandasCompat_all_pandas_compat
)
| PandasCompatDirective |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/pool/base.py | {
"start": 4169,
"end": 4230
} | class ____(_ConnDialect):
is_async = True
| _AsyncConnDialect |
python | apache__airflow | providers/elasticsearch/tests/unit/elasticsearch/hooks/test_elasticsearch.py | {
"start": 1878,
"end": 2632
} | class ____:
def setup_method(self):
self.connection = Connection(host="localhost", port=9200, schema="http")
class UnitTestElasticsearchSQLHook(ElasticsearchSQLHook):
conn_name_attr = "elasticsearch_conn_id"
self.db_hook = UnitTestElasticsearchSQLHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@mock.patch("airflow.providers.elasticsearch.hooks.elasticsearch.connect")
def test_get_conn(self, mock_connect):
self.db_hook.test_conn_id = "non_default"
self.db_hook.get_conn()
mock_connect.assert_called_with(host="localhost", port=9200, scheme="http", user=None, password=None)
| TestElasticsearchSQLHookConn |
python | pytorch__pytorch | torch/distributed/_symmetric_memory/_nvshmem_triton.py | {
"start": 3634,
"end": 46972
} | class ____:
"""
A class to register kernel functions that ** require NVSHMEM initialization **
"""
# Class variable to store the functions to be initialized
_to_init: dict[str, Any] = {}
@classmethod
def register(cls, name: str) -> None:
"""
Register a kernel function with the given name.
Args:
name (str): The name of the kernel function.
"""
cls._to_init.setdefault(name)
@classmethod
def deregister(cls, name: str) -> None:
"""
Deregister a kernel function with the given name.
Args:
name (str): The name of the kernel function.
"""
cls._to_init.pop(name, None)
@classmethod
def has(cls, name: str) -> bool:
"""
Check if a kernel function with the given name is registered.
Args:
name (str): The name of the kernel function.
Returns:
bool: True if the kernel function is registered, False otherwise.
"""
return name in cls._to_init
def _nvshmem_init_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
"""
A hook function to initialize the CUModule created by `triton.jit` with
NVSHMEM device context
"""
from torch._C._distributed_c10d import _nvshmemx_cumodule_init
jit_function = kwargs["fn"].jit_function
fn_name = jit_function.fn.__name__
# Only initialize NVSHMEM module for kernels registered via @requires_nvshmem
if NvshmemKernelRegistry.has(fn_name):
key = kwargs["key"]
device = kwargs["compile"]["device"]
jit_function = kwargs["fn"].jit_function
kernel_cache = jit_function.device_caches[device][0]
kernel = kernel_cache.get(key, None)
if kernel is not None:
kernel.run
# Initialize NVSHMEM for the CU module
_nvshmemx_cumodule_init(kernel.module)
else:
logger.warning(
f"It seems Triton hasn't created a kernel for function {fn_name}. " # noqa: G004
"Please report this issue to Triton."
)
if has_triton():
from triton.runtime.jit import JITFunction, KernelInterface
# Create a new Callable class that follows the KernelInterface protocol so
# that the Callable works with the subscript operator, e.g. `foo[(1, 1)]`
class GridCallableWithExtern(KernelInterface):
"""
`KernelInterface` invokes `self.run` in `__getitem__`, i.e. []. We
implement a `run` method by directing the call to `JITFunction.run`,
with added extern_libs kwarg, so that users don't have to pass it
"""
def __init__(self, jit_func: JITFunction, extern_libs: dict[str, str]) -> None:
self.jit_func = jit_func
self.extern_libs = extern_libs
def run(self, *args, **kwargs): # type: ignore[no-untyped-def]
# Call the JITFunction.run with added extern_libs kwarg
return self.jit_func.run(*args, **kwargs, extern_libs=self.extern_libs)
def requires_nvshmem( # type: ignore[no-untyped-def]
jit_func, # JITFunction created by triton.jit
):
"""
A decorator to register a Triton kernel function that requires NVSHMEM initialization.
Example usage:
```
@requires_nvshmem
@triton.jit
def foo(...):
...
```
If you would like to specify a path to the NVSHMEM device library other
than standard search locations, you can use the following environment
variable:
```
export NVSHMEM_LIB_DIR=/path/to/nvshmem/lib
```
"""
import triton
from triton.runtime.jit import JITFunction
if not isinstance(jit_func, JITFunction):
raise TypeError(f"Expected a JITFunction, but got {type(jit_func)}")
# Find the NVSHMEM device library
lib_path = NvshmemLibFinder.find_device_library()
extern_libs = {"libnvshmem_device": lib_path}
# Register the JITFunction with the kernel registry as "to be initialized"
NvshmemKernelRegistry.register(jit_func.fn.__name__)
# Register the NVSHMEM init function as a post-compile hook.
# [Note] This is a global setting (due to lack of Triton API exposure). To
# avoid initializing Triton kernels that do not require NVSHMEM, filtering
# is performed in the hook function itself by checking against
# NvshmemKernelRegistry.
triton.knobs.runtime.jit_post_compile_hook = _nvshmem_init_hook
return GridCallableWithExtern(jit_func, extern_libs)
if has_triton():
import triton
import triton.language as tl
from triton.language import core
@triton.jit # type: ignore[misc]
def put(dest, source, nelems, pe): # type: ignore[no-untyped-def]
"""
Put tensor data from local PE to a remote PE.
This high-level function provides a tensor-aware interface for NVSHMEM put
operations. It automatically handles type checking and size calculations, making
the API more ergonomic and type-safe.
Args:
dest: Destination tensor on the remote PE. Type must match source.
source: Source tensor on the local PE containing data to be copied.
nelems: Number of elements to transfer.
pe: PE number of the remote PE (0 ≤ pe < nvshmem_n_pes()).
Notes:
- Performs compile-time type checking between dest and source tensors.
- Automatically calculates byte size from tensor type and element count.
- This is a blocking operation that returns after data has been copied out
of the source array on the local PE.
- The operation does not guarantee delivery to the destination PE.
Use nvshmem_fence() for ordering or nvshmem_quiet() for completion.
Example:
```
# Transfer 100 elements to PE 1
nvshmem.put(dest_tensor, src_tensor, 100, 1)
```
"""
tl.static_assert(dest.type == source.type)
nbytes = nelems * dest.type.element_ty.itemsize
return putmem_block_extern_wrapper(
dest.to(tl.int64), source.to(tl.int64), nbytes.to(tl.int64), pe
)
@core.extern
def putmem_block_extern_wrapper(dest, source, size_bytes, pe, _semantic=None): # type: ignore[no-untyped-def]
"""Low-level extern wrapper for NVSHMEM put"""
return core.extern_elementwise(
"",
"",
[dest, source, size_bytes, pe],
{
(
core.dtype("int64"), # dest ptr
core.dtype("int64"), # source ptr
core.dtype("int64"), # size in bytes
core.dtype("int32"), # pe number
): ("nvshmemx_putmem_block", core.dtype("int32"))
},
is_pure=False,
_semantic=_semantic,
)
@triton.jit # type: ignore[misc]
def get(dest, source, nelems, pe): # type: ignore[no-untyped-def]
"""
Get tensor data from a remote PE to local PE.
This high-level function provides a tensor-aware interface for NVSHMEM get
operations. It automatically handles type checking and size calculations, making
the API more ergonomic and type-safe.
Args:
dest: Destination tensor on the local PE. Type must match source.
source: Source tensor on the remote PE containing data to be copied.
nelems: Number of elements to transfer.
pe: PE number of the remote PE (0 ≤ pe < nvshmem_n_pes()).
Notes:
- Performs compile-time type checking between dest and source tensors.
- Automatically calculates byte size from tensor type and element count.
- This is a blocking operation that returns after data has been delivered
to the destination array on the local PE.
- The destination data is guaranteed to be available for use after the call returns.
Example:
```
# Get 100 elements from PE 0
nvshmem.get(dest_tensor, src_tensor, 100, 0)
```
"""
tl.static_assert(dest.type == source.type)
nbytes = nelems * dest.type.element_ty.itemsize
return getmem_block_extern_wrapper(
dest.to(tl.int64), source.to(tl.int64), nbytes.to(tl.int64), pe
)
@core.extern
def getmem_block_extern_wrapper(dest, source, size_bytes, pe, _semantic=None): # type: ignore[no-untyped-def]
"""Low-level extern wrapper for NVSHMEM get"""
return core.extern_elementwise(
"",
"",
[dest, source, size_bytes, pe],
{
(
core.dtype("int64"), # dest ptr
core.dtype("int64"), # source ptr
core.dtype("int64"), # size in bytes
core.dtype("int32"), # pe number
): ("nvshmemx_getmem_block", core.dtype("int32"))
},
is_pure=False,
_semantic=_semantic,
)
@triton.jit # type: ignore[misc]
def get_nbi(dest, source, nelems, pe): # type: ignore[no-untyped-def]
"""
Get tensor data from a remote PE to local PE, non-blocking.
Different from the `get` function, this function returns after
initiating the operation. The operation is considered complete after a
subsequent call to `quiet`.
Args:
dest: Destination tensor on the local PE. Type must match source.
source: Source tensor on the remote PE containing data to be copied.
nelems: Number of elements to transfer.
pe: PE number of the remote PE (0 ≤ pe < nvshmem_n_pes()).
Notes:
- Performs compile-time type checking between dest and source tensors.
- Automatically calculates byte size from tensor type and element count.
Example:
```
# Get 100 elements from PE 0
nvshmem.get_nbi(dest, src, 100, 0)
# Some independent computation which overlaps with the get operation
...
# Wait for completion of the get operation
nvshmem.quiet()
```
"""
tl.static_assert(dest.type == source.type)
nbytes = nelems * dest.type.element_ty.itemsize
return getmem_block_extern_wrapper(
dest.to(tl.int64), source.to(tl.int64), nbytes.to(tl.int64), pe
)
@core.extern
def getmem_nbi_block_extern_wrapper(dest, source, size_bytes, pe, _semantic=None): # type: ignore[no-untyped-def]
"""Low-level extern wrapper for NVSHMEM get"""
return core.extern_elementwise(
"",
"",
[dest, source, size_bytes, pe],
{
(
core.dtype("int64"), # dest ptr
core.dtype("int64"), # source ptr
core.dtype("int64"), # size in bytes
core.dtype("int32"), # pe number
): ("nvshmemx_getmem_nbi_block", core.dtype("int32"))
},
is_pure=False,
_semantic=_semantic,
)
@triton.jit  # type: ignore[misc]
def putmem_signal_block(  # type: ignore[no-untyped-def]
    dst,
    src,
    size_bytes,
    signal,
    sig_val,
    sig_op,
    pe,
):  # type: ignore[no-untyped-def]
    """
    Put data to remote PE with atomic signal operation using block-scoped operation.
    This function copies data from the local PE to the remote PE and then
    atomically updates a signal variable on the remote PE to indicate completion.
    This enables efficient point-to-point synchronization between PEs.
    Args:
        dst (tensor): A tensor on calling PE symmetric to the destination tensor on remote PE.
        src (tensor): Local tensor containing the source data.
        size_bytes (int64): Number of bytes to transfer. Must be positive.
        signal (tensor): Symmetric signal pad with remote PE.
            Must be 8-byte aligned symmetric memory.
        sig_val (int64): Value to be used in the signal operation.
        sig_op (int32): Signal operation type. Common values:
            - NVSHMEM_SIGNAL_SET (0): Atomic set operation
            - NVSHMEM_SIGNAL_ADD (5): Atomic add operation
        pe (int32): PE number of the remote PE (0 ≤ pe < nvshmem_n_pes()).
    Returns:
        int32: Status code (0 for success).
    Notes:
        - This is a blocking operation that returns after data has been copied out
          of the source array and the signal has been updated on the remote PE.
        - The signal update is performed atomically with respect to other signal
          operations and synchronization routines.
        - The signal variable must be of type uint64_t in symmetric memory.
        - Use with nvshmem_signal_wait_until() for synchronization.
    Example:
        ```
        # Transfer data and set completion flag to 1
        NVSHMEM_SIGNAL_SET = 0
        nvshmem.putmem_signal_block(
            dst_ptr, src_ptr, 1024, sig_ptr, 1, NVSHMEM_SIGNAL_SET, target_pe
        )
        ```
    """
    # NOTE(review): `0 << 32 | sig_val` evaluates to `sig_val` unchanged
    # (0 << 32 == 0), so this line is a no-op; the actual widening to 64 bits
    # happens in the `.to(tl.uint64)` cast below — confirm original intent.
    sig_val = 0 << 32 | sig_val
    return putmem_signal_block_extern_wrapper(
        dst.to(tl.int64),
        src.to(tl.int64),
        size_bytes.to(tl.int64),
        signal.to(tl.int64),
        sig_val.to(tl.uint64),
        sig_op,
        pe,
    )
@core.extern
def putmem_signal_block_extern_wrapper(  # type: ignore[no-untyped-def]
    dst,
    src,
    size_bytes,
    signal,
    sig_val,
    sig_op,
    pe,
    _semantic=None,
):  # type: ignore[no-untyped-def]
    """Low-level extern wrapper for NVSHMEM put-with-signal (nvshmemx_putmem_signal_block)."""
    # Argument signature: dst ptr, src ptr, byte count, signal ptr,
    # signal value, signal op, target PE.
    signature = (
        core.dtype("int64"),
        core.dtype("int64"),
        core.dtype("int64"),
        core.dtype("int64"),
        core.dtype("uint64"),
        core.dtype("int32"),
        core.dtype("int32"),
    )
    return core.extern_elementwise(
        "",
        "",
        [dst, src, size_bytes, signal, sig_val, sig_op, pe],
        {signature: ("nvshmemx_putmem_signal_block", core.dtype("int32"))},
        is_pure=False,
        _semantic=_semantic,
    )
# Wait and Signal Operations
@triton.jit  # type: ignore[misc]
def wait_until(ivar, cmp_op, cmp_val):  # type: ignore[no-untyped-def]
    """
    Wait until a tensor variable meets a specified condition.
    This high-level function provides a tensor-aware interface for NVSHMEM wait_until
    operations. It automatically handles tensor address extraction, making
    the API more ergonomic and type-safe.
    Args:
        ivar: Tensor to monitor in symmetric memory. Must have a 32-bit
            element type (enforced by the static_assert below; this maps to
            nvshmem_int_wait_until).
        cmp_op: Comparison operator. Common values:
            - NVSHMEM_CMP_EQ (0): Wait until ivar == cmp_val
            - NVSHMEM_CMP_NE (1): Wait until ivar != cmp_val
            - NVSHMEM_CMP_GT (2): Wait until ivar > cmp_val
            - NVSHMEM_CMP_GE (3): Wait until ivar >= cmp_val
            - NVSHMEM_CMP_LT (4): Wait until ivar < cmp_val
            - NVSHMEM_CMP_LE (5): Wait until ivar <= cmp_val
        cmp_val: Value to compare against.
    Notes:
        - This is a blocking operation that will wait indefinitely until the
          condition is satisfied.
        - The tensor must be in symmetric memory and accessible from other PEs.
    Example:
        ```
        # Wait until flag tensor becomes 1 (set by another PE)
        NVSHMEM_CMP_EQ = 0
        nvshmem.wait_until(flag_tensor, NVSHMEM_CMP_EQ, 1)
        ```
    """
    tl.static_assert(
        ivar.type.element_ty.itemsize == 4,
        "wait_until expects a 32-bit type for the synchronization variable",
    )
    return wait_until_extern_wrapper(ivar.to(tl.int64), cmp_op, cmp_val)
@core.extern
def wait_until_extern_wrapper(ivar, cmp, cmp_val, _semantic=None):  # type: ignore[no-untyped-def]
    """Low-level extern wrapper for nvshmem_int_wait_until (32-bit sync variable)."""
    signature = (
        core.dtype("int64"),  # ivar ptr
        core.dtype("int32"),  # comparison op
        core.dtype("int32"),  # comparison value
    )
    return core.extern_elementwise(
        "",
        "",
        [ivar, cmp, cmp_val],
        {signature: ("nvshmem_int_wait_until", core.dtype("int32"))},
        is_pure=False,
        _semantic=_semantic,
    )
@triton.jit  # type: ignore[misc]
def signal_wait_until(signal, cmp, cmp_val):  # type: ignore[no-untyped-def]
    """
    Wait until a signal variable meets a specified condition.
    This function blocks the calling thread until the value at the specified
    signal variable satisfies the given comparison condition. Signal variables
    are special uint64_t symmetric objects used for efficient synchronization
    with signal operations.
    Args:
        signal (tensor): Symmetric signal tensor with remote PE.
            Must be 8-byte aligned symmetric memory.
        cmp (int32): Comparison operator. Common values:
            - NVSHMEM_CMP_EQ (0): Wait until signal == cmp_val
            - NVSHMEM_CMP_NE (1): Wait until signal != cmp_val
            - NVSHMEM_CMP_GT (2): Wait until signal > cmp_val
            - NVSHMEM_CMP_GE (3): Wait until signal >= cmp_val
            - NVSHMEM_CMP_LT (4): Wait until signal < cmp_val
            - NVSHMEM_CMP_LE (5): Wait until signal <= cmp_val
        cmp_val (int64): Value to compare against.
    Returns:
        int32: Status code (0 for success).
    Notes:
        - This is a blocking operation designed specifically for signal variables.
        - Signal variables are updated atomically by putmem_signal operations.
        - More efficient than wait_until for signal-based synchronization patterns.
        - Ensures the signal update is fully complete before returning.
        - Commonly used with putmem_signal_block for producer-consumer patterns.
    Example:
        ```
        # Wait for signal to be set to completion value
        NVSHMEM_CMP_EQ = 0
        nvshmem.signal_wait_until(signal_ptr, NVSHMEM_CMP_EQ, 42)
        ```
    """
    # NOTE(review): `0 << 32 | cmp_val` is a numeric no-op (0 << 32 == 0); the
    # widening to 64 bits is actually performed by `.to(tl.uint64)` below —
    # confirm original intent before simplifying.
    cmp_val = 0 << 32 | cmp_val
    return signal_wait_until_extern_wrapper(
        signal.to(tl.int64), cmp, cmp_val.to(tl.uint64)
    )
@core.extern
def signal_wait_until_extern_wrapper(signal, cmp, cmp_val, _semantic=None):  # type: ignore[no-untyped-def]
    """Low-level extern wrapper for nvshmem_signal_wait_until."""
    signature = (
        core.dtype("int64"),  # signal ptr
        core.dtype("int32"),  # comparison op
        core.dtype("uint64"),  # comparison value
    )
    return core.extern_elementwise(
        "",
        "",
        [signal, cmp, cmp_val],
        {signature: ("nvshmem_signal_wait_until", core.dtype("int32"))},
        is_pure=False,
        _semantic=_semantic,
    )
@core.extern
def signal_op(sig_addr, signal, sig_op, pe, _semantic=None):  # type: ignore[no-untyped-def]
    """
    Perform an atomic signal operation on a remote PE.
    This function atomically updates a signal variable on the specified remote PE
    using the given operation and value. This enables efficient point-to-point
    synchronization and notification between PEs.
    Args:
        sig_addr (int64): Symmetric address of the signal variable (uint64_t) on the remote PE.
            Must be 8-byte aligned symmetric memory.
        signal (int64): Value to be used in the signal operation.
        sig_op (int32): Signal operation type. Common values:
            - NVSHMEM_SIGNAL_SET (0): Atomically set sig_addr = signal
            - NVSHMEM_SIGNAL_ADD (5): Atomically set sig_addr += signal
        pe (int32): PE number of the remote PE (0 ≤ pe < nvshmem_n_pes()).
        _semantic: Optional semantic information for Triton compilation.
    Returns:
        int32: Status code (0 for success).
    Notes:
        - This is a one-sided operation - the remote PE does not need to participate.
        - The signal operation is performed atomically on the remote PE.
        - Can be used with signal_wait_until() on the remote PE for synchronization.
        - Provides low-overhead notification mechanism between PEs.
        - The signal variable must be of type uint64_t in symmetric memory.
    Example:
        ```python
        # Atomically set remote signal to 1 to notify completion
        NVSHMEM_SIGNAL_SET = 0
        nvshmem.signal_op(remote_signal_ptr, 1, NVSHMEM_SIGNAL_SET, target_pe)
        ```
    """
    return core.extern_elementwise(
        "",
        "",
        [sig_addr, signal, sig_op, pe],
        {
            (
                core.dtype("int64"),  # sig_addr: remote signal address
                core.dtype("int64"),  # signal: operand value
                core.dtype("int32"),  # sig_op: operation selector
                core.dtype("int32"),  # pe: target PE number
            ): ("nvshmemx_signal_op", core.dtype("int32"))
        },
        is_pure=False,
        _semantic=_semantic,
    )
# Memory Ordering Operations
@core.extern
def fence(_semantic=None):  # type: ignore[no-untyped-def]
    """
    Order put operations per destination PE (nvshmem_fence).

    Put operations issued before the fence are observed by the target PE
    before put operations issued after it, on a per-destination-PE basis.
    Operations to *different* PEs may still be reordered relative to each
    other. Unlike quiet(), fence() guarantees ordering only — not
    completion — and it does not order non-blocking operations.

    Args:
        _semantic: Optional semantic information for Triton compilation.

    Returns:
        int32: Status code (0 for success).

    Example:
        ```
        # Ensure first put is ordered before second put to the same PE
        nvshmem.put(dst, src, nelems, target_pe)
        nvshmem.fence()  # enforce ordering
        nvshmem.put(dst2, src2, nelems, target_pe)
        ```
    """
    dispatch_table = {(): ("nvshmem_fence", core.dtype("int32"))}
    return core.extern_elementwise(
        "",
        "",
        [],
        dispatch_table,
        is_pure=False,
        _semantic=_semantic,
    )
@core.extern
def quiet(_semantic=None):  # type: ignore[no-untyped-def]
    """
    Block until all outstanding put operations complete (nvshmem_quiet).

    Stronger than fence(): it guarantees both completion and global ordering
    of all prior remote memory operations issued by the calling PE — to every
    destination PE, including non-blocking operations — before any subsequent
    operation. Correspondingly more expensive than fence().

    Args:
        _semantic: Optional semantic information for Triton compilation.

    Returns:
        int32: Status code (0 for success).

    Example:
        ```
        # Ensure all data transfers complete before signaling completion
        nvshmem.putmem_block(data_ptr, src_ptr, data_size, target_pe)
        nvshmem.quiet()  # wait for the data transfer to complete
        nvshmem.putmem_block(flag_ptr, flag_src_ptr, 8, target_pe)
        ```
    """
    dispatch_table = {(): ("nvshmem_quiet", core.dtype("int32"))}
    return core.extern_elementwise(
        "",
        "",
        [],
        dispatch_table,
        is_pure=False,
        _semantic=_semantic,
    )
# PE Information Operations
@core.extern
def my_pe(_semantic=None):  # type: ignore[no-untyped-def]
    """
    Return the PE number of the calling PE (nvshmem_my_pe).

    PE numbers are contiguous, start at 0, and range up to n_pes() - 1. The
    value is fixed for the lifetime of the job, so the extern is marked pure.
    Useful for PE-specific branching (e.g. root vs. non-root logic).

    Args:
        _semantic: Optional semantic information for Triton compilation.

    Returns:
        int32: PE number of the calling PE (0 ≤ pe < nvshmem_n_pes()).
    """
    dispatch_table = {(): ("nvshmem_my_pe", core.dtype("int32"))}
    return core.extern_elementwise(
        "",
        "",
        [],
        dispatch_table,
        is_pure=True,
        _semantic=_semantic,
    )
@core.extern
def n_pes(_semantic=None):  # type: ignore[no-untyped-def]
    """
    Return the total number of PEs in the NVSHMEM job (nvshmem_n_pes).

    The count (always ≥ 1) is determined at NVSHMEM initialization and never
    changes, so the extern is marked pure. Valid PE numbers range from 0 to
    n_pes() - 1. Useful for building collective communication patterns.

    Args:
        _semantic: Optional semantic information for Triton compilation.

    Returns:
        int32: Total number of PEs in the job.
    """
    dispatch_table = {(): ("nvshmem_n_pes", core.dtype("int32"))}
    return core.extern_elementwise(
        "",
        "",
        [],
        dispatch_table,
        is_pure=True,
        _semantic=_semantic,
    )
# Synchronization Operations
@core.extern
def barrier_all(_semantic=None):  # type: ignore[no-untyped-def]
    """
    Collective barrier with completion guarantee (nvshmem_barrier_all).

    Every PE blocks until all PEs have arrived AND all local and remote
    memory updates issued before the barrier — by any PE — are complete and
    visible to every PE. This is a stronger (and more expensive) guarantee
    than sync_all(), which only ensures local store visibility.

    All PEs must participate, and kernels must be launched with cooperative
    launch.

    Args:
        _semantic: Optional semantic information for Triton compilation.

    Returns:
        int32: Status code (0 for success).

    Example:
        ```
        # Collective: every PE executes this
        nvshmem.barrier_all()
        # All previous operations are now complete on all PEs
        ```
    """
    dispatch_table = {(): ("nvshmem_barrier_all", core.dtype("int32"))}
    return core.extern_elementwise(
        "",
        "",
        [],
        dispatch_table,
        is_pure=False,
        _semantic=_semantic,
    )
@core.extern
def sync_all(_semantic=None):  # type: ignore[no-untyped-def]
    """
    Lightweight collective synchronization (nvshmem_sync_all).

    Ensures that local store operations issued before the sync are visible to
    other PEs, but — unlike barrier_all() — does NOT wait for completion of
    remote memory operations initiated by the calling PE; those may still be
    in flight when this returns. Use it when only a synchronization point is
    needed and pay for full completion with barrier_all() only when required.

    All PEs must participate, and kernels must be launched with cooperative
    launch.

    Args:
        _semantic: Optional semantic information for Triton compilation.

    Returns:
        int32: Status code (0 for success).

    Example:
        ```
        # Collective: every PE executes this
        nvshmem.sync_all()
        # Local stores are visible; outgoing remote ops may still be pending
        ```
    """
    dispatch_table = {(): ("nvshmem_sync_all", core.dtype("int32"))}
    return core.extern_elementwise(
        "",
        "",
        [],
        dispatch_table,
        is_pure=False,
        _semantic=_semantic,
    )
# Collective Operations (mem-based APIs - sizes in bytes)
@triton.jit  # type: ignore[misc]
def alltoall(team, dest, source, nelems_per_pe):  # type: ignore[no-untyped-def]
    """
    All-to-all tensor exchange between the PEs of a team.

    Each PE contributes nelems_per_pe elements for every other PE and
    receives the same amount from each of them. Data layout:
    source = [data_for_pe0, data_for_pe1, ...],
    dest   = [data_from_pe0, data_from_pe1, ...].

    Args:
        team: Team handle for the collective (0 for NVSHMEM_TEAM_WORLD).
        dest: Destination tensor; must hold nelems_per_pe * n_pes elements.
        source: Source tensor; must hold nelems_per_pe * n_pes elements.
        nelems_per_pe: Number of elements exchanged with each PE.

    Notes:
        - dest and source element types are checked at compile time.
        - Byte counts are derived from the element type automatically.
        - Collective operation: every PE in the team must participate.

    Example:
        ```
        # Each PE exchanges 10 elements with every other PE
        nvshmem.alltoall(0, dest_tensor, src_tensor, 10)
        ```
    """
    tl.static_assert(dest.type == source.type)
    bytes_per_pe = nelems_per_pe * dest.type.element_ty.itemsize
    return alltoallmem_block_extern_wrapper(
        team, dest.to(tl.int64), source.to(tl.int64), bytes_per_pe.to(tl.int64)
    )
@core.extern  # type: ignore[misc]
def alltoallmem_block_extern_wrapper(
    team: Any, dest: Any, source: Any, size_bytes: Any, _semantic: Any = None
) -> None:
    """Low-level extern wrapper for NVSHMEM alltoall (nvshmemx_alltoallmem_block)."""
    signature = (
        core.dtype("int32"),  # team handle
        core.dtype("int64"),  # dest ptr
        core.dtype("int64"),  # source ptr
        core.dtype("int64"),  # size in bytes
    )
    return core.extern_elementwise(
        "",
        "",
        [team, dest, source, size_bytes],
        {signature: ("nvshmemx_alltoallmem_block", core.dtype("int32"))},
        is_pure=False,
        _semantic=_semantic,
    )
@triton.jit  # type: ignore[misc]
def broadcast(team, dest, source, nelems, pe_root):  # type: ignore[no-untyped-def]
    """
    Broadcast tensor data from a root PE to every PE in a team.

    Tensor-aware front end for nvshmemx_broadcastmem_block: it checks that
    dest and source share an element type at compile time and converts the
    element count into a byte count automatically.

    Args:
        team: Team handle for the collective (0 for NVSHMEM_TEAM_WORLD).
        dest: Destination tensor; every PE receives the data here.
        source: Source tensor on the root PE. Type must match dest.
        nelems: Number of elements to broadcast.
        pe_root: PE number of the root PE providing the source data.

    Notes:
        - Collective operation: every PE in the team must participate.
        - Must be called from kernels launched with cooperative launch.

    Example:
        ```
        # Broadcast 100 elements from PE 0 to all PEs
        nvshmem.broadcast(0, dest_tensor, src_tensor, 100, 0)
        ```
    """
    tl.static_assert(dest.type == source.type)
    size_bytes = nelems * dest.type.element_ty.itemsize
    return broadcastmem_block_extern_wrapper(
        team, dest.to(tl.int64), source.to(tl.int64), size_bytes.to(tl.int64), pe_root
    )
@core.extern  # type: ignore[misc]
def broadcastmem_block_extern_wrapper(
    team: Any,
    dest: Any,
    source: Any,
    size_bytes: Any,
    pe_root: Any,
    _semantic: Any = None,
) -> None:
    """Low-level extern wrapper for NVSHMEM broadcast (nvshmemx_broadcastmem_block)."""
    signature = (
        core.dtype("int32"),  # team handle
        core.dtype("int64"),  # dest ptr
        core.dtype("int64"),  # source ptr
        core.dtype("int64"),  # size in bytes
        core.dtype("int32"),  # pe_root
    )
    return core.extern_elementwise(
        "",
        "",
        [team, dest, source, size_bytes, pe_root],
        {signature: ("nvshmemx_broadcastmem_block", core.dtype("int32"))},
        is_pure=False,
        _semantic=_semantic,
    )
# Reduction Operation
@triton.jit  # type: ignore[misc]
def reduce(team, dest, source, nreduce, operation: tl.constexpr):  # type: ignore[no-untyped-def]
    """
    Perform a collective reduction on tensors across a team of PEs.

    Tensor-aware front end for the typed NVSHMEM reduce externs: the element
    dtype is read from the input tensors and forwarded so the wrapper can
    select the matching nvshmem_<type>_<op>_reduce function.

    Args:
        team: Team handle for the collective (0 for NVSHMEM_TEAM_WORLD).
        dest: Destination tensor for the reduction results.
        source: Source tensor to reduce. Must have the same type as dest.
        nreduce: Number of elements of source to reduce.
        operation: Reduction to perform ("sum", "max", "min", "prod").

    Notes:
        - dest and source element types are checked at compile time.
        - Collective operation: every PE in the team must call it.
        - Requires a cooperative grid launch.

    Example:
        ```
        # Sum-reduce 100 elements across the team
        nvshmem.reduce(0, dest_tensor, src_tensor, 100, "sum")
        ```
    """
    tl.static_assert(dest.type == source.type)
    elem_dtype = dest.type.element_ty
    return reduce_extern_wrapper(
        team,
        dest.to(tl.int64),
        source.to(tl.int64),
        nreduce.to(tl.int64),
        operation,
        elem_dtype,
    )
@core.extern # type: ignore[misc]
def reduce_extern_wrapper(
    team: Any,
    dest: Any,
    source: Any,
    nreduce: Any,
    operation: str,
    dtype: Any,
    _semantic: Any = None,
) -> None:
    """
    Low-level extern wrapper for NVSHMEM reduction operations.

    Selects the appropriate typed NVSHMEM function (e.g.
    ``nvshmem_float_sum_reduce``) from the element dtype and the requested
    operation, then dispatches it via ``core.extern_elementwise``.

    Args:
        team (int32): The team handle (0 for NVSHMEM_TEAM_WORLD).
        dest (pointer): Destination pointer where reduction results are stored.
        source (pointer): Source pointer containing data to be reduced.
        nreduce (int64): Number of elements to reduce.
        operation (str): Reduction operation ("sum", "max", "min", "prod");
            may also be a Triton constexpr wrapping such a string.
        dtype: Triton element dtype (tl.dtype, a constexpr thereof, or its
            string form such as "fp32").
        _semantic: Optional semantic information for Triton compilation.

    Raises:
        TypeError: If the data type is not supported.
        ValueError: If the operation is not supported.
    """
    # Mapping from Triton dtype names to NVSHMEM typenames.
    DTYPE_TO_NVSHMEM_MAP = {
        "int8": "int8",
        "int16": "int16",
        "int32": "int32",
        "int64": "int64",
        "uint8": "uint8",
        "uint16": "uint16",
        "uint32": "uint32",
        "uint64": "uint64",
        "fp16": "half",
        "bf16": "bfloat16",
        "fp32": "float",
        "fp64": "double",
    }
    # Triton dtype names are standardized as fp16, bf16, fp32, etc.
    dtype_name = str(dtype).replace("tl.", "")
    # Single lookup doubles as validation (a later duplicate check was
    # unreachable and has been removed).
    nvshmem_typename = DTYPE_TO_NVSHMEM_MAP.get(dtype_name)
    if nvshmem_typename is None:
        raise TypeError(
            f"Unsupported reduction dtype: {dtype_name}. Supported dtypes: {list(DTYPE_TO_NVSHMEM_MAP.keys())}"
        )
    # Extract operation name from constexpr if needed.
    op_name = operation.value if hasattr(operation, "value") else operation
    supported_ops = {"sum", "max", "min", "prod"}
    if op_name not in supported_ops:
        raise ValueError(
            f"Unsupported reduction operation: '{op_name}'. Supported ops are {supported_ops}"
        )
    # Generate the typed NVSHMEM function name, e.g. nvshmem_float_sum_reduce.
    nvshmem_func = f"nvshmem_{nvshmem_typename}_{op_name}_reduce"
    # Pointers and sizes travel as int64 in Triton; the team handle is int32.
    signature = (
        core.dtype("int32"),  # team handle
        core.dtype("int64"),  # destination pointer
        core.dtype("int64"),  # source pointer
        core.dtype("int64"),  # number of elements
    )
    return core.extern_elementwise(
        "",
        "",
        [team, dest, source, nreduce],
        {signature: (nvshmem_func, core.dtype("int32"))},
        is_pure=False,
        _semantic=_semantic,
    )
# Utility for inspecting Triton kernels
triton_kernels: dict = {}  # insertion-ordered registry of kernels seen (values unused)
def _log_triton_kernel(kernel) -> None:  # type: ignore[no-untyped-def]
    """Record a compiled Triton kernel so its PTX can be dumped at process exit.

    Only rank 0 records kernels (any process does when torch.distributed is
    not initialized). The first call installs an atexit hook that writes each
    registered kernel's PTX to a file under /tmp and logs the path.
    """
    import atexit
    import tempfile
    if dist.is_initialized() and dist.get_rank() != 0:
        return
    def on_exit() -> None:
        logger.info("PTX files:")
        # Loop variable renamed so it does not shadow the enclosing
        # `kernel` parameter.
        for k in triton_kernels:
            with tempfile.NamedTemporaryFile(dir="/tmp", delete=False) as f:
                f.write(k.asm["ptx"].encode("utf-8"))
            logger.info(f"+- {k.name}: {f.name}")  # noqa: G004
    # Register the hook exactly once, on the first kernel recorded.
    if not triton_kernels:
        atexit.register(on_exit)
    if kernel not in triton_kernels:
        triton_kernels[kernel] = None
| NvshmemKernelRegistry |
python | encode__django-rest-framework | tests/authentication/test_authentication.py | {
"start": 21839,
"end": 23081
} | class ____(TestCase):
def test_base_authentication_abstract_method(self):
with pytest.raises(NotImplementedError):
BaseAuthentication().authenticate({})
def test_basic_authentication_raises_error_if_user_not_found(self):
auth = BasicAuthentication()
with pytest.raises(exceptions.AuthenticationFailed):
auth.authenticate_credentials('invalid id', 'invalid password')
def test_basic_authentication_raises_error_if_user_not_active(self):
from rest_framework import authentication
class MockUser:
is_active = False
old_authenticate = authentication.authenticate
authentication.authenticate = lambda **kwargs: MockUser()
try:
auth = authentication.BasicAuthentication()
with pytest.raises(exceptions.AuthenticationFailed) as exc_info:
auth.authenticate_credentials('foo', 'bar')
assert 'User inactive or deleted.' in str(exc_info.value)
finally:
authentication.authenticate = old_authenticate
@override_settings(ROOT_URLCONF=__name__,
AUTHENTICATION_BACKENDS=('django.contrib.auth.backends.RemoteUserBackend',))
| BasicAuthenticationUnitTests |
python | huggingface__transformers | tests/pipelines/test_pipelines_mask_generation.py | {
"start": 1649,
"end": 7291
} | class ____(unittest.TestCase):
model_mapping = dict(list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
image_segmenter = MaskGenerationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
@unittest.skip(reason="TODO @Arthur: Implement me")
def run_pipeline_test(self, mask_generator, examples):
pass
@slow
@require_torch
def test_small_model_pt(self):
image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs["masks"]):
new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
last_output = Expectations({
("xpu", None): {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8872},
("cuda", None): {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871},
("rocm", (9, 5)): {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8872}
}).get_expectation()
self.assertEqual(
nested_simplify(new_output, decimals=4),
[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
last_output
],
)
# fmt: on
@require_torch
@slow
def test_threshold(self):
model_id = "facebook/sam-vit-huge"
image_segmenter = pipeline("mask-generation", model=model_id)
outputs = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
)
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs["masks"]):
new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(new_output, decimals=4),
[
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
],
)
| MaskGenerationPipelineTests |
python | ApeWorX__ape | tests/functional/test_project.py | {
"start": 41630,
"end": 42780
} | class ____:
def test_iter(self, smaller_project):
actual = list(iter(smaller_project.contracts))
assert len(actual) > 0
assert "Other" in actual
def test_compile(self, smaller_project):
path = smaller_project.sources.lookup("Other.json")
actual = list(smaller_project.contracts._compile(path))
assert len(actual) == 1
assert actual[0].contract_type.name == "Other"
# Show it can happen again.
actual = list(smaller_project.contracts._compile(path))
assert len(actual) == 1
assert actual[0].contract_type.name == "Other"
def test_values(self, small_temp_project):
actual = [c.name for c in small_temp_project.contracts.values()]
assert "Other" in actual
example = small_temp_project.contracts["Other"]
count = len(small_temp_project.contracts)
# Delete a file and try again, as a test.
file = small_temp_project.path / example.source_id
assert file.is_file()
file.unlink()
new_count = len(small_temp_project.contracts)
assert new_count == count - 1
| TestContractManager |
python | getsentry__sentry | src/sentry/utils/concurrent.py | {
"start": 4438,
"end": 5454
} | class ____:
"""
This class provides an API for executing tasks in different contexts
(immediately, or asynchronously.)
NOTE: This is *not* compatible with the ``concurrent.futures.Executor``
API! Rather than ``submit`` accepting the function arguments, the function
must already have the argument values bound (via ``functools.partial`` or
similar), and ``submit`` passes all additional arguments to ``queue.put``
to allow controlling whether or not queue insertion should be blocking.
"""
def submit[T](
self,
callable: Callable[[], T],
priority: int = 0,
block: bool = True,
timeout=None,
) -> TimedFuture[T]:
"""
Enqueue a task to be executed, returning a ``TimedFuture``.
All implementations *must* accept the ``callable`` parameter, but other
parameters may or may not be implemented, depending on the specific
implementation used.
"""
raise NotImplementedError
| Executor |
python | google__jax | jax/experimental/jax2tf/tests/tf_test_util.py | {
"start": 2457,
"end": 6110
} | class ____:
tf_type: str # The standard Tf.Operation.type
op_type: str # The rest are OpMetadata fields from _Xla... attributes
op_name: str
source_file: str
source_line: str
def SaveAndLoadModel(model: tf.Module,
save_gradients=True) -> tf.Module:
# Roundtrip through saved model on disk.
model_dir = os.path.join(absltest.get_default_test_tmpdir(), str(id(model)))
tf.saved_model.save(
model, model_dir,
options=tf.saved_model.SaveOptions(experimental_custom_gradients=save_gradients))
restored_model = tf.saved_model.load(model_dir)
return restored_model
def SaveAndLoadFunction(f_tf: Callable, *,
input_signature: Sequence[tf.TensorSpec] | None = None,
input_args: Sequence[Any] | None = None,
variables: Sequence[tf.Variable] = (),
save_gradients=True) -> tuple[Callable, tf.train.Checkpoint]:
# Roundtrip through saved model on disk. Return the Checkpoint also
# for the cases when there are variables. If you don't pass input_signature
# then it is created from the input_args.
model = tf.train.Checkpoint()
if input_signature is None:
assert input_args is not None
input_signature = tf.nest.map_structure(lambda a: tf.TensorSpec(a.shape, a.dtype),
input_args)
else:
assert input_args is None
model.f = tf.function(f_tf,
autograph=False,
input_signature=input_signature)
model.variables = variables
restored = SaveAndLoadModel(model, save_gradients=save_gradients)
return restored.f, restored
def TransformJaxVJP(f: Callable, args, res_f_of_args):
# Given `f` and its `args` tuple and `res_f_of_args=f(*args)` return a pair of a function
# that computes the VJP of `f` and appropriate arguments tuple.
def make_ct(res):
res_dtype = np.result_type(res)
assert res_dtype != dtypes.float0
# We produce cotangents of the same type as the primal. It does not
# seem to matter whether we feed float0, and avoiding float0 makes things
# simpler with TF.
return np.ones(np.shape(res), dtype=res_dtype)
cts = tree_util.tree_map(make_ct, res_f_of_args)
def f_vjp(args, cts):
res, pullback = jax.vjp(f, *args)
return pullback(cts)
return (f_vjp, (args, cts))
def TransformTfValueAndGrad(tf_f: Callable, tf_args,
unconnected_gradients=tf.UnconnectedGradients.ZERO):
# Given a TF function `tf_f` and its `tf_args` tuple,
# return a pair of a function that computes both the value and the
# gradient and appropriate arguments tuple.
def wrapped(*tf_args):
tf_vars = tf.nest.map_structure(tf.Variable, tf_args)
with tf.GradientTape() as tape:
res_tf = tf_f(*tf_vars)
grad = tape.gradient(res_tf, tf_vars,
unconnected_gradients=unconnected_gradients)
return (res_tf, grad)
return wrapped, tf_args
def ComputeTfValueAndGrad(tf_f: Callable, tf_args: Sequence,
unconnected_gradients=tf.UnconnectedGradients.ZERO):
assert isinstance(tf_args, Sequence), f"tf_args must be a tuple: {tf_args}"
f1, args1 = TransformTfValueAndGrad(tf_f, tf_args,
unconnected_gradients=unconnected_gradients)
return f1(*args1)
# TODO(necula): clean up the test harnesses to not require these flags
@jtu.with_config(jax_numpy_rank_promotion="allow",
jax_numpy_dtype_promotion='standard',
jax_legacy_prng_key="allow",
jax_debug_key_reuse=False)
| OpMetadataGraph |
python | huggingface__transformers | src/transformers/models/ministral/modular_ministral.py | {
"start": 8661,
"end": 8710
} | class ____(Qwen2RMSNorm):
pass
| MinistralRMSNorm |
python | cython__cython | Cython/Compiler/Tests/TestParseTreeTransforms.py | {
"start": 2282,
"end": 3478
} | class ____: # (TransformTest): # Disabled!
def test_simplified(self):
t = self.run_pipeline([WithTransform(None)], """
with x:
y = z ** 3
""")
self.assertCode("""
$0_0 = x
$0_2 = $0_0.__exit__
$0_0.__enter__()
$0_1 = True
try:
try:
$1_0 = None
y = z ** 3
except:
$0_1 = False
if (not $0_2($1_0)):
raise
finally:
if $0_1:
$0_2(None, None, None)
""", t)
def test_basic(self):
t = self.run_pipeline([WithTransform(None)], """
with x as y:
y = z ** 3
""")
self.assertCode("""
$0_0 = x
$0_2 = $0_0.__exit__
$0_3 = $0_0.__enter__()
$0_1 = True
try:
try:
$1_0 = None
y = $0_3
y = z ** 3
except:
$0_1 = False
if (not $0_2($1_0)):
raise
finally:
if $0_1:
$0_2(None, None, None)
""", t)
| TestWithTransform |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 35825,
"end": 79346
} | class ____(torch.nn.Module):
def forward(self, s77: "Sym(s77)", s27: "Sym(s27)", L_x_: "f32[s77, s27]", s94: "Sym(s94)", L_y_: "f32[s27, s94]"):
l_x_ = L_x_
l_y_ = L_y_
wrap_body_1 = self.wrap_body_1
wrap = torch.ops.higher_order.wrap(wrap_body_1, s77, s27, l_x_, s94, l_y_); wrap_body_1 = s77 = s27 = l_x_ = s94 = l_y_ = None
getitem: "f32[s77, s94]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_1(torch.nn.Module):
def forward(self, s77: "Sym(s77)", s27: "Sym(s27)", l_x_: "f32[s77, s27]", s94: "Sym(s94)", l_y_: "f32[s27, s94]"):
wrap_body_0 = self.wrap_body_0
wrap = torch.ops.higher_order.wrap(wrap_body_0, s77, s27, l_x_, s94, l_y_); wrap_body_0 = s77 = s27 = l_x_ = s94 = l_y_ = None
getitem: "f32[s77, s94]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_0(torch.nn.Module):
def forward(self, s77: "Sym(s77)", s27: "Sym(s27)", l_x_: "f32[s77, s27]", s94: "Sym(s94)", l_y_: "f32[s27, s94]"):
matmul: "f32[s77, s94]" = l_x_ @ l_y_; l_x_ = l_y_ = None
return (matmul,)
""",
)
@torch._dynamo.config.patch(
assume_static_by_default=False,
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
)
def test_lift_tensors_with_compound_expressions(self):
def f(x, y):
x = x.view(-1, 2)
c = y.nonzero()
d = torch.concat((x, c))
def g(x):
def k(x):
return d.sum() + x
return wrap(k, x)
return wrap(g, x)
x = torch.randn(2, 3)
y = torch.randn(3, 4)
f(x, y)
if not check_dynamic_shape_capture():
out_graph = self._test_wrap_simple(
f,
default_args_generator((x, y)),
6,
9,
return_graph=True,
)
self.assertExpectedInline(
out_graph,
"""\
class GraphModule(torch.nn.Module):
def forward(self, s0: "Sym(s0)", s1: "Sym(s1)", L_x_: "f32[s0, s1]", s2: "Sym(s2)", L_y_: "f32[s1, s2]"):
l_x_ = L_x_
l_y_ = L_y_
x: "f32[((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]" = l_x_.view(-1, 2); l_x_ = None
c: "i64[u0, 2]" = l_y_.nonzero(); l_y_ = None
sym_size_int_1: "Sym(u0)" = torch.ops.aten.sym_size.int(c, 0)
_check_is_size = torch._check_is_size(sym_size_int_1); _check_is_size = None
ge: "Sym(u0 >= 0)" = sym_size_int_1 >= 0
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
d: "f32[u0 + ((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]" = torch.concat((x, c)); c = None
wrap_body_1 = self.wrap_body_1
wrap = torch.ops.higher_order.wrap(wrap_body_1, sym_size_int_1, s1, s0, d, x); wrap_body_1 = sym_size_int_1 = s1 = s0 = d = x = None
getitem: "f32[((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_1(torch.nn.Module):
def forward(self, u0: "Sym(u0)", s1: "Sym(s1)", s0: "Sym(s0)", d: "f32[u0 + ((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]", x: "f32[((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]"):
wrap_body_0 = self.wrap_body_0
wrap = torch.ops.higher_order.wrap(wrap_body_0, u0, s1, s0, d, x); wrap_body_0 = u0 = s1 = s0 = d = x = None
getitem: "f32[((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_0(torch.nn.Module):
def forward(self, u0: "Sym(u0)", s1: "Sym(s1)", s0: "Sym(s0)", d: "f32[u0 + ((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]", x: "f32[((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]"):
sum_1: "f32[]" = d.sum(); d = None
add: "f32[((s0*s1)//2), ((s0*s1)//(((s0*s1)//2)))]" = sum_1 + x; sum_1 = x = None
return (add,)
""",
)
def test_register_subclass(self):
from torch._higher_order_ops.cond import cond_op
from torch.testing._internal.two_tensor import TwoTensor
a = torch.tensor([1.0, 0.0, 1.0])
b = torch.randn(3)
t = TwoTensor(a, b)
with self.assertRaisesRegex(
NotImplementedError,
"no rule registered for HOP cond and subclass .*TwoTensor'>",
):
res = cond_op(a.sum() > 0, torch.sin, torch.cos, (t,))
called = 0
# Using cond.py_impl
@cond_op.py_impl(TwoTensor)
def _(pred, true_fn, false_fn, operands):
nonlocal called
called += 1
assert len(operands) == 1
a = cond_op(pred, true_fn, false_fn, (operands[0].a,))
b = cond_op(pred, true_fn, false_fn, (operands[0].b,))
return TwoTensor(a, b)
res = cond_op(a.sum() > 0, torch.sin, torch.cos, (t,))
self.assertEqual(res.a, torch.sin(a))
self.assertEqual(res.b, torch.sin(b))
self.assertEqual(called, 1)
def test_register_mode(self):
from torch._higher_order_ops.cond import cond_op
torch_dispatch_called = 0
class MyMode(torch.utils._python_dispatch.TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
nonlocal torch_dispatch_called
torch_dispatch_called += 1
return func(*args, **kwargs)
a = torch.tensor([1.0, 0.1, 1.0])
pred = a.sum() > 0
with self.assertRaisesRegex(
NotImplementedError,
"no rule registered for HigherOrderOperator cond and mode .*MyMode",
):
with MyMode():
res = cond_op(pred, torch.sin, torch.cos, (a,))
py_impl_called = 0
# Using cond.py_impl
@cond_op.py_impl(MyMode)
def _(mode, pred, true_fn, false_fn, operands):
nonlocal py_impl_called
py_impl_called += 1
return cond_op(pred, true_fn, false_fn, operands)
a = torch.tensor([1.0, 0.1, 1.0])
pred = a.sum() > 0
with MyMode():
res = cond_op(pred, torch.sin, torch.cos, (a,))
self.assertEqual(res, a.sin())
def test_capture_value_created_in_subgraph(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
x = torch.randn(3, 3)
y = torch.randn(3, 3)
def inner(x, y):
z = x + y
return wrap(lambda x: wrap(lambda x: x + z, x), x)
@torch.compile(backend=cnt, fullgraph=True)
def f(x, y):
return wrap(inner, x, y)
result = f(x, y)
self.assertEqual(result, x + y + x)
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(cnt.op_count, 2)
self.assertEqual(len(backend.graphs), 1)
# No changes to args of outer wrap
gm = backend.graphs[0]
wrap_node = find_first_node(gm, wrap)
self.assertTrue(len(wrap_node.args), 3)
# z was lifted to arg of inner wrap
body_function = getattr(gm, wrap_node.args[0].name)
# addition + wrap + getitem
self.assertEqual(op_count(body_function), 3)
inner_wrap_node = find_first_node(body_function, wrap)
self.assertTrue(len(inner_wrap_node.args), 3)
# Innermost body function: z was also lifted to arg
body_function = getattr(body_function, inner_wrap_node.args[0].name)
self.assertEqual(op_count(body_function), 2)
inner_wrap_node = find_first_node(body_function, wrap)
self.assertTrue(len(inner_wrap_node.args), 3)
def test_side_effect_set_new_attr_global_obj(self):
def setup():
global global_obj
global_obj = Obj()
def f(x):
def h(x):
def g(x):
global_obj.foo = x + 1
return x.clone()
y = wrap(g, x)
return y + global_obj.foo
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_set_existing_attr_global_obj(self):
def setup():
global global_obj
global_obj = Obj()
global_obj.foo = nn.Parameter(torch.tensor(4.0))
def f(x):
def h(x):
def g(x):
global_obj.foo = x + 1
return x.clone()
y = wrap(g, x)
return y + global_obj.foo
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_del_existing_attr_global_obj(self):
def setup():
global global_obj
global_obj = Obj()
global_obj.foo = torch.tensor(4.0)
def f(x):
def h(x):
def g(x):
del global_obj.foo
return x.clone()
y = wrap(g, x)
return y
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_set_new_attr_global_module(self):
def setup():
global global_module
global_module = MyModule()
def h(x):
def g(x):
global_module.foo = nn.Parameter(x + 1)
return x.clone()
y = wrap(g, x)
return y + global_module.foo
x = torch.zeros([])
self._assert_wrap_fallback(h, (x,), setup=setup)
def test_side_effect_set_existing_attr_global_module(self):
def setup():
global global_module
global_module = MyModule()
def h(x):
def g(x):
global_module.existing = nn.Parameter(torch.tensor(4.0))
return global_module(x)
y = wrap(g, x)
return y
x = torch.zeros([])
self._assert_wrap_fallback(h, (x,), setup=setup)
def test_side_effect_del_existing_attr_global_module(self):
def setup():
global global_module
global_module = MyModule()
def h(x):
def g(x):
del global_module.existing
return x.clone()
y = wrap(g, x)
return y
x = torch.zeros([])
self._assert_wrap_fallback(h, (x,), setup=setup)
def test_side_effect_mutate_global_num(self):
def setup():
global global_num
global_num = 3.14
def f(x):
def g(x):
global global_num
global_num = global_num + 1
return x + global_num
y = wrap(g, x)
return y + global_num
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_mutate_global_num_builtin(self):
def setup():
global global_num
global_num = 3.14
def f(x):
def g(x):
global global_num
global_num += 1
return x + global_num
y = wrap(g, x)
return y + global_num
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_mutate_global_tensor(self):
def setup():
global global_var
global_var = torch.ones(3)
def f(x):
def g(x):
global global_var
global_var = global_var + 1
return x + global_var
y = wrap(g, x)
return y + global_var
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_mutate_global_tensor_builtin(self):
def setup():
global global_var
global_var = torch.ones(3)
def f(x):
def g(x):
global global_var
global_var += 1
return x + global_var
y = wrap(g, x)
return y + global_var
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_mutate_global_list(self):
def setup():
global global_list
global_list = []
def f(x):
def g(x):
val = x + 1
global_list.append(val)
return global_list[-1]
y = wrap(g, x)
z = y + global_list[-1]
return z
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,), setup=setup)
def test_side_effect_mutate_nonlocal_num(self):
def f(x):
def h(x):
val = 1
def g(x):
nonlocal val
val = val + 1
return x + val
y = wrap(g, x)
z = y + val
return z
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,))
def test_side_effect_set_new_attr_nonlocal_obj(self):
def f(x):
def h(x):
obj = Obj()
def g(x):
obj.val = x.dim()
return x.clone()
y = wrap(g, x)
z = y + obj.val
return z
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,))
def test_side_effect_set_existing_attr_nonlocal_obj(self):
def f(x):
def h(x):
obj = Obj()
obj.val = 3
def g(x):
obj.val = x.dim()
return x.clone()
y = wrap(g, x)
z = y + obj.val
return z
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,))
def test_side_effect_del_existing_attr_nonlocal_obj(self):
def f(x):
def h(x):
obj = Obj()
obj.val = 3
def g(x):
del obj.val
return x.clone()
y = wrap(g, x)
return y
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,))
def test_side_effect_set_new_attr_nonlocal_module(self):
def h(x):
obj = MyModule()
def g(x):
obj.val = x.dim()
return x.clone()
y = wrap(g, x)
z = y + obj.val
return z
x = torch.zeros([])
self._assert_wrap_fallback(h, (x,))
def test_side_effect_set_existing_attr_nonlocal_module(self):
def h(x):
obj = MyModule()
def g(x):
obj.existing = nn.Parameter(torch.tensor(3.14))
return obj(x)
y = wrap(g, x)
return y
x = torch.zeros([])
self._assert_wrap_fallback(h, (x,))
def test_side_effect_del_existing_attr_nonlocal_module(self):
def h(x):
obj = MyModule()
def g(x):
del obj.existing
return x.clone()
y = wrap(g, x)
return y
x = torch.zeros([])
self._assert_wrap_fallback(h, (x,))
def test_side_effect_mutate_nonlocal_tensor(self):
def f(x):
def h(x):
val = torch.tensor(1.0)
def g(x):
nonlocal val
val = val + 1
return x + val
y = wrap(g, x)
z = y + val
return z
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,))
def test_side_effect_mutate_nonlocal_num_builtin(self):
def f(x):
def h(x):
val = 1
def g(x):
nonlocal val
val += 1
return x + val
y = wrap(g, x)
z = y + val
return z
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,))
def test_side_effect_mutate_nonlocal_tensor_builtin(self):
def f(x):
def h(x):
val = torch.tensor(1.0)
def g(x):
nonlocal val
val += 1
return x + val
y = wrap(g, x)
z = y + val
return z
return h(x)
x = torch.zeros([])
self._assert_wrap_fallback(f, (x,))
def test_side_effect_nonlocal_list_append_graph_break(self):
def g(x):
y = []
def f(k):
m = k + 1
y.append(m)
return k
wrap(f, x)
return y[0]
x = torch.randn(3, 3)
self._assert_wrap_fallback(g, (x,))
def test_side_effect_nested_nonlocal_list_append_graph_break(self):
def g(x):
def h(x):
y = []
def f(k):
m = k + 1
y.append(m)
return k
wrap(f, x)
return y[0]
return h(x)
x = torch.randn(3, 3)
self._assert_wrap_fallback(g, (x,))
def test_side_effect_local_list_append_no_graph_break(self):
def g(x):
def f(k):
y = []
y.append(k + 1)
return y[0]
return wrap(f, x)
x = torch.randn(3, 3)
arg_count = ifdynstaticdefault(2, 3)
self._test_wrap_simple(g, default_args_generator((x,)), arg_count)
def test_wrap_kwarg(self):
def f(x, y):
return wrap(lambda x, y: x + y, x, y=y)
x = torch.randn(3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(f, default_args_generator((x, y)), arg_count)
def test_wrap_kwarg_int(self):
def f(x, y):
return wrap(lambda x, y: x + y, x, y=y)
x = torch.randn(3)
y = 8
arg_count = (
ifdynstaticdefault(2, 3) + 1
if check_dynamic_shape_capture()
else ifdynstaticdefault(2, 3)
)
self._test_wrap_simple(f, default_args_generator((x, y)), arg_count)
def test_wrap_all_kwarg(self):
def f(y, x):
return wrap(lambda x, y: (x * 2) + y, x=x, y=y)
x = torch.randn(3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(f, default_args_generator((x, y)), arg_count)
def test_wrap_kwarg_only(self):
def f(x, y):
def fn(*, x, y):
return (x * 2) + y
return wrap(fn, x=x, y=y)
x = torch.randn(3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(f, default_args_generator((x, y)), arg_count)
def test_wrap_kwarg_default(self):
def f(x, y):
def fn(*, x, y, z=8):
return (x * 2) + y + z
return wrap(fn, x=x, y=y)
x = torch.randn(3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(f, default_args_generator((x, y)), arg_count)
def test_wrap_kwarg_default_if_branch(self):
def f(x, y):
def fn(*, x, y, z=None):
if z is None:
return (x * 2) + y
else:
return 2 * x
return wrap(fn, x=x, y=y)
x = torch.randn(3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(3, 4)
self._test_wrap_simple(f, default_args_generator((x, y)), arg_count)
def test_wrap_kwarg_recompile(self):
def f(x, y, z=None):
def fn(*, x, y, z=None):
if z is None:
return (x * 2) + y
else:
return 2 * x
return wrap(fn, x=x, y=y, z=z)
x = torch.randn(3)
y = torch.randn(3, 3)
counters.clear()
opt = torch.compile(f, backend="eager", fullgraph=True)
opt(x, y)
self.assertEqual(counters["stats"]["calls_captured"], 2)
# verify that we `don't` recompile
opt(x, y)
self.assertEqual(counters["stats"]["calls_captured"], 2)
output = opt(x, y, 8)
self.assertEqual(counters["stats"]["calls_captured"], 4)
self.assertEqual(output, 2 * x)
def test_wrap_kwarg_default_else_branch(self):
def f(x, y, z):
def fn(*, x, y, z=None):
if z is None:
return (x * 2) + y
else:
return 2 * x
return wrap(fn, x=x, y=y, z=z)
x = torch.randn(3)
y = torch.randn(3, 3)
arg_count = ifdynstaticdefault(2, 3)
self._test_wrap_simple(f, default_args_generator((x, y, 8)), arg_count)
def test_map_subgraph_name_is_valid(self):
xs = torch.randn(2, 3, 3)
y = torch.randn(3)
def map_f(xs, y):
def inner(x, y):
def inner2(x, y):
return x + y
return control_flow.map(inner2, x, y)
return control_flow.map(inner, xs, y)
graphs = self._check_map_graph_and_extract(map_f, (xs, y))
if graphs:
graph, body_graph = graphs
self.assertExpectedInline(
graph,
"""\
def forward(self, L_xs_ : torch.Tensor, L_y_ : torch.Tensor):
l_xs_ = L_xs_
l_y_ = L_y_
map_body_1 = self.map_body_1
map_impl = torch.ops.higher_order.map_impl(map_body_1, [l_xs_], [l_y_]); map_body_1 = l_xs_ = l_y_ = None
getitem = map_impl[0]; map_impl = None
return (getitem,)""",
)
self.assertExpectedInline(
body_graph,
"""\
def forward(self, child : torch.Tensor, l_y_ : torch.Tensor):
map_body_0 = self.map_body_0
map_impl = torch.ops.higher_order.map_impl(map_body_0, [child], [l_y_]); map_body_0 = child = l_y_ = None
getitem = map_impl[0]; map_impl = None
return (getitem,)""",
)
def test_map_multi_return(self):
def f(x):
return control_flow.map(lambda x: (x.sin(), x.sin()), x)
x = torch.randn(3)
graphs = self._check_map_graph_and_extract(f, (x,))
if graphs:
graph, body_graph = graphs
self.assertExpectedInline(
graph,
"""\
def forward(self, L_x_ : torch.Tensor):
l_x_ = L_x_
map_body_0 = self.map_body_0
map_impl = torch.ops.higher_order.map_impl(map_body_0, [l_x_], []); map_body_0 = l_x_ = None
getitem = map_impl[0]
getitem_1 = map_impl[1]; map_impl = None
return (getitem, getitem_1)""",
)
self.assertExpectedInline(
body_graph,
"""\
def forward(self, child : torch.Tensor):
child_1 = child.sin()
child_2 = child.sin(); child = None
return (child_1, child_2)""",
)
def test_map_pytree_return(self):
def _construct_pytree(a):
return (
a.clone(),
[[[a.clone()]]],
a.clone(),
(a.clone(), (a.clone(),), a.clone()),
{"a": a.clone()},
)
def f(x):
def inner_f(xs):
return _construct_pytree(xs)
return control_flow.map(inner_f, x)
x = torch.randn(3)
graphs = self._check_map_graph_and_extract(f, (x,))
if graphs:
graph, body_graph = graphs
self.assertExpectedInline(
graph,
"""\
def forward(self, L_x_ : torch.Tensor):
l_x_ = L_x_
map_body_0 = self.map_body_0
map_impl = torch.ops.higher_order.map_impl(map_body_0, [l_x_], []); map_body_0 = l_x_ = None
getitem = map_impl[0]
getitem_1 = map_impl[1]
getitem_2 = map_impl[2]
getitem_3 = map_impl[3]
getitem_4 = map_impl[4]
getitem_5 = map_impl[5]
value = map_impl[6]; map_impl = None
return (getitem, getitem_1, getitem_2, getitem_3, getitem_4, getitem_5, value)""",
)
self.assertExpectedInline(
body_graph,
"""\
def forward(self, child : torch.Tensor):
child_1 = child.clone()
child_2 = child.clone()
child_3 = child.clone()
child_4 = child.clone()
child_5 = child.clone()
child_6 = child.clone()
child_7 = child.clone(); child = None
return (child_1, child_2, child_3, child_4, child_5, child_6, child_7)""",
)
def test_map_kwargs(self):
cnt = CompileCounter()
@torch.compile(backend=cnt)
def f(x):
return control_flow.map(lambda x: x.sin(), x=x)
x = torch.randn(3)
self.assertRaises(TypeError, lambda: f(x))
self.assertEqual(cnt.frame_count, 0)
def test_map_symint_input(self):
def fn(x, y):
def inner(x, y):
return torch.sin(x + y)
return control_flow.map(inner, x, y.size(0))
x = torch.randn(3, 1)
y = torch.randn(3, 1)
graphs = self._check_map_graph_and_extract(fn, (x, y))
if graphs:
graph, body_graph = graphs
self.assertExpectedInline(
graph,
"""\
def forward(self, L_x_ : torch.Tensor):
l_x_ = L_x_
map_body_0 = self.map_body_0
map_impl = torch.ops.higher_order.map_impl(map_body_0, [l_x_], [3]); map_body_0 = l_x_ = None
getitem = map_impl[0]; map_impl = None
return (getitem,)""",
)
self.assertExpectedInline(
body_graph,
"""\
def forward(self, child : torch.Tensor, const_unused : int):
add = child + 3; child = None
sin = torch.sin(add); add = None
return (sin,)""",
)
def test_map_lowers_to_graph(self):
def fn(x, y):
def inner(x, y):
return torch.sin(x + y)
return control_flow.map(inner, x, y.size(0))
x = torch.randn(3, 1)
y = torch.randn(3, 1)
graphs = self._check_map_graph_and_extract(fn, (x, y))
if graphs:
graph, body_graph = graphs
self.assertExpectedInline(
graph,
"""\
def forward(self, L_x_ : torch.Tensor):
l_x_ = L_x_
map_body_0 = self.map_body_0
map_impl = torch.ops.higher_order.map_impl(map_body_0, [l_x_], [3]); map_body_0 = l_x_ = None
getitem = map_impl[0]; map_impl = None
return (getitem,)""",
)
self.assertExpectedInline(
body_graph,
"""\
def forward(self, child : torch.Tensor, const_unused : int):
add = child + 3; child = None
sin = torch.sin(add); add = None
return (sin,)""",
)
def test_map_example_value_metadata_consistent_with_eager(self):
from torch._higher_order_ops.map import map_dense
backend = EagerAndRecordGraphs()
def inner(x):
return x.sin(), x.cos().T, x.sin().view(-1)
rand_44 = torch.randn(4, 4)
inps = [
torch.randn(3),
torch.randn(3, 4),
torch.randn(3, 4, 5, requires_grad=True),
torch.randn(3, 4, 5, requires_grad=True).permute((2, 0, 1)),
torch.randn(3, 4, 5, requires_grad=True).detach(),
torch.randn(3, 4, 5, requires_grad=True).narrow(1, 1, 2),
rand_44.T,
rand_44[::2],
rand_44[::2, ::2],
rand_44[1::3, 1::3],
rand_44[1::3, 1::2].T,
rand_44.unsqueeze(1),
rand_44.squeeze(0),
rand_44.reshape(2, 8),
]
for x in inps:
compiled_ret = torch.compile( # noqa: F841
control_flow.map, backend=backend, fullgraph=True
)(inner, x)
eager_sin, eager_transpose, eager_view = map_dense(inner, (x,), ())
map_node = next(
node
for node in backend.graphs[0].graph.nodes
if node.op == "call_function" and "map" in node.name
)
fake_sin, fake_transpose, fake_view = map_node.meta["example_value"]
def _check_size_stride_contiguous(x, y):
self.assertEqual(y.size(), x.size())
self.assertEqual(y.stride(), x.stride())
self.assertEqual(y.requires_grad, x.requires_grad)
self.assertEqual(x.is_contiguous(), True)
self.assertEqual(y.is_contiguous(), True)
_check_size_stride_contiguous(eager_sin, fake_sin)
_check_size_stride_contiguous(eager_transpose, fake_transpose)
_check_size_stride_contiguous(eager_view, fake_view)
torch._dynamo.reset()
backend.graphs.clear()
def test_cond_subgraph_name_is_valid(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
pred = torch.tensor(True)
pred2 = torch.tensor(False)
xs = torch.randn(2, 3, 3)
y = torch.randn(3, 3)
@torch.compile(backend=cnt, fullgraph=True)
def cond_f(pred, pred2, x, y):
def true_fn(pred2, x, y):
return x + y
def false_fn(pred2, x, y):
def true_fn2(x, y):
return x.sin() - y.cos()
def false_fn2(x, y):
return x.cos() - y.sin()
return control_flow.cond(pred2, true_fn2, false_fn2, [x, y])
return control_flow.cond(pred, true_fn, false_fn, [pred2, x, y])
result = cond_f(pred, pred2, xs, y)
self.assertEqual(result, xs + y)
cond_gm = backend.graphs[0]
name_set = set()
name_set.update(name for name, _ in cond_gm.named_modules())
self.assertEqual(
name_set,
{
"",
"cond_true_1",
"cond_false_1",
"cond_false_1.cond_false_0",
"cond_false_1.cond_true_0",
},
)
@torch._dynamo.config.patch(
assume_static_by_default=True,
dynamic_shapes=True,
)
def test_cond_graph_break_in_one_branch(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer = torch.nn.Buffer(torch.ones(6, 4))
def forward(self, x):
def true_fn(x):
self.buffer += 1
return self.buffer.sum() + x.sum()
def false_fn(x):
return (x - 1).sum()
return control_flow.cond(x.sum() > 4, true_fn, false_fn, [x])
mod_for_compile = torch.compile(Foo(), backend=cnt, dynamic=True)
mod_for_eager = Foo()
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
r"Cond doesn't work unless it is captured completely with torch.compile",
):
mod_for_eager(torch.ones(6, 4))
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
r"Cond doesn't work unless it is captured completely with torch.compile",
):
mod_for_compile(torch.ones(3, 4))
def test_cond_free_variable_in_both_branches(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
z = torch.ones(4, 4)
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer = torch.nn.Buffer(torch.ones(6, 4))
def forward(self, x, y):
def true_fn(x):
return x.sum() + self.buffer.sum() + z.sum()
def false_fn(x):
return x.sum() - z.sum() - self.buffer.sum()
return control_flow.cond(y, true_fn, false_fn, [x])
mod_for_compile = torch.compile(
Foo(), backend=cnt, dynamic=True, fullgraph=True
)
mod_for_eager = Foo()
self.assertEqual(
mod_for_compile(torch.tensor(True), torch.tensor(5)),
mod_for_eager(torch.tensor(True), torch.tensor(5)),
)
for node in backend.graphs[0].graph.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.higher_order.cond
):
_, _, _, operands = node.args
# Since we compile with dynamic, each branch takes 4 inputs (buffer, x, z, s1)
self.assertEqual(len(operands), 4)
if node.op == "get_attr":
if str(node.target) in ("cond_true_0, cond_false_0"):
num_placeholders = len(
[
node
for node in getattr(
backend.graphs[0], str(node.target)
).graph.nodes
if node.op == "placeholder"
]
)
self.assertEqual(num_placeholders, 4)
def _check_cond_graph_and_extract(self, fn, args):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
out = torch.compile(fn, backend=cnt, fullgraph=True)(*args)
self.assertEqual(out, fn(*args))
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(len(backend.graphs), 1)
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
gm = backend.graphs[0]
graph = gm.code.strip()
true_graph = gm.cond_true_0.code.strip()
false_graph = gm.cond_false_0.code.strip()
return (graph, true_graph, false_graph)
def _check_map_graph_and_extract(self, fn, args):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
out = torch.compile(fn, backend=cnt, fullgraph=True)(*args)
self.assertEqual(out, fn(*args))
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(len(backend.graphs), 1)
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
gm = backend.graphs[0]
graph = gm.code.strip()
subgraphs = []
for module_name in gm._modules:
subgraphs.append(getattr(gm, module_name).code.strip())
return (graph, *subgraphs)
def test_cond_branches_no_arguments(self):
def fn(x):
def true_fn():
return torch.sin(x)
def false_fn():
return torch.cos(x)
return control_flow.cond(x.sum() > 0, true_fn, false_fn, ())
graphs = self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
if graphs is not None:
graph, true_graph, false_graph = graphs
self.assertExpectedInline(
graph,
"""\
def forward(self, L_x_ : torch.Tensor):
l_x_ = L_x_
sum_1 = l_x_.sum()
gt = sum_1 > 0; sum_1 = None
cond_true_0 = self.cond_true_0
cond_false_0 = self.cond_false_0
cond = torch.ops.higher_order.cond(gt, cond_true_0, cond_false_0, (l_x_,)); gt = cond_true_0 = cond_false_0 = l_x_ = None
getitem = cond[0]; cond = None
return (getitem,)""",
)
self.assertExpectedInline(
true_graph,
"""\
def forward(self, l_x_):
l_x__1 = l_x_
sin = torch.sin(l_x__1); l_x__1 = None
return (sin,)""",
)
self.assertExpectedInline(
false_graph,
"""\
def forward(self, l_x_):
l_x__1 = l_x_
cos = torch.cos(l_x__1); l_x__1 = None
return (cos,)""",
)
def test_cond_branches_no_arguments_no_closure(self):
def fn(x):
def true_fn():
return torch.ones(3, 4)
def false_fn():
return torch.ones(3, 4).sin()
return control_flow.cond(x.sum() > 0, true_fn, false_fn, ())
self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
graphs = self._check_cond_graph_and_extract(fn, (torch.randn(4, 5),))
if graphs is not None:
graph, true_graph, false_graph = graphs
self.assertExpectedInline(
graph,
"""\
def forward(self, L_x_ : torch.Tensor):
l_x_ = L_x_
sum_1 = l_x_.sum(); l_x_ = None
gt = sum_1 > 0; sum_1 = None
cond_true_0 = self.cond_true_0
cond_false_0 = self.cond_false_0
cond = torch.ops.higher_order.cond(gt, cond_true_0, cond_false_0, ()); gt = cond_true_0 = cond_false_0 = None
getitem = cond[0]; cond = None
return (getitem,)""",
)
self.assertExpectedInline(
true_graph,
"""\
def forward(self):
ones = torch.ones(3, 4)
return (ones,)""",
)
self.assertExpectedInline(
false_graph,
"""\
def forward(self):
ones = torch.ones(3, 4)
sin = ones.sin(); ones = None
return (sin,)""",
)
def test_cond_side_effect_in_one_branches(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
z = [torch.ones(4, 4)]
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, y, x):
def true_fn(x):
z.append(x)
z.append(x)
z.pop()
return x.sum() + z[-1].sum()
def false_fn(x):
return x.sum() - z[0].sum()
return control_flow.cond(y, true_fn, false_fn, [x])
mod_for_eager = Foo()
mod_for_compile = torch.compile(
Foo(), backend=cnt, dynamic=True, fullgraph=False
)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
r"Cond doesn't work unless it is captured completely with torch.compile",
):
mod_for_eager(torch.tensor(True), torch.tensor(5))
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
r"Cond doesn't work unless it is captured completely with torch.compile",
):
mod_for_compile(torch.tensor(True), torch.tensor(5))
def test_cond_with_constant_pred(self):
def test(pred, x):
def true_fn(x):
return x
def false_fn(x):
return -x
return control_flow.cond(pred, true_fn, false_fn, [x])
opt_test = torch.compile(test, backend="eager")
inp = torch.ones(3, 3)
self.assertTrue(torch.allclose(test(True, inp), opt_test(True, inp)))
self.assertTrue(torch.allclose(test(False, inp), opt_test(False, inp)))
def test_map_graph_break(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.nn.Buffer(torch.ones(6, 4))
def forward(self, xs):
def body(x):
self.w += 1
return x
return control_flow.map(body, xs)
mod = Module()
mod_for_compile = torch.compile(mod, backend=cnt, dynamic=True, fullgraph=False)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"map doesn't work unless it is captured completely with torch.compile",
):
mod_for_compile(torch.Tensor([[6, 4, 5], [3, 4, 5], [6, 6, 6]]))
def test_map_side_effect(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
z = [torch.ones(6, 4)]
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.nn.Buffer(torch.ones(6, 4))
def forward(self, xs):
def body(x):
z.append(x)
z.append(x)
z.pop()
return x + z[-1].sum()
return control_flow.map(body, xs)
mod = Module()
mod_for_compile = torch.compile(mod, backend=cnt, dynamic=True, fullgraph=False)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"map doesn't work unless it is captured completely with torch.compile",
):
mod_for_compile(torch.Tensor([[6, 4, 5], [3, 4, 5], [6, 6, 6]]))
def test_wrap_subgraph_name_is_valid(self):
backend = EagerAndRecordGraphs()
cnt = CompileCounterWithBackend(backend)
x = torch.randn(3, 3)
y = torch.randn(3, 3)
def inner(x, y):
z = x + y
return wrap(lambda x: wrap(lambda x: x + z, x), x)
@torch.compile(backend=cnt, fullgraph=True)
def f(x, y):
return wrap(inner, x, y)
result = f(x, y)
self.assertEqual(result, x + y + x)
wrap_gm = backend.graphs[0]
names = set()
names.update(mod_name for mod_name, _ in wrap_gm.named_modules())
self.assertEqual(
names,
{
"",
"wrap_body_2",
"wrap_body_2.wrap_body_1",
"wrap_body_2.wrap_body_1.wrap_body_0",
},
)
def test_wrap_allow_local_assign_in_body_fn(self):
def f(arg1, arg2):
def inner_f(arg1, arg2):
a = arg1
b = arg2
ret = []
for x in a:
ret.append(x + 1)
for x in b:
ret.append(x + 1)
return ret
return wrap(inner_f, arg1, arg2)
x = torch.ones(3)
def my_args_generator():
yield [x], [x.sin()]
yield (x,), (x.sin(),)
arg_count = ifdynstaticdefault(3, 4)
actual_graph = self._test_wrap_simple(
f,
my_args_generator(),
arg_count,
3,
return_graph=True,
)
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
self.assertExpectedInline(
actual_graph,
"""\
| GraphModule |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_test.py | {
"start": 3947,
"end": 16338
} | class ____(test.TestCase):
def test_all_shape_properties_defined_by_the_one_property_shape(self):
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, operator.shape)
self.assertAllEqual(4, operator.tensor_rank)
self.assertAllEqual((1, 2), operator.batch_shape)
self.assertAllEqual(4, operator.domain_dimension)
self.assertAllEqual(3, operator.range_dimension)
expected_parameters = {
"is_non_singular": None,
"is_positive_definite": None,
"is_self_adjoint": None,
"is_square": None,
"shape": (1, 2, 3, 4),
}
self.assertEqual(expected_parameters, operator.parameters)
def test_all_shape_methods_defined_by_the_one_method_shape(self):
with self.cached_session():
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, self.evaluate(operator.shape_tensor()))
self.assertAllEqual(4, self.evaluate(operator.tensor_rank_tensor()))
self.assertAllEqual((1, 2), self.evaluate(operator.batch_shape_tensor()))
self.assertAllEqual(4, self.evaluate(operator.domain_dimension_tensor()))
self.assertAllEqual(3, self.evaluate(operator.range_dimension_tensor()))
def test_is_x_properties(self):
operator = LinearOperatorShape(
shape=(2, 2),
is_non_singular=False,
is_self_adjoint=True,
is_positive_definite=False)
self.assertFalse(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
self.assertFalse(operator.is_positive_definite)
def test_nontrivial_parameters(self):
matrix = rng.randn(2, 3, 4)
matrix_ph = array_ops.placeholder_with_default(input=matrix, shape=None)
operator = LinearOperatorMatmulSolve(matrix_ph)
expected_parameters = {
"is_non_singular": None,
"is_positive_definite": None,
"is_self_adjoint": None,
"is_square": None,
"matrix": matrix_ph,
}
self.assertEqual(expected_parameters, operator.parameters)
def test_generic_to_dense_method_non_square_matrix_static(self):
matrix = rng.randn(2, 3, 4)
operator = LinearOperatorMatmulSolve(matrix)
with self.cached_session():
operator_dense = operator.to_dense()
self.assertAllEqual((2, 3, 4), operator_dense.shape)
self.assertAllClose(matrix, self.evaluate(operator_dense))
def test_generic_to_dense_method_non_square_matrix_tensor(self):
matrix = rng.randn(2, 3, 4)
matrix_ph = array_ops.placeholder_with_default(input=matrix, shape=None)
operator = LinearOperatorMatmulSolve(matrix_ph)
operator_dense = operator.to_dense()
self.assertAllClose(matrix, self.evaluate(operator_dense))
def test_matvec(self):
matrix = [[1., 0], [0., 2.]]
operator = LinearOperatorMatmulSolve(matrix)
x = [1., 1.]
with self.cached_session():
y = operator.matvec(x)
self.assertAllEqual((2,), y.shape)
self.assertAllClose([1., 2.], self.evaluate(y))
def test_solvevec(self):
matrix = [[1., 0], [0., 2.]]
operator = LinearOperatorMatmulSolve(matrix)
y = [1., 1.]
with self.cached_session():
x = operator.solvevec(y)
self.assertAllEqual((2,), x.shape)
self.assertAllClose([1., 1 / 2.], self.evaluate(x))
def test_is_square_set_to_true_for_square_static_shapes(self):
operator = LinearOperatorShape(shape=(2, 4, 4))
self.assertTrue(operator.is_square)
def test_is_square_set_to_false_for_square_static_shapes(self):
operator = LinearOperatorShape(shape=(2, 3, 4))
self.assertFalse(operator.is_square)
def test_is_square_set_incorrectly_to_false_raises(self):
with self.assertRaisesRegex(ValueError, "but.*was square"):
_ = LinearOperatorShape(shape=(2, 4, 4), is_square=False).is_square
def test_is_square_set_inconsistent_with_other_hints_raises(self):
with self.assertRaisesRegex(ValueError, "is always square"):
matrix = array_ops.placeholder_with_default(input=(), shape=None)
LinearOperatorMatmulSolve(matrix, is_non_singular=True, is_square=False)
with self.assertRaisesRegex(ValueError, "is always square"):
matrix = array_ops.placeholder_with_default(input=(), shape=None)
LinearOperatorMatmulSolve(
matrix, is_positive_definite=True, is_square=False)
def test_non_square_operators_raise_on_determinant_and_solve(self):
operator = LinearOperatorShape((2, 3))
with self.assertRaisesRegex(NotImplementedError, "not be square"):
operator.determinant()
with self.assertRaisesRegex(NotImplementedError, "not be square"):
operator.log_abs_determinant()
with self.assertRaisesRegex(NotImplementedError, "not be square"):
operator.solve(rng.rand(2, 2))
with self.assertRaisesRegex(ValueError, "is always square"):
matrix = array_ops.placeholder_with_default(input=(), shape=None)
LinearOperatorMatmulSolve(
matrix, is_positive_definite=True, is_square=False)
def test_is_square_manual_set_works(self):
matrix = array_ops.placeholder_with_default(
input=np.ones((2, 2)), shape=None)
operator = LinearOperatorMatmulSolve(matrix)
if not context.executing_eagerly():
# Eager mode will read in the default value, and discover the answer is
# True. Graph mode must rely on the hint, since the placeholder has
# shape=None...the hint is, by default, None.
self.assertEqual(None, operator.is_square)
# Set to True
operator = LinearOperatorMatmulSolve(matrix, is_square=True)
self.assertTrue(operator.is_square)
def test_linear_operator_matmul_hints_closed(self):
matrix = array_ops.placeholder_with_default(input=np.ones((2, 2)),
shape=None)
operator1 = LinearOperatorMatmulSolve(matrix)
operator_matmul = operator1.matmul(operator1)
if not context.executing_eagerly():
# Eager mode will read in the input and discover matrix is square.
self.assertEqual(None, operator_matmul.is_square)
self.assertEqual(None, operator_matmul.is_non_singular)
self.assertEqual(None, operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite)
operator2 = LinearOperatorMatmulSolve(
matrix,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True,
)
operator_matmul = operator2.matmul(operator2)
self.assertTrue(operator_matmul.is_square)
self.assertTrue(operator_matmul.is_non_singular)
# A @ A is SA since A is.
self.assertEqual(True, operator_matmul.is_self_adjoint)
# A @ A is non-singular (since A is) and A @ A = A @ A.H is semi-def so...
self.assertEqual(True, operator_matmul.is_positive_definite)
def test_linear_operator_matmul_hints_false(self):
matrix1 = array_ops.placeholder_with_default(
input=rng.rand(2, 2), shape=None)
operator1 = LinearOperatorMatmulSolve(
matrix1,
is_non_singular=False,
is_self_adjoint=False,
is_positive_definite=False,
is_square=True,
)
operator_matmul = operator1.matmul(operator1)
self.assertTrue(operator_matmul.is_square)
self.assertFalse(operator_matmul.is_non_singular)
self.assertEqual(None, operator_matmul.is_self_adjoint)
self.assertEqual(None, operator_matmul.is_positive_definite)
matrix2 = array_ops.placeholder_with_default(
input=rng.rand(2, 3), shape=None)
operator2 = LinearOperatorMatmulSolve(
matrix2,
is_non_singular=False,
is_self_adjoint=False,
is_positive_definite=False,
is_square=False,
)
operator_matmul = operator2.matmul(operator2, adjoint_arg=True)
# Composition recognizes this as the form A @ A.H, which is square, SA.
self.assertTrue(operator_matmul.is_square)
self.assertTrue(operator_matmul.is_self_adjoint)
if context.executing_eagerly():
# False since we specified is_non_singular=False.
self.assertFalse(operator_matmul.is_non_singular)
else:
# May be non-singular, since it's the composition of two non-square.
# TODO(b/136162840) This is a bit inconsistent, and should probably be
# False since we specified operator2.is_non_singular == False.
self.assertIsNone(operator_matmul.is_non_singular)
# No way to deduce these, even in Eager mode.
self.assertIsNone(operator_matmul.is_positive_definite)
def test_linear_operator_matmul_hint_infer_square(self):
matrix1 = array_ops.placeholder_with_default(
input=rng.rand(2, 3), shape=(2, 3))
matrix2 = array_ops.placeholder_with_default(
input=rng.rand(3, 2), shape=(3, 2))
matrix3 = array_ops.placeholder_with_default(
input=rng.rand(3, 4), shape=(3, 4))
operator1 = LinearOperatorMatmulSolve(matrix1, is_square=False)
operator2 = LinearOperatorMatmulSolve(matrix2, is_square=False)
operator3 = LinearOperatorMatmulSolve(matrix3, is_square=False)
self.assertTrue(operator1.matmul(operator2).is_square)
self.assertTrue(operator2.matmul(operator1).is_square)
self.assertFalse(operator1.matmul(operator3).is_square)
def testDispatchedMethods(self):
operator = linalg.LinearOperatorFullMatrix(
[[1., 0.5], [0.5, 1.]],
is_square=True,
is_self_adjoint=True,
is_non_singular=True,
is_positive_definite=True)
methods = {
"trace": linalg.trace,
"diag_part": linalg.diag_part,
"log_abs_determinant": linalg.logdet,
"determinant": linalg.det
}
for method in methods:
op_val = getattr(operator, method)()
linalg_val = methods[method](operator)
self.assertAllClose(
self.evaluate(op_val),
self.evaluate(linalg_val))
# Solve and Matmul go here.
adjoint = linalg.adjoint(operator)
self.assertIsInstance(adjoint, linalg.LinearOperator)
cholesky = linalg.cholesky(operator)
self.assertIsInstance(cholesky, linalg.LinearOperator)
inverse = linalg.inv(operator)
self.assertIsInstance(inverse, linalg.LinearOperator)
def testDispatchMatmulSolve(self):
operator = linalg.LinearOperatorFullMatrix(
np.float64([[1., 0.5], [0.5, 1.]]),
is_square=True,
is_self_adjoint=True,
is_non_singular=True,
is_positive_definite=True)
rhs = np.random.uniform(-1., 1., size=[3, 2, 2])
for adjoint in [False, True]:
for adjoint_arg in [False, True]:
op_val = operator.matmul(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
matmul_val = math_ops.matmul(
operator, rhs, adjoint_a=adjoint, adjoint_b=adjoint_arg)
self.assertAllClose(
self.evaluate(op_val), self.evaluate(matmul_val))
op_val = operator.solve(rhs, adjoint=adjoint)
solve_val = linalg.solve(operator, rhs, adjoint=adjoint)
self.assertAllClose(
self.evaluate(op_val), self.evaluate(solve_val))
def testDispatchMatmulLeftOperatorIsTensor(self):
mat = np.float64([[1., 0.5], [0.5, 1.]])
right_operator = linalg.LinearOperatorFullMatrix(
mat,
is_square=True,
is_self_adjoint=True,
is_non_singular=True,
is_positive_definite=True)
lhs = np.random.uniform(-1., 1., size=[3, 2, 2])
for adjoint in [False, True]:
for adjoint_arg in [False, True]:
op_val = math_ops.matmul(
lhs, mat, adjoint_a=adjoint, adjoint_b=adjoint_arg)
matmul_val = math_ops.matmul(
lhs, right_operator, adjoint_a=adjoint, adjoint_b=adjoint_arg)
self.assertAllClose(
self.evaluate(op_val), self.evaluate(matmul_val))
def testVectorizedMap(self):
def fn(x):
y = constant_op.constant([3., 4.])
# Make a [2, N, N] shaped operator.
x = x * y[..., array_ops.newaxis, array_ops.newaxis]
operator = linalg.LinearOperatorFullMatrix(
x, is_square=True)
return operator
x = np.random.uniform(-1., 1., size=[3, 5, 5]).astype(np.float32)
batched_operator = control_flow_ops.vectorized_map(
fn, ops.convert_to_tensor(x))
self.assertIsInstance(batched_operator, linalg.LinearOperator)
self.assertAllEqual(batched_operator.batch_shape, [3, 2])
if __name__ == "__main__":
test.main()
| LinearOperatorTest |
python | apache__airflow | providers/git/src/airflow/providers/git/hooks/git.py | {
"start": 1059,
"end": 4340
} | class ____(BaseHook):
"""
Hook for git repositories.
:param git_conn_id: Connection ID for SSH connection to the repository
"""
conn_name_attr = "git_conn_id"
default_conn_name = "git_default"
conn_type = "git"
hook_name = "GIT"
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
return {
"hidden_fields": ["schema"],
"relabeling": {
"login": "Username or Access Token name",
"host": "Repository URL",
"password": "Access Token (optional)",
},
"placeholders": {
"extra": json.dumps(
{
"key_file": "optional/path/to/keyfile",
"private_key": "optional inline private key",
}
)
},
}
def __init__(
self, git_conn_id: str = "git_default", repo_url: str | None = None, *args, **kwargs
) -> None:
super().__init__()
connection = self.get_connection(git_conn_id)
self.repo_url = repo_url or connection.host
self.user_name = connection.login or "user"
self.auth_token = connection.password
self.private_key = connection.extra_dejson.get("private_key")
self.key_file = connection.extra_dejson.get("key_file")
self.strict_host_key_checking = connection.extra_dejson.get("strict_host_key_checking", "no")
self.env: dict[str, str] = {}
if self.key_file and self.private_key:
raise AirflowException("Both 'key_file' and 'private_key' cannot be provided at the same time")
self._process_git_auth_url()
def _build_ssh_command(self, key_path: str) -> str:
return (
f"ssh -i {key_path} "
f"-o IdentitiesOnly=yes "
f"-o StrictHostKeyChecking={self.strict_host_key_checking}"
)
def _process_git_auth_url(self):
if not isinstance(self.repo_url, str):
return
if self.auth_token and self.repo_url.startswith("https://"):
self.repo_url = self.repo_url.replace("https://", f"https://{self.user_name}:{self.auth_token}@")
elif self.auth_token and self.repo_url.startswith("http://"):
self.repo_url = self.repo_url.replace("http://", f"http://{self.user_name}:{self.auth_token}@")
elif self.repo_url.startswith("http://"):
# if no auth token, use the repo url as is
self.repo_url = self.repo_url
elif not self.repo_url.startswith("git@") or not self.repo_url.startswith("https://"):
self.repo_url = os.path.expanduser(self.repo_url)
def set_git_env(self, key: str) -> None:
self.env["GIT_SSH_COMMAND"] = self._build_ssh_command(key)
@contextlib.contextmanager
def configure_hook_env(self):
if self.private_key:
with tempfile.NamedTemporaryFile(mode="w", delete=True) as tmp_keyfile:
tmp_keyfile.write(self.private_key)
tmp_keyfile.flush()
os.chmod(tmp_keyfile.name, 0o600)
self.set_git_env(tmp_keyfile.name)
yield
else:
self.set_git_env(self.key_file)
yield
| GitHook |
python | getsentry__sentry | tests/sentry/api/test_api_pagination_check.py | {
"start": 285,
"end": 2788
} | class ____(TestCase):
def test_if_wrong_api_method_fails(self) -> None:
class ExampleEndpoint(TestCase, Endpoint):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.access = "read"
# Required to go through the dispatch method
def check_permissions(self, request: Request) -> None:
pass
def get(self, request, *args, **kwargs):
return Response(data=[])
# Test the endpoint, assert there is a MissingPaginationError
with pytest.raises(MissingPaginationError):
endpoint = ExampleEndpoint()
request = RequestFactory().get("/")
request.access = "read"
ExampleEndpoint.dispatch(endpoint, request)
def test_endpoint_in_allowlist(self) -> None:
class GroupTagsEndpoint(TestCase, Endpoint):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.access = "read"
# Required to go through the dispatch method
def check_permissions(self, request: Request) -> None:
pass
def get(self, request, *args, **kwargs):
return Response(data=[])
# Test the endpoint, assert there is no MissingPaginationError
endpoint = GroupTagsEndpoint()
request = RequestFactory().get("/")
request.access = "read"
GroupTagsEndpoint.dispatch(endpoint, request)
def test_empty_payload_with_pagination(self) -> None:
class ExampleEndpoint(Endpoint):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.access = "read"
# Required to go through the dispatch method
def check_permissions(self, request: Request) -> None:
pass
# call the pagination method
def get(self, request, *args, **kwargs):
return self.paginate(
request=request,
queryset=[],
paginator_cls=OffsetPaginator,
on_results=lambda data: Response(data),
)
# Test the endpoint, assert there is no MissingPaginationError
endpoint = ExampleEndpoint()
request = RequestFactory().get("/")
request.access = "read"
ExampleEndpoint.dispatch(endpoint, request)
| APIPaginationCheckTestCase |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/dataflow.py | {
"start": 6309,
"end": 6509
} | class ____:
"""Helper class with Dataflow job types."""
JOB_TYPE_UNKNOWN = "JOB_TYPE_UNKNOWN"
JOB_TYPE_BATCH = "JOB_TYPE_BATCH"
JOB_TYPE_STREAMING = "JOB_TYPE_STREAMING"
| DataflowJobType |
python | pandas-dev__pandas | pandas/core/reshape/merge.py | {
"start": 31594,
"end": 82366
} | class ____:
"""
Perform a database (SQL) merge operation between two DataFrame or Series
objects using either columns as keys or their row indexes
"""
_merge_type = "merge"
how: JoinHow | Literal["asof"]
on: IndexLabel | None
# left_on/right_on may be None when passed, but in validate_specification
# get replaced with non-None.
left_on: Sequence[Hashable | AnyArrayLike]
right_on: Sequence[Hashable | AnyArrayLike]
left_index: bool
right_index: bool
sort: bool
suffixes: Suffixes
indicator: str | bool
validate: str | None
join_names: list[Hashable]
right_join_keys: list[ArrayLike]
left_join_keys: list[ArrayLike]
def __init__(
self,
left: DataFrame | Series,
right: DataFrame | Series,
how: JoinHow | Literal["left_anti", "right_anti", "asof"] = "inner",
on: IndexLabel | AnyArrayLike | None = None,
left_on: IndexLabel | AnyArrayLike | None = None,
right_on: IndexLabel | AnyArrayLike | None = None,
left_index: bool = False,
right_index: bool = False,
sort: bool = True,
suffixes: Suffixes = ("_x", "_y"),
indicator: str | bool = False,
validate: str | None = None,
) -> None:
_left = _validate_operand(left)
_right = _validate_operand(right)
self.left = self.orig_left = _left
self.right = self.orig_right = _right
self.how, self.anti_join = self._validate_how(how)
self.on = com.maybe_make_list(on)
self.suffixes = suffixes
self.sort = sort or how == "outer"
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if not is_bool(left_index):
raise ValueError(
f"left_index parameter must be of type bool, not {type(left_index)}"
)
if not is_bool(right_index):
raise ValueError(
f"right_index parameter must be of type bool, not {type(right_index)}"
)
# GH 40993: raise when merging between different levels; enforced in 2.0
if _left.columns.nlevels != _right.columns.nlevels:
msg = (
"Not allowed to merge between different levels. "
f"({_left.columns.nlevels} levels on the left, "
f"{_right.columns.nlevels} on the right)"
)
raise MergeError(msg)
self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on)
(
self.left_join_keys,
self.right_join_keys,
self.join_names,
left_drop,
right_drop,
) = self._get_merge_keys()
if left_drop:
self.left = self.left._drop_labels_or_levels(left_drop)
if right_drop:
self.right = self.right._drop_labels_or_levels(right_drop)
self._maybe_require_matching_dtypes(self.left_join_keys, self.right_join_keys)
self._validate_tolerance(self.left_join_keys)
# validate the merge keys dtypes. We may need to coerce
# to avoid incompatible dtypes
self._maybe_coerce_merge_keys()
# If argument passed to validate,
# check if columns specified as unique
# are in fact unique.
if validate is not None:
self._validate_validate_kwd(validate)
@final
def _validate_how(
self, how: JoinHow | Literal["left_anti", "right_anti", "asof"]
) -> tuple[JoinHow | Literal["asof"], bool]:
"""
Validate the 'how' parameter and return the actual join type and whether
this is an anti join.
"""
# GH 59435: raise when "how" is not a valid Merge type
merge_type = {
"left",
"right",
"inner",
"outer",
"left_anti",
"right_anti",
"cross",
"asof",
}
if how not in merge_type:
raise ValueError(
f"'{how}' is not a valid Merge type: "
f"left, right, inner, outer, left_anti, right_anti, cross, asof"
)
anti_join = False
if how in {"left_anti", "right_anti"}:
how = how.split("_")[0] # type: ignore[assignment]
anti_join = True
how = cast(JoinHow | Literal["asof"], how)
return how, anti_join
def _maybe_require_matching_dtypes(
self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike]
) -> None:
# Overridden by AsOfMerge
pass
def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None:
# Overridden by AsOfMerge
pass
@final
def _reindex_and_concat(
self,
join_index: Index,
left_indexer: npt.NDArray[np.intp] | None,
right_indexer: npt.NDArray[np.intp] | None,
) -> DataFrame:
"""
reindex along index and concat along columns.
"""
# Take views so we do not alter the originals
left = self.left[:]
right = self.right[:]
llabels, rlabels = _items_overlap_with_suffix(
self.left._info_axis, self.right._info_axis, self.suffixes
)
if left_indexer is not None and not is_range_indexer(left_indexer, len(left)):
# Pinning the index here (and in the right code just below) is not
# necessary, but makes the `.take` more performant if we have e.g.
# a MultiIndex for left.index.
lmgr = left._mgr.reindex_indexer(
join_index,
left_indexer,
axis=1,
only_slice=True,
allow_dups=True,
use_na_proxy=True,
)
left = left._constructor_from_mgr(lmgr, axes=lmgr.axes)
left.index = join_index
if right_indexer is not None and not is_range_indexer(
right_indexer, len(right)
):
rmgr = right._mgr.reindex_indexer(
join_index,
right_indexer,
axis=1,
only_slice=True,
allow_dups=True,
use_na_proxy=True,
)
right = right._constructor_from_mgr(rmgr, axes=rmgr.axes)
right.index = join_index
from pandas import concat
left.columns = llabels
right.columns = rlabels
result = concat([left, right], axis=1)
return result
def get_result(self) -> DataFrame:
"""
Execute the merge.
"""
if self.indicator:
self.left, self.right = self._indicator_pre_merge(self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
result = self._reindex_and_concat(join_index, left_indexer, right_indexer)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
self._maybe_restore_index_levels(result)
return result.__finalize__(
types.SimpleNamespace(input_objs=[self.left, self.right]), method="merge"
)
@final
@cache_readonly
def _indicator_name(self) -> str | None:
if isinstance(self.indicator, str):
return self.indicator
elif isinstance(self.indicator, bool):
return "_merge" if self.indicator else None
else:
raise ValueError(
"indicator option can only accept boolean or string arguments"
)
@final
def _indicator_pre_merge(
self, left: DataFrame, right: DataFrame
) -> tuple[DataFrame, DataFrame]:
"""
Add one indicator column to each of the left and right inputs.
These columns are used to produce another column in the output of the
merge, indicating for each row of the output whether it was produced
using the left, right or both inputs.
"""
columns = left.columns.union(right.columns)
for i in ["_left_indicator", "_right_indicator"]:
if i in columns:
raise ValueError(
"Cannot use `indicator=True` option when "
f"data contains a column named {i}"
)
if self._indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column"
)
left = left.copy(deep=False)
right = right.copy(deep=False)
left["_left_indicator"] = 1
left["_left_indicator"] = left["_left_indicator"].astype("int8")
right["_right_indicator"] = 2
right["_right_indicator"] = right["_right_indicator"].astype("int8")
return left, right
@final
def _indicator_post_merge(self, result: DataFrame) -> DataFrame:
"""
Add an indicator column to the merge result.
This column indicates for each row of the output whether it was produced using
the left, right or both inputs.
"""
result["_left_indicator"] = result["_left_indicator"].fillna(0)
result["_right_indicator"] = result["_right_indicator"].fillna(0)
result[self._indicator_name] = Categorical(
(result["_left_indicator"] + result["_right_indicator"]),
categories=[1, 2, 3],
)
result[self._indicator_name] = result[
self._indicator_name
].cat.rename_categories(["left_only", "right_only", "both"])
result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1)
return result
@final
def _maybe_restore_index_levels(self, result: DataFrame) -> None:
"""
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
"""
names_to_restore = []
for name, left_key, right_key in zip(
self.join_names, self.left_on, self.right_on, strict=True
):
if (
# Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
# type "Union[Hashable, ExtensionArray, Index, Series]"; expected
# "Hashable"
self.orig_left._is_level_reference(left_key) # type: ignore[arg-type]
# Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
# type "Union[Hashable, ExtensionArray, Index, Series]"; expected
# "Hashable"
and self.orig_right._is_level_reference(
right_key # type: ignore[arg-type]
)
and left_key == right_key
and name not in result.index.names
):
names_to_restore.append(name)
if names_to_restore:
result.set_index(names_to_restore, inplace=True)
@final
def _maybe_add_join_keys(
self,
result: DataFrame,
left_indexer: npt.NDArray[np.intp] | None,
right_indexer: npt.NDArray[np.intp] | None,
) -> None:
left_has_missing = None
right_has_missing = None
assert all(isinstance(x, _known) for x in self.left_join_keys)
keys = zip(self.join_names, self.left_on, self.right_on, strict=True)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None or right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (
False
if left_indexer is None
else (left_indexer == -1).any()
)
if left_has_missing:
take_right = self.right_join_keys[i]
if result[name].dtype != self.left[name].dtype:
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (
False
if right_indexer is None
else (right_indexer == -1).any()
)
if right_has_missing:
take_left = self.left_join_keys[i]
if result[name].dtype != self.right[name].dtype:
take_right = self.right[name]._values
else:
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
elif left_indexer is None:
lvals = take_left
else:
# TODO: can we pin down take_left's type earlier?
take_left = extract_array(take_left, extract_numpy=True)
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_nd(take_left, left_indexer, fill_value=lfill)
if take_right is None:
rvals = result[name]._values
elif right_indexer is None:
rvals = take_right
else:
# TODO: can we pin down take_right's type earlier?
taker = extract_array(take_right, extract_numpy=True)
rfill = na_value_for_dtype(taker.dtype)
rvals = algos.take_nd(taker, right_indexer, fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values or vice-versa
if left_indexer is not None and (left_indexer == -1).all():
key_col = Index(rvals, dtype=rvals.dtype, copy=False)
result_dtype = rvals.dtype
elif right_indexer is not None and (right_indexer == -1).all():
key_col = Index(lvals, dtype=lvals.dtype, copy=False)
result_dtype = lvals.dtype
else:
key_col = Index(lvals, dtype=lvals.dtype, copy=False)
if left_indexer is not None:
mask_left = left_indexer == -1
key_col = key_col.where(~mask_left, rvals)
result_dtype = find_common_type([lvals.dtype, rvals.dtype])
if (
lvals.dtype.kind == "M"
and rvals.dtype.kind == "M"
and result_dtype.kind == "O"
):
# TODO(non-nano) Workaround for common_type not dealing
# with different resolutions
result_dtype = key_col.dtype
if result._is_label_reference(name):
result[name] = result._constructor_sliced(
key_col, dtype=result_dtype, index=result.index
)
elif result._is_level_reference(name):
if isinstance(result.index, MultiIndex):
key_col.name = name
idx_list = [
(
result.index.get_level_values(level_name)
if level_name != name
else key_col
)
for level_name in result.index.names
]
result.set_index(idx_list, inplace=True)
else:
result.index = Index(key_col, name=name)
else:
result.insert(i, name or f"key_{i}", key_col)
def _get_join_indexers(
self,
) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""return the join indexers"""
# make mypy happy
assert self.how != "asof"
return get_join_indexers(
self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how
)
@final
def _get_join_info(
self,
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
left_ax = self.left.index
right_ax = self.right.index
if self.left_index and self.right_index and self.how != "asof":
join_index, left_indexer, right_indexer = left_ax.join(
right_ax, how=self.how, return_indexers=True, sort=self.sort
)
elif self.right_index and self.how == "left":
join_index, left_indexer, right_indexer = _left_join_on_index(
left_ax, right_ax, self.left_join_keys, sort=self.sort
)
elif self.left_index and self.how == "right":
join_index, right_indexer, left_indexer = _left_join_on_index(
right_ax, left_ax, self.right_join_keys, sort=self.sort
)
else:
(left_indexer, right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self._create_join_index(
left_ax,
right_ax,
left_indexer,
how="right",
)
elif right_indexer is None:
join_index = right_ax.copy()
else:
join_index = right_ax.take(right_indexer)
elif self.left_index:
if self.how == "asof":
# GH#33463 asof should always behave like a left merge
join_index = self._create_join_index(
left_ax,
right_ax,
left_indexer,
how="left",
)
elif len(self.right) > 0:
join_index = self._create_join_index(
right_ax,
left_ax,
right_indexer,
how="left",
)
elif left_indexer is None:
join_index = left_ax.copy()
else:
join_index = left_ax.take(left_indexer)
else:
n = len(left_ax) if left_indexer is None else len(left_indexer)
join_index = default_index(n)
if self.anti_join:
join_index, left_indexer, right_indexer = self._handle_anti_join(
join_index, left_indexer, right_indexer
)
return join_index, left_indexer, right_indexer
@final
def _create_join_index(
self,
index: Index,
other_index: Index,
indexer: npt.NDArray[np.intp] | None,
how: JoinHow = "left",
) -> Index:
"""
Create a join index by rearranging one index to match another
Parameters
----------
index : Index
index being rearranged
other_index : Index
used to supply values not found in index
indexer : np.ndarray[np.intp] or None
how to rearrange index
how : str
Replacement is only necessary if indexer based on other_index.
Returns
-------
Index
"""
if self.how in (how, "outer") and not isinstance(other_index, MultiIndex):
# if final index requires values in other_index but not target
# index, indexer may hold missing (-1) values, causing Index.take
# to take the final value in target index. So, we set the last
# element to be the desired fill value. We do not use allow_fill
# and fill_value because it throws a ValueError on integer indices
mask = indexer == -1
if np.any(mask):
fill_value = na_value_for_dtype(index.dtype, compat=False)
if not index._can_hold_na:
new_index = Index([fill_value])
else:
new_index = Index([fill_value], dtype=index.dtype)
index = index.append(new_index)
if indexer is None:
return index.copy()
return index.take(indexer)
@final
def _handle_anti_join(
self,
join_index: Index,
left_indexer: npt.NDArray[np.intp] | None,
right_indexer: npt.NDArray[np.intp] | None,
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
Handle anti join by returning the correct join index and indexers
Parameters
----------
join_index : Index
join index
left_indexer : np.ndarray[np.intp] or None
left indexer
right_indexer : np.ndarray[np.intp] or None
right indexer
Returns
-------
Index, np.ndarray[np.intp] or None, np.ndarray[np.intp] or None
"""
# Make sure indexers are not None
if left_indexer is None:
left_indexer = np.arange(len(self.left))
if right_indexer is None:
right_indexer = np.arange(len(self.right))
assert self.how in {"left", "right"}
if self.how == "left":
# Filter to rows where left keys are not in right keys
filt = right_indexer == -1
else:
# Filter to rows where right keys are not in left keys
filt = left_indexer == -1
join_index = join_index[filt]
left_indexer = left_indexer[filt]
right_indexer = right_indexer[filt]
return join_index, left_indexer, right_indexer
@final
def _get_merge_keys(
self,
) -> tuple[
list[ArrayLike],
list[ArrayLike],
list[Hashable],
list[Hashable],
list[Hashable],
]:
"""
Returns
-------
left_keys, right_keys, join_names, left_drop, right_drop
"""
left_keys: list[ArrayLike] = []
right_keys: list[ArrayLike] = []
join_names: list[Hashable] = []
right_drop: list[Hashable] = []
left_drop: list[Hashable] = []
left, right = self.left, self.right
is_lkey = lambda x: isinstance(x, _known) and len(x) == len(left)
is_rkey = lambda x: isinstance(x, _known) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_left_right_on() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on, strict=True):
lk = extract_array(lk, extract_numpy=True)
rk = extract_array(rk, extract_numpy=True)
if is_lkey(lk):
lk = cast(ArrayLike, lk)
left_keys.append(lk)
if is_rkey(rk):
rk = cast(ArrayLike, rk)
right_keys.append(rk)
join_names.append(None) # what to do?
else:
# Then we're either Hashable or a wrong-length arraylike,
# the latter of which will raise
rk = cast(Hashable, rk)
if rk is not None:
right_keys.append(right._get_label_or_level_values(rk))
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index._values)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
# Then we're either Hashable or a wrong-length arraylike,
# the latter of which will raise
rk = cast(Hashable, rk)
if rk is not None:
right_keys.append(right._get_label_or_level_values(rk))
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index._values)
if lk is not None and lk == rk: # FIXME: what about other NAs?
right_drop.append(rk)
else:
rk = cast(ArrayLike, rk)
right_keys.append(rk)
if lk is not None:
# Then we're either Hashable or a wrong-length arraylike,
# the latter of which will raise
lk = cast(Hashable, lk)
left_keys.append(left._get_label_or_level_values(lk))
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index._values)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
k = extract_array(k, extract_numpy=True)
k = cast(ArrayLike, k)
left_keys.append(k)
join_names.append(None)
else:
# Then we're either Hashable or a wrong-length arraylike,
# the latter of which will raise
k = cast(Hashable, k)
left_keys.append(left._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [
lev._values.take(lev_codes)
for lev, lev_codes in zip(
self.right.index.levels, self.right.index.codes, strict=True
)
]
else:
right_keys = [self.right.index._values]
elif _any(self.right_on):
for k in self.right_on:
k = extract_array(k, extract_numpy=True)
if is_rkey(k):
k = cast(ArrayLike, k)
right_keys.append(k)
join_names.append(None)
else:
# Then we're either Hashable or a wrong-length arraylike,
# the latter of which will raise
k = cast(Hashable, k)
right_keys.append(right._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [
lev._values.take(lev_codes)
for lev, lev_codes in zip(
self.left.index.levels, self.left.index.codes, strict=True
)
]
else:
left_keys = [self.left.index._values]
return left_keys, right_keys, join_names, left_drop, right_drop
@final
def _maybe_coerce_merge_keys(self) -> None:
# we have valid merges but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(
self.left_join_keys, self.right_join_keys, self.join_names, strict=True
):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
lk = extract_array(lk, extract_numpy=True)
rk = extract_array(rk, extract_numpy=True)
lk_is_cat = isinstance(lk.dtype, CategoricalDtype)
rk_is_cat = isinstance(rk.dtype, CategoricalDtype)
lk_is_object_or_string = is_object_dtype(lk.dtype) or is_string_dtype(
lk.dtype
)
rk_is_object_or_string = is_object_dtype(rk.dtype) or is_string_dtype(
rk.dtype
)
# if either left or right is a categorical
# then the must match exactly in categories & ordered
if lk_is_cat and rk_is_cat:
lk = cast(Categorical, lk)
rk = cast(Categorical, rk)
if lk._categories_match_up_to_permutation(rk):
continue
elif lk_is_cat or rk_is_cat:
pass
elif lk.dtype == rk.dtype:
continue
msg = (
f"You are trying to merge on {lk.dtype} and {rk.dtype} columns "
f"for key '{name}'. If you wish to proceed you should use pd.concat"
)
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8, int and float
# further if we are object, but we infer to
# the same, then proceed
if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype):
if lk.dtype.kind == rk.dtype.kind:
continue
if isinstance(lk.dtype, ExtensionDtype) and not isinstance(
rk.dtype, ExtensionDtype
):
ct = find_common_type([lk.dtype, rk.dtype])
if isinstance(ct, ExtensionDtype):
com_cls = ct.construct_array_type()
rk = com_cls._from_sequence(rk, dtype=ct, copy=False)
else:
rk = rk.astype(ct)
elif isinstance(rk.dtype, ExtensionDtype):
ct = find_common_type([lk.dtype, rk.dtype])
if isinstance(ct, ExtensionDtype):
com_cls = ct.construct_array_type()
lk = com_cls._from_sequence(lk, dtype=ct, copy=False)
else:
lk = lk.astype(ct)
# check whether ints and floats
if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype):
# GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
with np.errstate(invalid="ignore"):
# error: Argument 1 to "astype" of "ndarray" has incompatible
# type "Union[ExtensionDtype, Any, dtype[Any]]"; expected
# "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
casted = lk.astype(rk.dtype) # type: ignore[arg-type]
mask = ~np.isnan(lk)
match = lk == casted
# error: Item "ExtensionArray" of
# "ExtensionArray | Any" has no attribute "all"
if not match[mask].all(): # type: ignore[union-attr]
warnings.warn(
"You are merging on int and float "
"columns where the float values "
"are not equal to their int representation.",
UserWarning,
stacklevel=find_stack_level(),
)
continue
if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype):
# GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int
with np.errstate(invalid="ignore"):
# error: Argument 1 to "astype" of "ndarray" has incompatible
# type "Union[ExtensionDtype, Any, dtype[Any]]"; expected
# "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]"
casted = rk.astype(lk.dtype) # type: ignore[arg-type]
mask = ~np.isnan(rk)
match = rk == casted
# error: Item "ExtensionArray" of
# "ExtensionArray | Any" has no attribute "all"
if not match[mask].all(): # type: ignore[union-attr]
warnings.warn(
"You are merging on int and float "
"columns where the float values "
"are not equal to their int representation.",
UserWarning,
stacklevel=find_stack_level(),
)
continue
# let's infer and see if we are ok
if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype(
rk, skipna=False
):
continue
# Check if we are trying to merge on obviously
# incompatible dtypes GH 9780, GH 15800
# bool values are coerced to object
elif (lk_is_object_or_string and is_bool_dtype(rk.dtype)) or (
is_bool_dtype(lk.dtype) and rk_is_object_or_string
):
pass
# object values are allowed to be merged
elif (lk_is_object_or_string and is_numeric_dtype(rk.dtype)) or (
is_numeric_dtype(lk.dtype) and rk_is_object_or_string
):
inferred_left = lib.infer_dtype(lk, skipna=False)
inferred_right = lib.infer_dtype(rk, skipna=False)
bool_types = ["integer", "mixed-integer", "boolean", "empty"]
string_types = ["string", "unicode", "mixed", "bytes", "empty"]
# inferred bool
if inferred_left in bool_types and inferred_right in bool_types:
pass
# unless we are merging non-string-like with string-like
elif (
inferred_left in string_types and inferred_right not in string_types
) or (
inferred_right in string_types and inferred_left not in string_types
):
raise ValueError(msg)
# datetimelikes must match exactly
elif needs_i8_conversion(lk.dtype) and not needs_i8_conversion(rk.dtype):
raise ValueError(msg)
elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype):
raise ValueError(msg)
elif isinstance(lk.dtype, DatetimeTZDtype) and not isinstance(
rk.dtype, DatetimeTZDtype
):
raise ValueError(msg)
elif not isinstance(lk.dtype, DatetimeTZDtype) and isinstance(
rk.dtype, DatetimeTZDtype
):
raise ValueError(msg)
elif (
isinstance(lk.dtype, DatetimeTZDtype)
and isinstance(rk.dtype, DatetimeTZDtype)
) or (lk.dtype.kind == "M" and rk.dtype.kind == "M"):
# allows datetime with different resolutions
continue
# datetime and timedelta not allowed
elif lk.dtype.kind == "M" and rk.dtype.kind == "m":
raise ValueError(msg)
elif lk.dtype.kind == "m" and rk.dtype.kind == "M":
raise ValueError(msg)
elif is_object_dtype(lk.dtype) and is_object_dtype(rk.dtype):
continue
# Houston, we have a problem!
# let's coerce to object if the dtypes aren't
# categorical, otherwise coerce to the category
# dtype. If we coerced categories to object,
# then we would lose type information on some
# columns, and end up trying to merge
# incompatible dtypes. See GH 16900.
if name in self.left.columns:
typ = cast(Categorical, lk).categories.dtype if lk_is_cat else object
self.left = self.left.copy(deep=False)
self.left[name] = self.left[name].astype(typ)
if name in self.right.columns:
typ = cast(Categorical, rk).categories.dtype if rk_is_cat else object
self.right = self.right.copy(deep=False)
self.right[name] = self.right[name].astype(typ)
def _validate_left_right_on(self, left_on, right_on):
left_on = com.maybe_make_list(left_on)
right_on = com.maybe_make_list(right_on)
# Hm, any way to make this logic less complicated??
if self.on is None and left_on is None and right_on is None:
if self.left_index and self.right_index:
left_on, right_on = (), ()
elif self.left_index:
raise MergeError("Must pass right_on or right_index=True")
elif self.right_index:
raise MergeError("Must pass left_on or left_index=True")
else:
# use the common columns
left_cols = self.left.columns
right_cols = self.right.columns
common_cols = left_cols.intersection(right_cols)
if len(common_cols) == 0:
raise MergeError(
"No common columns to perform merge on. "
f"Merge options: left_on={left_on}, "
f"right_on={right_on}, "
f"left_index={self.left_index}, "
f"right_index={self.right_index}"
)
if (
not left_cols.join(common_cols, how="inner").is_unique
or not right_cols.join(common_cols, how="inner").is_unique
):
raise MergeError(f"Data columns not unique: {common_cols!r}")
left_on = right_on = common_cols
elif self.on is not None:
if left_on is not None or right_on is not None:
raise MergeError(
'Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.'
)
if self.left_index or self.right_index:
raise MergeError(
'Can only pass argument "on" OR "left_index" '
'and "right_index", not a combination of both.'
)
left_on = right_on = self.on
elif left_on is not None:
if self.left_index:
raise MergeError(
'Can only pass argument "left_on" OR "left_index" not both.'
)
if not self.right_index and right_on is None:
raise MergeError('Must pass "right_on" OR "right_index".')
n = len(left_on)
if self.right_index:
if len(left_on) != self.right.index.nlevels:
raise ValueError(
"len(left_on) must equal the number "
'of levels in the index of "right"'
)
right_on = [None] * n
elif right_on is not None:
if self.right_index:
raise MergeError(
'Can only pass argument "right_on" OR "right_index" not both.'
)
if not self.left_index and left_on is None:
raise MergeError('Must pass "left_on" OR "left_index".')
n = len(right_on)
if self.left_index:
if len(right_on) != self.left.index.nlevels:
raise ValueError(
"len(right_on) must equal the number "
'of levels in the index of "left"'
)
left_on = [None] * n
if len(right_on) != len(left_on):
raise ValueError("len(right_on) must equal len(left_on)")
return left_on, right_on
@final
def _validate_validate_kwd(self, validate: str) -> None:
# Check uniqueness of each
if self.left_index:
left_join_index = self.orig_left.index
left_unique = left_join_index.is_unique
else:
left_join_index = MultiIndex.from_arrays(self.left_join_keys)
left_unique = left_join_index.is_unique
if self.right_index:
right_join_index = self.orig_right.index
right_unique = self.orig_right.index.is_unique
else:
right_join_index = MultiIndex.from_arrays(self.right_join_keys)
right_unique = right_join_index.is_unique
def left_error_msg(x: Index) -> str:
name = self.left_on if not self.left_index else lib.no_default
msg = x[x.duplicated()][:5].to_frame(name=name).to_string(index=False)
return f"\nDuplicates in left:\n {msg} ..."
def right_error_msg(x: Index) -> str:
name = self.right_on if not self.right_index else lib.no_default
msg = x[x.duplicated()][:5].to_frame(name=name).to_string(index=False)
return f"\nDuplicates in right:\n {msg} ..."
# Check data integrity
if validate in ["one_to_one", "1:1"]:
if not left_unique and not right_unique:
raise MergeError(
"Merge keys are not unique in either left "
"or right dataset; not a one-to-one merge."
f"{left_error_msg(left_join_index)}"
f"{right_error_msg(right_join_index)}"
)
if not left_unique:
raise MergeError(
"Merge keys are not unique in left dataset; not a one-to-one merge"
f"{left_error_msg(left_join_index)}"
)
if not right_unique:
raise MergeError(
"Merge keys are not unique in right dataset; not a one-to-one merge"
f"{right_error_msg(right_join_index)}"
)
elif validate in ["one_to_many", "1:m"]:
if not left_unique:
raise MergeError(
"Merge keys are not unique in left dataset; not a one-to-many merge"
f"{left_error_msg(left_join_index)}"
)
elif validate in ["many_to_one", "m:1"]:
if not right_unique:
raise MergeError(
"Merge keys are not unique in right dataset; "
"not a many-to-one merge\n"
f"{right_error_msg(right_join_index)}"
)
elif validate in ["many_to_many", "m:m"]:
pass
else:
raise ValueError(
f'"{validate}" is not a valid argument. '
"Valid arguments are:\n"
'- "1:1"\n'
'- "1:m"\n'
'- "m:1"\n'
'- "m:m"\n'
'- "one_to_one"\n'
'- "one_to_many"\n'
'- "many_to_one"\n'
'- "many_to_many"'
)
def get_join_indexers(
left_keys: list[ArrayLike],
right_keys: list[ArrayLike],
sort: bool = False,
how: JoinHow = "inner",
) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
Parameters
----------
left_keys : list[ndarray, ExtensionArray, Index, Series]
right_keys : list[ndarray, ExtensionArray, Index, Series]
sort : bool, default False
how : {'inner', 'outer', 'left', 'right'}, default 'inner'
Returns
-------
np.ndarray[np.intp] or None
Indexer into the left_keys.
np.ndarray[np.intp] or None
Indexer into the right_keys.
"""
assert len(left_keys) == len(right_keys), (
"left_keys and right_keys must be the same length"
)
# fast-path for empty left/right
left_n = len(left_keys[0])
right_n = len(right_keys[0])
if left_n == 0:
if how in ["left", "inner"]:
return _get_empty_indexer()
elif not sort and how in ["right", "outer"]:
return _get_no_sort_one_missing_indexer(right_n, True)
elif right_n == 0:
if how in ["right", "inner"]:
return _get_empty_indexer()
elif not sort and how in ["left", "outer"]:
return _get_no_sort_one_missing_indexer(left_n, False)
lkey: ArrayLike
rkey: ArrayLike
if len(left_keys) > 1:
# get left & right join labels and num. of levels at each location
mapped = (
_factorize_keys(left_keys[n], right_keys[n], sort=sort)
for n in range(len(left_keys))
)
zipped = zip(*mapped, strict=True)
llab, rlab, shape = (list(x) for x in zipped)
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, tuple(shape), sort)
else:
lkey = left_keys[0]
rkey = right_keys[0]
left = Index(lkey)
right = Index(rkey)
if (
left.is_monotonic_increasing
and right.is_monotonic_increasing
and (left.is_unique or right.is_unique)
):
_, lidx, ridx = left.join(right, how=how, return_indexers=True, sort=sort)
else:
lidx, ridx = get_join_indexers_non_unique(
left._values, right._values, sort, how
)
if lidx is not None and is_range_indexer(lidx, len(left)):
lidx = None
if ridx is not None and is_range_indexer(ridx, len(right)):
ridx = None
return lidx, ridx
def get_join_indexers_non_unique(
left: ArrayLike,
right: ArrayLike,
sort: bool = False,
how: JoinHow = "inner",
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
"""
Get join indexers for left and right.
Parameters
----------
left : ArrayLike
right : ArrayLike
sort : bool, default False
how : {'inner', 'outer', 'left', 'right'}, default 'inner'
Returns
-------
np.ndarray[np.intp]
Indexer into left.
np.ndarray[np.intp]
Indexer into right.
"""
lkey, rkey, count = _factorize_keys(left, right, sort=sort, how=how)
if count == -1:
# hash join
return lkey, rkey
if how == "left":
lidx, ridx = libjoin.left_outer_join(lkey, rkey, count, sort=sort)
elif how == "right":
ridx, lidx = libjoin.left_outer_join(rkey, lkey, count, sort=sort)
elif how == "inner":
lidx, ridx = libjoin.inner_join(lkey, rkey, count, sort=sort)
elif how == "outer":
lidx, ridx = libjoin.full_outer_join(lkey, rkey, count)
return lidx, ridx
def restore_dropped_levels_multijoin(
left: MultiIndex,
right: MultiIndex,
dropped_level_names,
join_index: Index,
lindexer: npt.NDArray[np.intp],
rindexer: npt.NDArray[np.intp],
) -> tuple[FrozenList, FrozenList, FrozenList]:
"""
*this is an internal non-public method*
Returns the levels, labels and names of a multi-index to multi-index join.
Depending on the type of join, this method restores the appropriate
dropped levels of the joined multi-index.
The method relies on lindexer, rindexer which hold the index positions of
left and right, where a join was feasible
Parameters
----------
left : MultiIndex
left index
right : MultiIndex
right index
dropped_level_names : str array
list of non-common level names
join_index : Index
the index of the join between the
common levels of left and right
lindexer : np.ndarray[np.intp]
left indexer
rindexer : np.ndarray[np.intp]
right indexer
Returns
-------
levels : list of Index
levels of combined multiindexes
labels : np.ndarray[np.intp]
labels of combined multiindexes
names : List[Hashable]
names of combined multiindex levels
"""
def _convert_to_multiindex(index: Index) -> MultiIndex:
if isinstance(index, MultiIndex):
return index
else:
return MultiIndex.from_arrays([index._values], names=[index.name])
# For multi-multi joins with one overlapping level,
# the returned index if of type Index
# Assure that join_index is of type MultiIndex
# so that dropped levels can be appended
join_index = _convert_to_multiindex(join_index)
join_levels = join_index.levels
join_codes = join_index.codes
join_names = join_index.names
# Iterate through the levels that must be restored
for dropped_level_name in dropped_level_names:
if dropped_level_name in left.names:
idx = left
indexer = lindexer
else:
idx = right
indexer = rindexer
# The index of the level name to be restored
name_idx = idx.names.index(dropped_level_name)
restore_levels = idx.levels[name_idx]
# Inject -1 in the codes list where a join was not possible
# IOW indexer[i]=-1
codes = idx.codes[name_idx]
if indexer is None:
restore_codes = codes
else:
restore_codes = algos.take_nd(codes, indexer, fill_value=-1)
join_levels = join_levels + [restore_levels]
join_codes = join_codes + [restore_codes]
join_names = join_names + [dropped_level_name]
return join_levels, join_codes, join_names
| _MergeOperation |
python | PyCQA__pylint | tests/functional/u/using_constant_test.py | {
"start": 391,
"end": 3448
} | class ____:
def method(self):
pass
instance = Class()
if collections: # [using-constant-test]
pass
# GenExpr
if (node for node in range(10)): # [using-constant-test]
pass
if lambda: None: # [using-constant-test]
pass
if function: # [using-constant-test]
pass
if Class: # [using-constant-test]
pass
if 2: # [using-constant-test]
pass
if True: # [using-constant-test]
pass
if '': # [using-constant-test]
pass
if b'': # [using-constant-test]
pass
if 2.0: # [using-constant-test]
pass
if {}: # [using-constant-test]
pass
if {1, 2, 3}: # [using-constant-test]
pass
if (1, 2, 3): # [using-constant-test]
pass
if (): # [using-constant-test]
pass
if [1, 2, 3]: # [using-constant-test]
pass
if []: # [using-constant-test]
pass
# Generator
generator = function()
if generator: # [using-constant-test]
pass
if 1 if 2 else 3: # [using-constant-test]
pass
def test_comprehensions():
[data for data in range(100) if abs] # [using-constant-test]
[data for data in range(100) if 1] # [using-constant-test]
(data for data in range(100) if abs) # [using-constant-test]
(data for data in range(100) if 1) # [using-constant-test]
{data for data in range(100) if abs} # [using-constant-test]
{data: 1 for data in range(100) if abs} # [using-constant-test]
# UnboundMethod / Function
if Class.method: # [using-constant-test]
pass
# BoundMethod
if instance.method: # [using-constant-test]
pass
# For these, we require to do inference, even though the result can be a
# constant value. For some of them, we could determine that the test
# is constant, such as 2 + 3, but the components of the BinOp
# can be anything else (2 + somefunccall).
name = 42
if name:
pass
if 3 + 4:
pass
if 3 and 4:
pass
if not 3:
pass
if instance.method():
pass
if 2 < 3: # [comparison-of-constants]
pass
if tuple((1, 2, 3)):
pass
if dict():
pass
if tuple():
pass
if list():
pass
if [1, 2, 3][:1]:
pass
def test(*args):
if args:
return 42
return None
def test_good_comprehension_checks():
[data for data in range(100)]
[data for data in range(100) if data]
[data for data in range(100) if abs(data)]
(data for data in range(100) if data)
(data for data in range(100) if abs(data))
{data for data in range(100) if data}
{data for data in range(100) if abs(data)}
{data: 1 for data in range(100) if data}
{data: 1 for data in range(100)}
# Calls to functions returning generator expressions are always truthy
def get_generator():
return (x for x in range(0))
if get_generator(): # [using-constant-test]
pass
def maybe_get_generator(arg):
if arg:
return (x for x in range(0))
return None
if maybe_get_generator(None):
pass
y = (a for a in range(10))
if y: # [using-constant-test]
pass
z = (a for a in range(10))
z = "red herring"
if z:
pass
gen = get_generator()
if gen: # [using-constant-test]
pass
| Class |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 14420,
"end": 16568
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(DATASCANJOB_STR)
def test_execute(self, mock_data_scan_job, hook_mock):
op = DataplexGetDataQualityScanResultOperator(
task_id="get_data_scan_result",
project_id=PROJECT_ID,
region=REGION,
job_id=JOB_ID,
data_scan_id=DATA_SCAN_ID,
api_version=API_VERSION,
wait_for_results=False,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.get_data_scan_job.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
job_id=JOB_ID,
data_scan_id=DATA_SCAN_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(HOOK_STR)
@mock.patch(DATASCANJOB_STR)
def test_execute_deferrable(self, mock_data_scan_job, hook_mock):
op = DataplexGetDataQualityScanResultOperator(
task_id="get_data_scan_result",
project_id=PROJECT_ID,
region=REGION,
job_id=JOB_ID,
data_scan_id=DATA_SCAN_ID,
api_version=API_VERSION,
wait_for_results=True,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.wait_for_data_scan_job.assert_not_called()
assert isinstance(exc.value.trigger, DataplexDataQualityJobTrigger)
assert exc.value.method_name == GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME
| TestDataplexGetDataQualityScanResultOperator |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 42721,
"end": 46920
} | class ____(Function):
"""
The Commutator: [A, B] = A*B - B*A
The arguments are ordered according to comparison operators
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import Commutator
>>> A, B = symbols('A,B', commutative=False)
>>> Commutator(B, A)
-Commutator(A, B)
Evaluate the commutator with .doit()
>>> comm = Commutator(A,B); comm
Commutator(A, B)
>>> comm.doit()
A*B - B*A
For two second quantization operators the commutator is evaluated
immediately:
>>> from sympy.physics.secondquant import Fd, F
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> p,q = symbols('p,q')
>>> Commutator(Fd(a),Fd(i))
2*NO(CreateFermion(a)*CreateFermion(i))
But for more complicated expressions, the evaluation is triggered by
a call to .doit()
>>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm
Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i))
>>> comm.doit(wicks=True)
-KroneckerDelta(i, p)*CreateFermion(q) +
KroneckerDelta(i, q)*CreateFermion(p)
"""
is_commutative = False
@classmethod
def eval(cls, a, b):
"""
The Commutator [A,B] is on canonical form if A < B.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy.abc import x
>>> c1 = Commutator(F(x), Fd(x))
>>> c2 = Commutator(Fd(x), F(x))
>>> Commutator.eval(c1, c2)
0
"""
if not (a and b):
return S.Zero
if a == b:
return S.Zero
if a.is_commutative or b.is_commutative:
return S.Zero
#
# [A+B,C] -> [A,C] + [B,C]
#
a = a.expand()
if isinstance(a, Add):
return Add(*[cls(term, b) for term in a.args])
b = b.expand()
if isinstance(b, Add):
return Add(*[cls(a, term) for term in b.args])
#
# [xA,yB] -> xy*[A,B]
#
ca, nca = a.args_cnc()
cb, ncb = b.args_cnc()
c_part = list(ca) + list(cb)
if c_part:
return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))
#
# single second quantization operators
#
if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator):
if isinstance(b, CreateBoson) and isinstance(a, AnnihilateBoson):
return KroneckerDelta(a.state, b.state)
if isinstance(a, CreateBoson) and isinstance(b, AnnihilateBoson):
return S.NegativeOne*KroneckerDelta(a.state, b.state)
else:
return S.Zero
if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator):
return wicks(a*b) - wicks(b*a)
#
# Canonical ordering of arguments
#
if a.sort_key() > b.sort_key():
return S.NegativeOne*cls(b, a)
def doit(self, **hints):
"""
Enables the computation of complex expressions.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy import symbols
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> c = Commutator(Fd(a)*F(i),Fd(b)*F(j))
>>> c.doit(wicks=True)
0
"""
a = self.args[0]
b = self.args[1]
if hints.get("wicks"):
a = a.doit(**hints)
b = b.doit(**hints)
try:
return wicks(a*b) - wicks(b*a)
except ContractionAppliesOnlyToFermions:
pass
except WicksTheoremDoesNotApply:
pass
return (a*b - b*a).doit(**hints)
def __repr__(self):
return "Commutator(%s,%s)" % (self.args[0], self.args[1])
def __str__(self):
return "[%s,%s]" % (self.args[0], self.args[1])
def _latex(self, printer):
return "\\left[%s,%s\\right]" % tuple([
printer._print(arg) for arg in self.args])
| Commutator |
python | PyCQA__pylint | doc/data/messages/m/method-cache-max-size-none/good.py | {
"start": 161,
"end": 304
} | class ____:
def __init__(self):
self.result = []
def fibonacci(self, n):
self.result.append(cached_fibonacci(n))
| Fibonnaci |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 9662,
"end": 9986
} | class ____(AdaptiveTicker):
''' Generate ticks on a linear scale.
.. note::
This class may be renamed to ``LinearTicker`` in the future.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| BasicTicker |
python | getsentry__sentry | src/sentry/issues/search.py | {
"start": 789,
"end": 998
} | class ____(Protocol):
def __call__(
self,
groupby: Sequence[str],
having: Sequence[Any],
orderby: Sequence[str],
) -> Mapping[str, Any]: ...
| IntermediateSearchQueryPartial |
python | plotly__plotly.py | plotly/graph_objs/scattermap/_cluster.py | {
"start": 233,
"end": 9682
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermap"
_path_str = "scattermap.cluster"
_valid_props = {
"color",
"colorsrc",
"enabled",
"maxzoom",
"opacity",
"opacitysrc",
"size",
"sizesrc",
"step",
"stepsrc",
}
@property
def color(self):
"""
Sets the color for each cluster step.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def enabled(self):
"""
Determines whether clustering is enabled or disabled.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def maxzoom(self):
"""
Sets the maximum zoom level. At zoom levels equal to or greater
than this, points will never be clustered.
The 'maxzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["maxzoom"]
@maxzoom.setter
def maxzoom(self, val):
self["maxzoom"] = val
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
@property
def size(self):
"""
Sets the size for each cluster step.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def step(self):
"""
Sets how many points it takes to create a cluster or advance to
the next cluster step. Use this in conjunction with arrays for
`size` and / or `color`. If an integer, steps start at
multiples of this number. If an array, each step extends from
the given value until one less than the next value.
The 'step' property is a number and may be specified as:
- An int or float in the interval [-1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["step"]
@step.setter
def step(self, val):
self["step"] = val
@property
def stepsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `step`.
The 'stepsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stepsrc"]
@stepsrc.setter
def stepsrc(self, val):
self["stepsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color for each cluster step.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
enabled
Determines whether clustering is enabled or disabled.
maxzoom
Sets the maximum zoom level. At zoom levels equal to or
greater than this, points will never be clustered.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
size
Sets the size for each cluster step.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
step
Sets how many points it takes to create a cluster or
advance to the next cluster step. Use this in
conjunction with arrays for `size` and / or `color`. If
an integer, steps start at multiples of this number. If
an array, each step extends from the given value until
one less than the next value.
stepsrc
Sets the source reference on Chart Studio Cloud for
`step`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
enabled=None,
maxzoom=None,
opacity=None,
opacitysrc=None,
size=None,
sizesrc=None,
step=None,
stepsrc=None,
**kwargs,
):
"""
Construct a new Cluster object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattermap.Cluster`
color
Sets the color for each cluster step.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
enabled
Determines whether clustering is enabled or disabled.
maxzoom
Sets the maximum zoom level. At zoom levels equal to or
greater than this, points will never be clustered.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
size
Sets the size for each cluster step.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
step
Sets how many points it takes to create a cluster or
advance to the next cluster step. Use this in
conjunction with arrays for `size` and / or `color`. If
an integer, steps start at multiples of this number. If
an array, each step extends from the given value until
one less than the next value.
stepsrc
Sets the source reference on Chart Studio Cloud for
`step`.
Returns
-------
Cluster
"""
super().__init__("cluster")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermap.Cluster
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermap.Cluster`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("enabled", arg, enabled)
self._set_property("maxzoom", arg, maxzoom)
self._set_property("opacity", arg, opacity)
self._set_property("opacitysrc", arg, opacitysrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("step", arg, step)
self._set_property("stepsrc", arg, stepsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Cluster |
python | getsentry__sentry | src/sentry/api/serializers/models/relay.py | {
"start": 279,
"end": 598
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
return {
"relayId": str(obj.relay_id),
"version": str(obj.version),
"publicKey": obj.public_key,
"firstSeen": obj.first_seen,
"lastSeen": obj.last_seen,
}
| RelaySerializer |
python | jmcnamara__XlsxWriter | xlsxwriter/app.py | {
"start": 316,
"end": 6103
} | class ____(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX App file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.part_names = []
self.heading_pairs = []
self.properties = {}
self.doc_security = 0
def _add_part_name(self, part_name: str) -> None:
# Add the name of a workbook Part such as 'Sheet1' or 'Print_Titles'.
self.part_names.append(part_name)
def _add_heading_pair(self, heading_pair: Tuple[str, int]) -> None:
# Add the name of a workbook Heading Pair such as 'Worksheets',
# 'Charts' or 'Named Ranges'.
# Ignore empty pairs such as chartsheets.
if not heading_pair[1]:
return
self.heading_pairs.append(("lpstr", heading_pair[0]))
self.heading_pairs.append(("i4", heading_pair[1]))
def _set_properties(self, properties: Dict[str, str]) -> None:
# Set the document properties.
self.properties = properties
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self) -> None:
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
self._write_properties()
self._write_application()
self._write_doc_security()
self._write_scale_crop()
self._write_heading_pairs()
self._write_titles_of_parts()
self._write_manager()
self._write_company()
self._write_links_up_to_date()
self._write_shared_doc()
self._write_hyperlink_base()
self._write_hyperlinks_changed()
self._write_app_version()
self._xml_end_tag("Properties")
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_properties(self) -> None:
# Write the <Properties> element.
schema = "http://schemas.openxmlformats.org/officeDocument/2006/"
xmlns = schema + "extended-properties"
xmlns_vt = schema + "docPropsVTypes"
attributes = [
("xmlns", xmlns),
("xmlns:vt", xmlns_vt),
]
self._xml_start_tag("Properties", attributes)
def _write_application(self) -> None:
# Write the <Application> element.
self._xml_data_element("Application", "Microsoft Excel")
def _write_doc_security(self) -> None:
# Write the <DocSecurity> element.
self._xml_data_element("DocSecurity", self.doc_security)
def _write_scale_crop(self) -> None:
# Write the <ScaleCrop> element.
self._xml_data_element("ScaleCrop", "false")
def _write_heading_pairs(self) -> None:
# Write the <HeadingPairs> element.
self._xml_start_tag("HeadingPairs")
self._write_vt_vector("variant", self.heading_pairs)
self._xml_end_tag("HeadingPairs")
def _write_titles_of_parts(self) -> None:
# Write the <TitlesOfParts> element.
parts_data = []
self._xml_start_tag("TitlesOfParts")
for part_name in self.part_names:
parts_data.append(("lpstr", part_name))
self._write_vt_vector("lpstr", parts_data)
self._xml_end_tag("TitlesOfParts")
def _write_vt_vector(
self, base_type: str, vector_data: List[Tuple[str, int]]
) -> None:
# Write the <vt:vector> element.
attributes = [
("size", len(vector_data)),
("baseType", base_type),
]
self._xml_start_tag("vt:vector", attributes)
for vt_data in vector_data:
if base_type == "variant":
self._xml_start_tag("vt:variant")
self._write_vt_data(vt_data)
if base_type == "variant":
self._xml_end_tag("vt:variant")
self._xml_end_tag("vt:vector")
def _write_vt_data(self, vt_data: Tuple[str, int]) -> None:
# Write the <vt:*> elements such as <vt:lpstr> and <vt:if>.
self._xml_data_element(f"vt:{vt_data[0]}", vt_data[1])
def _write_company(self) -> None:
company = self.properties.get("company", "")
self._xml_data_element("Company", company)
def _write_manager(self) -> None:
# Write the <Manager> element.
if "manager" not in self.properties:
return
self._xml_data_element("Manager", self.properties["manager"])
def _write_links_up_to_date(self) -> None:
# Write the <LinksUpToDate> element.
self._xml_data_element("LinksUpToDate", "false")
def _write_shared_doc(self) -> None:
# Write the <SharedDoc> element.
self._xml_data_element("SharedDoc", "false")
def _write_hyperlink_base(self) -> None:
# Write the <HyperlinkBase> element.
hyperlink_base = self.properties.get("hyperlink_base")
if hyperlink_base is None:
return
self._xml_data_element("HyperlinkBase", hyperlink_base)
def _write_hyperlinks_changed(self) -> None:
# Write the <HyperlinksChanged> element.
self._xml_data_element("HyperlinksChanged", "false")
def _write_app_version(self) -> None:
# Write the <AppVersion> element.
self._xml_data_element("AppVersion", "12.0000")
| App |
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 48277,
"end": 48458
} | class ____(BatchMathOpsPreGradFusion):
def __init__(self, **kwargs):
super().__init__(torch.detach, **kwargs)
@register_fusion("batch_nan_to_num")
| BatchDetachPreGradFusion |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 651,
"end": 850
} | class ____(graphene.ObjectType):
path = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "PathMetadataEntry"
| GraphenePathMetadataEntry |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/resource_tests/pythonic_resources/test_type_signatures.py | {
"start": 4221,
"end": 4633
} | class ____(ConfigurableResource):
_my_schema: str = Field(alias="schema")
reveal_type(ResourceWithAlias.__init__)
my_resource = ResourceWithAlias(schema="foo")
"""
)
pyright_out = get_pyright_reveal_type_output(filename)
# Ensure constructor signature shows schema as the alias
assert pyright_out[0] == "(self: ResourceWithAlias, *, schema: str) -> None"
| ResourceWithAlias |
python | pytest-dev__pytest | src/_pytest/config/__init__.py | {
"start": 33872,
"end": 34587
} | class ____(MutableMapping[str, Any]):
"""Compatibility proxy for the deprecated Config.inicfg."""
__slots__ = ("_config",)
def __init__(self, config: Config) -> None:
self._config = config
def __getitem__(self, key: str) -> Any:
return self._config._inicfg[key].value
def __setitem__(self, key: str, value: Any) -> None:
self._config._inicfg[key] = ConfigValue(value, origin="override", mode="toml")
def __delitem__(self, key: str) -> None:
del self._config._inicfg[key]
def __iter__(self) -> Iterator[str]:
return iter(self._config._inicfg)
def __len__(self) -> int:
return len(self._config._inicfg)
@final
| _DeprecatedInicfgProxy |
python | walkccc__LeetCode | solutions/531. Lonely Pixel I/531.py | {
"start": 0,
"end": 707
} | class ____:
def findLonelyPixel(self, picture: list[list[str]]) -> int:
m = len(picture)
n = len(picture[0])
ans = 0
rows = [0] * m # rows[i] := the number of B's in rows i
cols = [0] * n # cols[i] := the number of B's in cols i
for i in range(m):
for j in range(n):
if picture[i][j] == 'B':
rows[i] += 1
cols[j] += 1
for i in range(m):
if rows[i] == 1: # Only have to examine the rows if rows[i] == 1.
for j in range(n):
# After meeting a 'B' in this rows, break and search the next row.
if picture[i][j] == 'B':
if cols[j] == 1:
ans += 1
break
return ans
| Solution |
python | h5py__h5py | h5py/tests/test_file.py | {
"start": 16059,
"end": 18827
} | class ____(TestCase):
"""
Feature: File format compatibility bounds can be specified when
opening a file.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
# Current latest library bound label
if h5py.version.hdf5_version_tuple < (1, 11, 4):
cls.latest = 'v110'
elif h5py.version.hdf5_version_tuple < (1, 13, 0):
cls.latest = 'v112'
elif h5py.version.hdf5_version_tuple < (2, 0, 0):
cls.latest = 'v114'
else:
cls.latest = 'v200'
def test_default(self):
""" Opening with no libver arg """
f = File(self.mktemp(), 'w')
self.assertEqual(f.libver, ('earliest', self.latest))
f.close()
def test_single(self):
""" Opening with single libver arg """
f = File(self.mktemp(), 'w', libver='latest')
self.assertEqual(f.libver, (self.latest, self.latest))
f.close()
def test_single_v108(self):
""" Opening with "v108" libver arg """
f = File(self.mktemp(), 'w', libver='v108')
self.assertEqual(f.libver, ('v108', self.latest))
f.close()
def test_single_v110(self):
""" Opening with "v110" libver arg """
f = File(self.mktemp(), 'w', libver='v110')
self.assertEqual(f.libver, ('v110', self.latest))
f.close()
@ut.skipIf(h5py.version.hdf5_version_tuple < (1, 11, 4),
'Requires HDF5 1.11.4 or later')
def test_single_v112(self):
""" Opening with "v112" libver arg """
f = File(self.mktemp(), 'w', libver='v112')
self.assertEqual(f.libver, ('v112', self.latest))
f.close()
@ut.skipIf(h5py.version.hdf5_version_tuple < (1, 14, 0),
'Requires HDF5 1.14 or later')
def test_single_v114(self):
""" Opening with "v114" libver arg """
f = File(self.mktemp(), 'w', libver='v114')
self.assertEqual(f.libver, ('v114', self.latest))
f.close()
@ut.skipIf(h5py.version.hdf5_version_tuple < (2, 0, 0),
'Requires HDF5 2.0 or later')
def test_single_v200(self):
""" Opening with "v200" libver arg """
f = File(self.mktemp(), 'w', libver='v200')
self.assertEqual(f.libver, ('v200', self.latest))
f.close()
def test_multiple(self):
""" Opening with two libver args """
f = File(self.mktemp(), 'w', libver=('earliest', 'v108'))
self.assertEqual(f.libver, ('earliest', 'v108'))
f.close()
def test_none(self):
""" Omitting libver arg results in maximum compatibility """
f = File(self.mktemp(), 'w')
self.assertEqual(f.libver, ('earliest', self.latest))
f.close()
| TestNewLibver |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 573107,
"end": 573873
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for DeploymentReviewer."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("DeploymentReviewerEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("DeploymentReviewer"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| DeploymentReviewerConnection |
python | google__jax | jax/_src/pallas/fuser/fusible_dtype.py | {
"start": 2614,
"end": 2671
} | class ____:
allow_conversion: bool = False
| FusibleTyRules |
python | prabhupant__python-ds | data_structures/bst/bfs.py | {
"start": 21,
"end": 601
} | class ____():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def bfs(root):
if not root:
return
queue = collections.deque([root])
while queue:
temp = queue.popleft()
print(temp.val)
if temp.right:
queue.append(temp.right)
if temp.left:
queue.append(temp.left)
root = Node(3)
root.right = Node(4)
root.left = Node(2)
root.left.left = Node(1)
root.left.right = Node(2.5)
root.right.left = Node(3.5)
root.right.right = Node(5)
bfs(root)
| Node |
python | huggingface__transformers | src/transformers/models/umt5/modeling_umt5.py | {
"start": 3110,
"end": 4060
} | class ____(nn.Module):
def __init__(self, config: UMT5Config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->UMT5
| UMT5DenseActDense |
python | walkccc__LeetCode | solutions/2852. Sum of Remoteness of All Cells/2852.py | {
"start": 0,
"end": 998
} | class ____:
def sumRemoteness(self, grid: list[list[int]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
n = len(grid)
summ = sum(max(0, cell) for row in grid for cell in row)
ans = 0
def dfs(i: int, j: int) -> tuple[int, int]:
"""
Returns the (count, componentSum) of the connected component that contains
(x, y).
"""
if i < 0 or i == len(grid) or j < 0 or j == len(grid[0]):
return (0, 0)
if grid[i][j] == -1:
return (0, 0)
count = 1
componentSum = grid[i][j]
grid[i][j] = -1 # Mark as visited.
for dx, dy in DIRS:
x = i + dx
y = j + dy
nextCount, nextComponentSum = dfs(x, y)
count += nextCount
componentSum += nextComponentSum
return (count, componentSum)
for i in range(n):
for j in range(n):
if grid[i][j] > 0:
count, componentSum = dfs(i, j)
ans += (summ - componentSum) * count
return ans
| Solution |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 2366,
"end": 2484
} | class ____(Staticmethod):
def func(self, data): # [arguments-differ]
super().func(data)
| StaticmethodChild2 |
python | django__django | tests/check_framework/test_model_field_deprecation.py | {
"start": 1513,
"end": 2879
} | class ____(SimpleTestCase):
def test_default_details(self):
class MyField(models.Field):
system_check_removed_details = {}
class Model(models.Model):
name = MyField()
model = Model()
self.assertEqual(
model.check(),
[
checks.Error(
msg=(
"MyField has been removed except for support in historical "
"migrations."
),
obj=Model._meta.get_field("name"),
id="fields.EXXX",
)
],
)
def test_user_specified_details(self):
class MyField(models.Field):
system_check_removed_details = {
"msg": "Support for this field is gone.",
"hint": "Use something else.",
"id": "fields.E999",
}
class Model(models.Model):
name = MyField()
model = Model()
self.assertEqual(
model.check(),
[
checks.Error(
msg="Support for this field is gone.",
hint="Use something else.",
obj=Model._meta.get_field("name"),
id="fields.E999",
)
],
)
| TestRemovedField |
python | getsentry__sentry | tests/sentry/auth/test_helper.py | {
"start": 23397,
"end": 24774
} | class ____(AuthIdentityHandlerTest):
def setUp(self) -> None:
super().setUp()
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
)
self.identity_id = self.identity["id"]
self.verification_value = {
"user_id": self.user.id,
"email": self.email,
"member_id": member.id,
"identity_id": self.identity_id,
}
def test_has_verified_account_success(self) -> None:
self.create_useremail(email=self.email, user=self.user)
assert self.handler.has_verified_account(self.verification_value) is True
def test_has_verified_account_fail_email(self) -> None:
self.create_useremail(email=self.email, user=self.user)
identity = {
"id": "1234",
"email": "b@test.com",
"name": "Morty",
"data": {"foo": "bar"},
}
assert self._handler_with(identity).has_verified_account(self.verification_value) is False
def test_has_verified_account_fail_user_id(self) -> None:
wrong_user = self.create_user()
self.create_useremail(email=self.email, user=wrong_user)
assert self.handler.has_verified_account(self.verification_value) is False
| HasVerifiedAccountTest |
python | pytorch__pytorch | torch/distributed/launcher/api.py | {
"start": 5947,
"end": 13786
} | class ____:
"""
Launches an torchelastic agent on the container that invoked the entrypoint.
1. Pass the ``entrypoint`` arguments as non ``kwargs`` (e.g. no named parameters)/
``entrypoint`` can be a function or a command.
2. The return value is a map of each worker's output mapped
by their respective global rank.
Usage
::
def worker_fn(foo):
# ...
def main():
# entrypoint is a function.
outputs = elastic_launch(LaunchConfig, worker_fn)(foo)
# return rank 0's output
return outputs[0]
# entrypoint is a command and ``script.py`` is the python module.
outputs = elastic_launch(LaunchConfig, "script.py")(args)
outputs = elastic_launch(LaunchConfig, "python")("script.py")
"""
def __init__(
self,
config: LaunchConfig,
entrypoint: Callable | str | None,
):
self._config = config
self._entrypoint = entrypoint
def __call__(self, *args):
return launch_agent(self._config, self._entrypoint, list(args))
def _get_entrypoint_name(entrypoint: Callable | str | None, args: list[Any]) -> str:
"""Retrieve entrypoint name with the rule:
1. If entrypoint is a function, use ``entrypoint.__qualname__``.
2. If entrypoint is a string, check its value:
2.1 if entrypoint equals to ``sys.executable`` (like "python"), use the first element from ``args``
which does not start with hifen letter (for example, "-u" will be skipped).
2.2 otherwise, use ``entrypoint`` value.
3. Otherwise, return empty string.
"""
if isinstance(entrypoint, Callable): # type: ignore[arg-type]
return entrypoint.__name__ # type: ignore[union-attr]
elif isinstance(entrypoint, str):
if entrypoint == sys.executable:
return next((arg for arg in args if arg[0] != "-"), "")
else:
return entrypoint
else:
return ""
def _get_addr_and_port(
rdzv_parameters: RendezvousParameters,
) -> tuple[str | None, int | None]:
if rdzv_parameters.backend != "static":
return (None, None)
endpoint = rdzv_parameters.endpoint
endpoint = endpoint.strip()
if not endpoint:
raise ValueError(
"Endpoint is missing in endpoint. Try to add --master-addr and --master-port"
)
master_addr, master_port = parse_rendezvous_endpoint(endpoint, default_port=-1)
if master_port == -1:
raise ValueError(
f"port is missing in endpoint: {endpoint}. Try to specify --master-port"
)
return (master_addr, master_port)
def launch_agent(
config: LaunchConfig,
entrypoint: Callable | str | None,
args: list[Any],
) -> dict[int, Any]:
if not config.run_id:
run_id = str(uuid.uuid4().int)
logger.warning("config has no run_id, generated a random run_id: %s", run_id)
config.run_id = run_id
entrypoint_name = _get_entrypoint_name(entrypoint, args)
logger.info(
"Starting elastic_operator with launch configs:\n"
" entrypoint : %(entrypoint)s\n"
" min_nodes : %(min_nodes)s\n"
" max_nodes : %(max_nodes)s\n"
" nproc_per_node : %(nproc_per_node)s\n"
" run_id : %(run_id)s\n"
" rdzv_backend : %(rdzv_backend)s\n"
" rdzv_endpoint : %(rdzv_endpoint)s\n"
" rdzv_configs : %(rdzv_configs)s\n"
" max_restarts : %(max_restarts)s\n"
" monitor_interval : %(monitor_interval)s\n"
" log_dir : %(log_dir)s\n"
" metrics_cfg : %(metrics_cfg)s\n"
" event_log_handler : %(event_log_handler)s\n"
" numa_options : %(numa_options)s\n"
" signals_to_handle : %(signals_to_handle)s\n"
" duplicate_stdout_filters : %(duplicate_stdout_filters)s\n"
" duplicate_stderr_filters : %(duplicate_stderr_filters)s\n",
{
"entrypoint": entrypoint_name,
"min_nodes": config.min_nodes,
"max_nodes": config.max_nodes,
"nproc_per_node": config.nproc_per_node,
"run_id": config.run_id,
"rdzv_backend": config.rdzv_backend,
"rdzv_endpoint": config.rdzv_endpoint,
"rdzv_configs": config.rdzv_configs,
"max_restarts": config.max_restarts,
"monitor_interval": config.monitor_interval,
"log_dir": config.logs_specs.root_log_dir, # type: ignore[union-attr]
"metrics_cfg": config.metrics_cfg,
"event_log_handler": config.event_log_handler,
"numa_options": config.numa_options,
"signals_to_handle": config.signals_to_handle,
"duplicate_stdout_filters": config.duplicate_stdout_filters,
"duplicate_stderr_filters": config.duplicate_stderr_filters,
},
)
rdzv_parameters = RendezvousParameters(
backend=config.rdzv_backend,
endpoint=config.rdzv_endpoint,
run_id=config.run_id,
min_nodes=config.min_nodes,
max_nodes=config.max_nodes,
local_addr=config.local_addr,
**config.rdzv_configs,
)
master_addr, master_port = _get_addr_and_port(rdzv_parameters)
# Set the signals to handle in the environment variable
os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"] = config.signals_to_handle
spec = WorkerSpec(
role=config.role,
local_world_size=config.nproc_per_node,
entrypoint=entrypoint,
args=tuple(args),
rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
max_restarts=config.max_restarts,
monitor_interval=config.monitor_interval,
master_addr=master_addr,
master_port=master_port,
local_addr=config.local_addr,
event_log_handler=config.event_log_handler,
numa_options=config.numa_options,
duplicate_stdout_filters=config.duplicate_stdout_filters,
duplicate_stderr_filters=config.duplicate_stderr_filters,
virtual_local_rank=config.virtual_local_rank,
)
agent = LocalElasticAgent(
spec=spec,
logs_specs=config.logs_specs, # type: ignore[arg-type]
start_method=config.start_method,
log_line_prefix_template=config.log_line_prefix_template,
)
shutdown_rdzv = True
try:
metrics.initialize_metrics(metrics.MetricsConfig(config.metrics_cfg))
result = agent.run()
# records that agent.run() has succeeded NOT that workers have succeeded
events.record(agent.get_event_succeeded(), config.event_log_handler)
if result.is_failed():
# ChildFailedError is treated specially by @record
# if the error files for the failed children exist
# @record will copy the first error (root cause)
# to the error file of the launcher process.
raise ChildFailedError(
name=entrypoint_name,
failures=result.failures,
)
return result.return_values
except ChildFailedError:
raise
except SignalException:
# when the agent dies with a signal do NOT shutdown the rdzv_handler
# since this closes the rendezvous on this rdzv_id permanently and
# prevents any additional scaling events
shutdown_rdzv = False
events.record(agent.get_event_failed(), config.event_log_handler)
raise
except Exception:
events.record(agent.get_event_failed(), config.event_log_handler)
raise
finally:
if shutdown_rdzv:
spec.rdzv_handler.shutdown()
| elastic_launch |
python | pallets__werkzeug | src/werkzeug/routing/matcher.py | {
"start": 391,
"end": 774
} | class ____:
"""A representation of a rule state.
This includes the *rules* that correspond to the state and the
possible *static* and *dynamic* transitions to the next state.
"""
dynamic: list[tuple[RulePart, State]] = field(default_factory=list)
rules: list[Rule] = field(default_factory=list)
static: dict[str, State] = field(default_factory=dict)
| State |
python | matplotlib__matplotlib | galleries/examples/animation/bayes_update.py | {
"start": 567,
"end": 2113
} | class ____:
def __init__(self, ax, prob=0.5):
self.success = 0
self.prob = prob
self.line, = ax.plot([], [], 'k-')
self.x = np.linspace(0, 1, 200)
self.ax = ax
# Set up plot parameters
self.ax.set_xlim(0, 1)
self.ax.set_ylim(0, 10)
self.ax.grid(True)
# This vertical line represents the theoretical value, to
# which the plotted distribution should converge.
self.ax.axvline(prob, linestyle='--', color='black')
def start(self):
# Used for the *init_func* parameter of FuncAnimation; this is called when
# initializing the animation, and also after resizing the figure.
return self.line,
def __call__(self, i):
# This way the plot can continuously run and we just keep
# watching new realizations of the process
if i == 0:
self.success = 0
self.line.set_data([], [])
return self.line,
# Choose success based on exceed a threshold with a uniform pick
if np.random.rand() < self.prob:
self.success += 1
y = beta_pdf(self.x, self.success + 1, (i - self.success) + 1)
self.line.set_data(self.x, y)
return self.line,
# Fixing random state for reproducibility
np.random.seed(19680801)
fig, ax = plt.subplots()
ud = UpdateDist(ax, prob=0.7)
anim = FuncAnimation(fig, ud, init_func=ud.start, frames=100, interval=100, blit=True)
plt.show()
# %%
# .. tags:: component: animation, plot-type: line
| UpdateDist |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.