language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__pip | src/pip/_vendor/idna/codec.py | {
"start": 188,
"end": 767
} | class ____(codecs.Codec):
def encode(self, data: str, errors: str = "strict") -> Tuple[bytes, int]:
if errors != "strict":
raise IDNAError('Unsupported error handling "{}"'.format(errors))
if not data:
return b"", 0
return encode(data), len(data)
def decode(self, data: bytes, errors: str = "strict") -> Tuple[str, int]:
if errors != "strict":
raise IDNAError('Unsupported error handling "{}"'.format(errors))
if not data:
return "", 0
return decode(data), len(data)
| Codec |
python | google__pytype | pytype/tools/xref/indexer.py | {
"start": 9361,
"end": 9556
} | class ____:
"""Representation of a function call."""
name: str
scope: str
func: str
location: source.Location
end_location: source.Location
args: list[Any]
return_type: str
| Funcall |
python | numba__numba | numba/tests/npyufunc/test_vectorize_decor.py | {
"start": 2442,
"end": 2565
} | class ____(unittest.TestCase, BaseVectorizeDecor):
target = 'cpu'
wrapper = jit(nopython=True)
| TestCPUVectorizeJitted |
python | numba__numba | numba/np/arrayobj.py | {
"start": 27779,
"end": 30828
} | class ____(Indexer):
"""
Compute indices from an array of boolean predicates.
"""
def __init__(self, context, builder, idxty, idxary):
self.context = context
self.builder = builder
self.idxty = idxty
self.idxary = idxary
assert idxty.ndim == 1
self.ll_intp = self.context.get_value_type(types.intp)
self.zero = Constant(self.ll_intp, 0)
def prepare(self):
builder = self.builder
self.size = cgutils.unpack_tuple(builder, self.idxary.shape)[0]
self.idx_index = cgutils.alloca_once(builder, self.ll_intp)
self.count = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_tail = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
builder = self.builder
count = cgutils.alloca_once_value(builder, self.zero)
# Sum all true values
with cgutils.for_range(builder, self.size) as loop:
c = builder.load(count)
pred = _getitem_array_single_int(
self.context, builder, self.idxty.dtype,
self.idxty, self.idxary, loop.index
)
c = builder.add(c, builder.zext(pred, c.type))
builder.store(c, count)
return builder.load(count)
def get_shape(self):
return (self.get_size(),)
def get_index_bounds(self):
# Pessimal heuristic, as we don't want to scan for the
# first and last true items
return (self.ll_intp(0), self.size)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(self.zero, self.idx_index)
self.builder.store(self.zero, self.count)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.idx_index)
cur_count = builder.load(self.count)
with builder.if_then(builder.icmp_signed('>=', cur_index, self.size),
likely=False):
builder.branch(self.bb_end)
# Load the predicate and branch if false
pred = _getitem_array_single_int(
self.context, builder, self.idxty.dtype, self.idxty, self.idxary,
cur_index
)
with builder.if_then(builder.not_(pred)):
builder.branch(self.bb_tail)
# Increment the count for next iteration
next_count = cgutils.increment_index(builder, cur_count)
builder.store(next_count, self.count)
return cur_index, cur_count
def loop_tail(self):
builder = self.builder
builder.branch(self.bb_tail)
builder.position_at_end(self.bb_tail)
next_index = cgutils.increment_index(builder,
builder.load(self.idx_index))
builder.store(next_index, self.idx_index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
| BooleanArrayIndexer |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 100910,
"end": 101773
} | class ____(MeanMetricWrapper):
"""Computes the Poisson metric between `y_true` and `y_pred`.
`metric = y_pred - y_true * log(y_pred)`
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Poisson()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.49999997
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.99999994
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Poisson()])
```
"""
def __init__(self, name='poisson', dtype=None):
super(Poisson, self).__init__(poisson, name, dtype=dtype)
| Poisson |
python | openai__openai-python | tests/api_resources/beta/test_threads.py | {
"start": 16203,
"end": 32777
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.create()
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.create(
messages=[
{
"content": "string",
"role": "user",
"attachments": [
{
"file_id": "file_id",
"tools": [{"type": "code_interpreter"}],
}
],
"metadata": {"foo": "string"},
}
],
metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
"vector_store_ids": ["string"],
"vector_stores": [
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
"metadata": {"foo": "string"},
}
],
},
},
)
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = response.parse()
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = await response.parse()
assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.retrieve(
"thread_id",
)
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.with_raw_response.retrieve(
"thread_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = response.parse()
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.with_streaming_response.retrieve(
"thread_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = await response.parse()
assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.with_raw_response.retrieve(
"",
)
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.update(
thread_id="thread_id",
)
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.update(
thread_id="thread_id",
metadata={"foo": "string"},
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
},
)
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.with_raw_response.update(
thread_id="thread_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = response.parse()
assert_matches_type(Thread, thread, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.with_streaming_response.update(
thread_id="thread_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = await response.parse()
assert_matches_type(Thread, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.with_raw_response.update(
thread_id="",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.delete(
"thread_id",
)
assert_matches_type(ThreadDeleted, thread, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.with_raw_response.delete(
"thread_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = response.parse()
assert_matches_type(ThreadDeleted, thread, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.with_streaming_response.delete(
"thread_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = await response.parse()
assert_matches_type(ThreadDeleted, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.with_raw_response.delete(
"",
)
@parametrize
async def test_method_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.create_and_run(
assistant_id="assistant_id",
)
assert_matches_type(Run, thread, path=["response"])
@parametrize
async def test_method_create_and_run_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread = await async_client.beta.threads.create_and_run(
assistant_id="assistant_id",
instructions="instructions",
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={"foo": "string"},
model="string",
parallel_tool_calls=True,
response_format="auto",
stream=False,
temperature=1,
thread={
"messages": [
{
"content": "string",
"role": "user",
"attachments": [
{
"file_id": "file_id",
"tools": [{"type": "code_interpreter"}],
}
],
"metadata": {"foo": "string"},
}
],
"metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
"vector_store_ids": ["string"],
"vector_stores": [
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
"metadata": {"foo": "string"},
}
],
},
},
},
tool_choice="none",
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
},
tools=[{"type": "code_interpreter"}],
top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
},
)
assert_matches_type(Run, thread, path=["response"])
@parametrize
async def test_raw_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.with_raw_response.create_and_run(
assistant_id="assistant_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = response.parse()
assert_matches_type(Run, thread, path=["response"])
@parametrize
async def test_streaming_response_create_and_run_overload_1(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.with_streaming_response.create_and_run(
assistant_id="assistant_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = await response.parse()
assert_matches_type(Run, thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread_stream = await async_client.beta.threads.create_and_run(
assistant_id="assistant_id",
stream=True,
)
await thread_stream.response.aclose()
@parametrize
async def test_method_create_and_run_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
thread_stream = await async_client.beta.threads.create_and_run(
assistant_id="assistant_id",
stream=True,
instructions="instructions",
max_completion_tokens=256,
max_prompt_tokens=256,
metadata={"foo": "string"},
model="string",
parallel_tool_calls=True,
response_format="auto",
temperature=1,
thread={
"messages": [
{
"content": "string",
"role": "user",
"attachments": [
{
"file_id": "file_id",
"tools": [{"type": "code_interpreter"}],
}
],
"metadata": {"foo": "string"},
}
],
"metadata": {"foo": "string"},
"tool_resources": {
"code_interpreter": {"file_ids": ["string"]},
"file_search": {
"vector_store_ids": ["string"],
"vector_stores": [
{
"chunking_strategy": {"type": "auto"},
"file_ids": ["string"],
"metadata": {"foo": "string"},
}
],
},
},
},
tool_choice="none",
tool_resources={
"code_interpreter": {"file_ids": ["string"]},
"file_search": {"vector_store_ids": ["string"]},
},
tools=[{"type": "code_interpreter"}],
top_p=1,
truncation_strategy={
"type": "auto",
"last_messages": 1,
},
)
await thread_stream.response.aclose()
@parametrize
async def test_raw_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.with_raw_response.create_and_run(
assistant_id="assistant_id",
stream=True,
)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
await stream.close()
@parametrize
async def test_streaming_response_create_and_run_overload_2(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.with_streaming_response.create_and_run(
assistant_id="assistant_id",
stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = await response.parse()
await stream.close()
assert cast(Any, response.is_closed) is True
| TestAsyncThreads |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 17684,
"end": 24696
} | class ____(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = (
window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
)
self.pretrained_window_size = pretrained_window_size
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
# mlp to generate continuous relative position bias
self.continuous_position_bias_mlp = nn.Sequential(
nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False)
)
# get relative_coords_table
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float()
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float()
relative_coords_table = (
torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij"))
.permute(1, 2, 0)
.contiguous()
.unsqueeze(0)
) # [1, 2*window_height - 1, 2*window_width - 1, 2]
if pretrained_window_size[0] > 0:
relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1
relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1
elif window_size > 1:
relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
relative_coords_table *= 8 # normalize to -8, 8
relative_coords_table = (
torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8)
)
# set to same dtype as mlp weight
relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype)
self.register_buffer("relative_coords_table", relative_coords_table, persistent=False)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index, persistent=False)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# cosine attention
attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize(
key_layer, dim=-1
).transpose(-2, -1)
logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp()
attention_scores = attention_scores * logit_scale
relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view(
-1, self.num_attention_heads
)
# [window_height*window_width,window_height*window_width,num_attention_heads]
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
)
# [num_attention_heads,window_height*window_width,window_height*window_width]
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in Swinv2Model forward() function)
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(
batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
) + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->Swinv2
| Swinv2SelfAttention |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 35156,
"end": 36390
} | class ____(Loops):
def make_loader(self) -> Callable[[Sequence[Expr]], OpsValue]:
# Make zero-element loops into a no-op
if self.is_zero_elements():
return partial(nop_loader_fn, dtype=self.dtype)
return self.inner_fn
def __str__(self) -> str:
return self._to_str(("ranges",))
__repr__ = __str__
def get_reduction_size(self) -> Sequence[sympy.Expr]:
return []
def get_reduction_type(self) -> Optional[str]:
return None
def store_output(
self,
output_name: Optional[str],
indexer: Callable[[Sequence[Expr]], Never],
vars: Sequence[Expr],
) -> None:
loader = self.make_loader()
return ops.store(output_name or "unnamed", indexer(vars), loader(vars))
def constant_to_device(self, device: torch.device) -> IRNode:
"""Move this to a given device. Requires that all reads are to constants."""
loader = self.make_loader()
loader = patch.object(ConstantBuffer, "override_device", device)(loader)
return Pointwise(
device=device,
dtype=self.dtype,
inner_fn=loader,
ranges=self.ranges,
)
@ir_dataclass
| Pointwise |
python | keras-team__keras | keras/src/layers/regularization/alpha_dropout_test.py | {
"start": 125,
"end": 2212
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_alpha_dropout_basics(self):
self.run_layer_test(
layers.AlphaDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_alpha_dropout_correctness(self):
inputs = np.ones((20, 500)).astype("float32")
layer = layers.AlphaDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)), 1.0, atol=1e-1
)
def test_alpha_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.AlphaDropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_alpha_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.AlphaDropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_alpha_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.AlphaDropout(rate=-0.5)
def test_alpha_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.AlphaDropout(rate=1.5)
| AlphaDropoutTest |
python | run-llama__llama_index | llama-index-experimental/llama_index/experimental/query_engine/pandas/pandas_query_engine.py | {
"start": 1911,
"end": 9355
} | class ____(BaseQueryEngine):
"""
Pandas query engine.
Convert natural language to Pandas python code.
WARNING: This tool provides the Agent access to the `eval` function.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would
require heavy sandboxing or virtual machines
Args:
df (pd.DataFrame): Pandas dataframe to use.
instruction_str (Optional[str]): Instruction string to use.
instruction_parser (Optional[PandasInstructionParser]): The output parser
that takes the pandas query output string and returns a string.
It defaults to PandasInstructionParser and takes pandas DataFrame,
and any output kwargs as parameters.
eg.kwargs["max_colwidth"] = [int] is used to set the length of text
that each column can display during str(df). Set it to a higher number
if there is possibly long text in the dataframe.
pandas_prompt (Optional[BasePromptTemplate]): Pandas prompt to use.
output_kwargs (dict): Additional output processor kwargs for the
PandasInstructionParser.
head (int): Number of rows to show in the table context.
verbose (bool): Whether to print verbose output.
llm (Optional[LLM]): Language model to use.
synthesize_response (bool): Whether to synthesize a response from the
query results. Defaults to False.
response_synthesis_prompt (Optional[BasePromptTemplate]): A
Response Synthesis BasePromptTemplate to use for the query. Defaults to
DEFAULT_RESPONSE_SYNTHESIS_PROMPT.
Examples:
`pip install llama-index-experimental`
```python
import pandas as pd
from llama_index.experimental.query_engine.pandas import PandasQueryEngine
df = pd.DataFrame(
{
"city": ["Toronto", "Tokyo", "Berlin"],
"population": [2930000, 13960000, 3645000]
}
)
query_engine = PandasQueryEngine(df=df, verbose=True)
response = query_engine.query("What is the population of Tokyo?")
```
"""
def __init__(
self,
df: pd.DataFrame,
instruction_str: Optional[str] = None,
instruction_parser: Optional[PandasInstructionParser] = None,
pandas_prompt: Optional[BasePromptTemplate] = None,
output_kwargs: Optional[dict] = None,
head: int = 5,
verbose: bool = False,
llm: Optional[LLM] = None,
synthesize_response: bool = False,
response_synthesis_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._df = df
self._head = head
self._pandas_prompt = pandas_prompt or DEFAULT_PANDAS_PROMPT
self._instruction_str = instruction_str or DEFAULT_INSTRUCTION_STR
self._instruction_parser = instruction_parser or PandasInstructionParser(
df, output_kwargs or {}
)
self._verbose = verbose
self._llm = llm or Settings.llm
self._synthesize_response = synthesize_response
self._response_synthesis_prompt = (
response_synthesis_prompt or DEFAULT_RESPONSE_SYNTHESIS_PROMPT
)
super().__init__(callback_manager=Settings.callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {
"pandas_prompt": self._pandas_prompt,
"response_synthesis_prompt": self._response_synthesis_prompt,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "pandas_prompt" in prompts:
self._pandas_prompt = prompts["pandas_prompt"]
if "response_synthesis_prompt" in prompts:
self._response_synthesis_prompt = prompts["response_synthesis_prompt"]
@classmethod
def from_index(cls, index: PandasIndex, **kwargs: Any) -> "PandasQueryEngine":
logger.warning(
"PandasIndex is deprecated. "
"Directly construct PandasQueryEngine with df instead."
)
return cls(df=index.df, **kwargs)
def _get_table_context(self) -> str:
"""Get table context."""
pd.set_option("display.max_colwidth", None)
pd.set_option("display.max_columns", None)
# since head() is only used.
pd.set_option("display.max_rows", self._head)
pd.set_option("display.width", None)
return str(self._df.head(self._head))
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
context = self._get_table_context()
pandas_response_str = self._llm.predict(
self._pandas_prompt,
df_str=context,
query_str=query_bundle.query_str,
instruction_str=self._instruction_str,
)
if self._verbose:
print_text(f"> Pandas Instructions:\n```\n{pandas_response_str}\n```\n")
pandas_output = self._instruction_parser.parse(pandas_response_str)
if self._verbose:
print_text(f"> Pandas Output: {pandas_output}\n")
response_metadata = {
"pandas_instruction_str": pandas_response_str,
"raw_pandas_output": pandas_output,
}
if self._synthesize_response:
response_str = str(
self._llm.predict(
self._response_synthesis_prompt,
query_str=query_bundle.query_str,
pandas_instructions=pandas_response_str,
pandas_output=pandas_output,
)
)
else:
response_str = str(pandas_output)
return Response(response=response_str, metadata=response_metadata)
async def _aquery(self, query_bundle: QueryBundle) -> Response:
"""Answer a query asynchronously."""
context = self._get_table_context()
pandas_response_str = await self._llm.apredict(
self._pandas_prompt,
df_str=context,
query_str=query_bundle.query_str,
instruction_str=self._instruction_str,
)
if self._verbose:
print_text(f"> Pandas Instructions:\n```\n{pandas_response_str}\n```\n")
pandas_output = self._instruction_parser.parse(pandas_response_str)
if self._verbose:
print_text(f"> Pandas Output: {pandas_output}\n")
response_metadata = {
"pandas_instruction_str": pandas_response_str,
"raw_pandas_output": pandas_output,
}
if self._synthesize_response:
response_str = str(
await self._llm.apredict(
self._response_synthesis_prompt,
query_str=query_bundle.query_str,
pandas_instructions=pandas_response_str,
pandas_output=pandas_output,
)
)
else:
response_str = str(pandas_output)
return Response(response=response_str, metadata=response_metadata)
# legacy
NLPandasQueryEngine = PandasQueryEngine
GPTNLPandasQueryEngine = PandasQueryEngine
| PandasQueryEngine |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/env_vars.py | {
"start": 1885,
"end": 2061
} | class ____(graphene.Union):
class Meta:
types = (GrapheneLocationDocsJson, GraphenePythonError)
name = "LocationDocsJsonOrError"
| GrapheneLocationDocsJsonOrError |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 7798,
"end": 8237
} | class ____(StringIORewind):
iso8601 = "%Y-%m-%d %H:%M:%S"
def setup(self):
rng = date_range("1/1/2000", periods=50000, freq="s")
self.StringIO_input = StringIO("\n".join(rng.strftime(self.iso8601).tolist()))
def time_read_csv(self):
read_csv(
self.data(self.StringIO_input),
header=None,
names=["foo"],
parse_dates=["foo"],
)
| ReadCSVConcatDatetime |
python | google__jax | jax/_src/lax/lax.py | {
"start": 17091,
"end": 72719
} | class ____(enum.Enum):
HIGHEST = 1
DEFAULT = 2
@export
def exp(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise exponential :math:`e^x`.

  Lowers directly to the `stablehlo.exponential`_ operation.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding :math:`e^x`.

  See also:
    :func:`jax.lax.exp2`, :func:`jax.lax.log`.

  .. _stablehlo.exponential: https://openxla.org/stablehlo/spec#exponential
  """
  return exp_p.bind(x, accuracy=accuracy)
# NOTE(review): @export added for consistency — every sibling elementwise
# wrapper in this module carries it; confirm exp2 was not deliberately omitted.
@export
def exp2(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise base-2 exponential :math:`2^x`.

  Implemented in terms of the `stablehlo.exponential`_ and
  `stablehlo.multiply`_ operations.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding :math:`2^x`.

  See also:
    :func:`jax.lax.exp`, :func:`jax.lax.log`.

  .. _stablehlo.exponential: https://openxla.org/stablehlo/spec#exponential
  .. _stablehlo.multiply: https://openxla.org/stablehlo/spec#multiply
  """
  return exp2_p.bind(x, accuracy=accuracy)
@export
def expm1(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute :math:`e^{x} - 1` elementwise.

  Lowers directly to the `stablehlo.exponential_minus_one`_ operation, which
  is more accurate near zero than the naive ``lax.exp(x) - 1``.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding :math:`e^x - 1`.

  See also:
    :func:`jax.lax.exp`, :func:`jax.lax.log1p`.

  .. _stablehlo.exponential_minus_one: https://openxla.org/stablehlo/spec#exponential_minus_one
  """
  return expm1_p.bind(x, accuracy=accuracy)
@export
def log(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise natural logarithm :math:`\mathrm{log}(x)`.

  Lowers directly to the `stablehlo.log`_ operation.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding :math:`\log(x)`.

  See also:
    :func:`jax.lax.exp`.

  .. _stablehlo.log: https://openxla.org/stablehlo/spec#log
  """
  return log_p.bind(x, accuracy=accuracy)
@export
def log1p(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute :math:`\mathrm{log}(1 + x)` elementwise.

  Lowers directly to the `stablehlo.log_plus_one`_ operation, which is more
  accurate near zero than the naive ``lax.log(1 + x)``.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding :math:`\log(1+x)`.

  See also:
    :func:`jax.lax.expm1`, :func:`jax.lax.log`.

  .. _stablehlo.log_plus_one: https://openxla.org/stablehlo/spec#log_plus_one
  """
  return log1p_p.bind(x, accuracy=accuracy)
@export
def tanh(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise hyperbolic tangent :math:`\mathrm{tanh}(x)`.

  Lowers directly to the `stablehlo.tanh`_ operation.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding the hyperbolic
    tangent.

  See also:
    :func:`jax.lax.atanh`, :func:`jax.lax.cosh`, :func:`jax.lax.sinh`.

  .. _stablehlo.tanh: https://openxla.org/stablehlo/spec#tanh
  """
  return tanh_p.bind(x, accuracy=accuracy)
@export
def logistic(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise logistic (sigmoid) :math:`\frac{1}{1 + e^{-x}}`.

  There is no HLO logistic/sigmoid primitive, so this lowers to a sequence of
  HLO arithmetic operations.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding the elementwise
    logistic/sigmoid function.

  See also:
    - :func:`jax.nn.sigmoid`: an alternative API for this functionality.
  """
  return logistic_p.bind(x, accuracy=accuracy)
@export
def sin(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise sine :math:`\mathrm{sin}(x)`.

  Floating-point inputs lower directly to the `stablehlo.sine`_ operation;
  complex inputs lower to a sequence of HLO ops implementing complex sine.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding the sine.

  See also:
    :func:`jax.lax.cos`, :func:`jax.lax.tan`, :func:`jax.lax.asin`.

  .. _stablehlo.sine: https://openxla.org/stablehlo/spec#sine
  """
  return sin_p.bind(x, accuracy=accuracy)
@export
def cos(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise cosine :math:`\mathrm{cos}(x)`.

  Floating-point inputs lower directly to the `stablehlo.cosine`_ operation;
  complex inputs lower to a sequence of HLO ops implementing complex cosine.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding the cosine.

  See also:
    :func:`jax.lax.sin`, :func:`jax.lax.tan`, :func:`jax.lax.acos`.

  .. _stablehlo.cosine: https://openxla.org/stablehlo/spec#cosine
  """
  return cos_p.bind(x, accuracy=accuracy)
@export
def atan2(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise two-term arc tangent :math:`\mathrm{atan}({x \over y})`.

  Lowers directly to the `stablehlo.atan2`_ operation.

  Args:
    x, y: arrays of identical floating-point or complex dtype; unless one is
      a scalar, both must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise arc tangent of :math:`x \over y`, respecting
    the quadrant indicated by the sign of each input.

  See also:
    :func:`jax.lax.tan`, :func:`jax.lax.atan`.

  .. _stablehlo.atan2: https://openxla.org/stablehlo/spec#atan2
  """
  return atan2_p.bind(*core.standard_insert_pvary(x, y))
@export
def real(x: ArrayLike) -> Array:
  r"""Extract the real part elementwise: :math:`\mathrm{Re}(x)`.

  Lowers directly to the `stablehlo.real`_ operation.

  Args:
    x: array with complex dtype.

  Returns:
    An array of the same shape as ``x`` holding its real part; dtype is
    float32 for complex64 input and float64 for complex128 input.

  See also:
    :func:`jax.lax.complex`, :func:`jax.lax.imag`, :func:`jax.lax.conj`.

  .. _stablehlo.real: https://openxla.org/stablehlo/spec#real
  """
  return real_p.bind(x)
@export
def imag(x: ArrayLike) -> Array:
  r"""Extract the imaginary part elementwise: :math:`\mathrm{Im}(x)`.

  Lowers directly to the `stablehlo.imag`_ operation.

  Args:
    x: array with complex dtype.

  Returns:
    An array of the same shape as ``x`` holding its imaginary part; dtype is
    float32 for complex64 input and float64 for complex128 input.

  See also:
    :func:`jax.lax.complex`, :func:`jax.lax.real`, :func:`jax.lax.conj`.

  .. _stablehlo.imag: https://openxla.org/stablehlo/spec#imag
  """
  return imag_p.bind(x)
@export
def complex(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Build a complex number elementwise: :math:`x + jy`.

  Lowers directly to the `stablehlo.complex`_ operation.

  Args:
    x, y: arrays of identical floating-point dtype; unless one is a scalar,
      both must have equal rank and be broadcast compatible.

  Returns:
    The complex array with real part ``x`` and imaginary part ``y``; inputs
    of dtype float32 or float64 yield complex64 or complex128 respectively.

  See also:
    :func:`jax.lax.real`, :func:`jax.lax.imag`, :func:`jax.lax.conj`.

  .. _stablehlo.complex: https://openxla.org/stablehlo/spec#complex
  """
  return complex_p.bind(*core.standard_insert_pvary(x, y))
@export
def conj(x: ArrayLike) -> Array:
  r"""Compute the elementwise complex conjugate :math:`\overline{x}`.

  Lowers to a combination of `stablehlo.real`_, `stablehlo.imag`_, and
  `stablehlo.complex`_.

  Args:
    x: array with complex dtype.

  Returns:
    An array of the same shape and dtype as ``x`` holding its complex
    conjugate.

  See also:
    :func:`jax.lax.complex`, :func:`jax.lax.real`, :func:`jax.lax.imag`,
    :func:`jax.lax.abs`.

  .. _stablehlo.real: https://openxla.org/stablehlo/spec#real
  .. _stablehlo.imag: https://openxla.org/stablehlo/spec#imag
  .. _stablehlo.complex: https://openxla.org/stablehlo/spec#complex
  """
  # TODO(mattjj): remove input_dtype, not needed anymore
  return conj_p.bind(x, input_dtype=_dtype(x))
@export
def abs(x: ArrayLike) -> Array:
  r"""Compute the elementwise absolute value :math:`|x|`.

  Lowers directly to the `stablehlo.abs`_ operation.

  Args:
    x: array with signed integer, floating-point, or complex dtype.

  Returns:
    An array of the same shape as ``x`` with the elementwise absolute value;
    for complex input :math:`a + ib` this is :math:`\sqrt{a^2+b^2}`.

  See also:
    :func:`jax.numpy.abs`, a more flexible NumPy-style implementation.

  .. _stablehlo.abs: https://openxla.org/stablehlo/spec#abs
  """
  return abs_p.bind(x)
@export
def pow(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise power :math:`x^y`.

  Lowers directly to the `stablehlo.pow`_ operation, plus a
  `stablehlo.convert`_ when the argument dtypes differ.

  Args:
    x: base array; must have floating-point or complex dtype.
    y: exponent array with integer, floating-point, or complex dtype; cast to
      ``x.dtype`` if necessary. Unless one of the two is a scalar, ``x`` and
      ``y`` must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise power, with the dtype of ``x``.

  See also:
    :func:`jax.lax.integer_pow` for a static-integer exponent.

  .. _stablehlo.convert: https://openxla.org/stablehlo/spec#convert
  .. _stablehlo.pow: https://openxla.org/stablehlo/spec#pow
  """
  return pow_p.bind(*core.standard_insert_pvary(x, y))
@export
def integer_pow(x: ArrayLike, y: int) -> Array:
  r"""Compute :math:`x^y` elementwise for a static integer exponent.

  Lowers to a sequence of :math:`O[\log_2(y)]` repetitions of
  `stablehlo.multiply`_.

  Args:
    x: base array with numerical dtype.
    y: static scalar integer exponent.

  Returns:
    An array of the same shape and dtype as ``x`` with the elementwise power.

  See also:
    :func:`jax.lax.pow` for an array-valued exponent.

  .. _stablehlo.multiply: https://openxla.org/stablehlo/spec#multiply
  """
  return integer_pow_p.bind(x, y=y)
@export
def sqrt(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise square root :math:`\sqrt{x}`.

  Lowers directly to the `stablehlo.sqrt`_ operation.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding the square root.

  See also:
    :func:`jax.lax.pow`, :func:`jax.lax.cbrt`, :func:`jax.lax.rsqrt`.

  .. _stablehlo.sqrt: https://openxla.org/stablehlo/spec#sqrt
  """
  return sqrt_p.bind(x, accuracy=accuracy)
@export
def rsqrt(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise reciprocal square root :math:`1 \over \sqrt{x}`.

  Lowers directly to the `stablehlo.rsqrt`_ operation.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding the reciprocal
    square root.

  See also:
    :func:`jax.lax.pow`, :func:`jax.lax.sqrt`, :func:`jax.lax.cbrt`.

  .. _stablehlo.rsqrt: https://openxla.org/stablehlo/spec#rsqrt
  """
  return rsqrt_p.bind(x, accuracy=accuracy)
@export
def cbrt(x: ArrayLike, accuracy=None) -> Array:
  r"""Compute the elementwise cube root :math:`\sqrt[3]{x}`.

  Lowers directly to the `stablehlo.cbrt`_ operation.

  Args:
    x: array with floating-point or complex dtype.
    accuracy: optional ``lax.Tolerance`` or ``lax.AccuracyMode`` selecting an
      implementation by requested accuracy; the compiler errors if the
      request cannot be satisfied, and falls back to the default when only
      one implementation exists.

  Returns:
    An array of the same shape and dtype as ``x`` holding the cube root.

  See also:
    :func:`jax.lax.pow`, :func:`jax.lax.sqrt`, :func:`jax.lax.rsqrt`.

  .. _stablehlo.cbrt: https://openxla.org/stablehlo/spec#cbrt
  """
  return cbrt_p.bind(x, accuracy=accuracy)
@export
def bitwise_not(x: ArrayLike) -> Array:
  r"""Compute the elementwise NOT :math:`\neg x`.

  Lowers directly to the `stablehlo.not`_ operation.

  Args:
    x: array with boolean or integer dtype.

  Returns:
    An array of the same shape and dtype as ``x`` with each entry bitwise
    inverted.

  See also:
    :func:`jax.numpy.invert` (the ``~x`` operator on JAX arrays),
    :func:`jax.lax.bitwise_and`, :func:`jax.lax.bitwise_or`,
    :func:`jax.lax.bitwise_xor`.

  .. _stablehlo.not: https://openxla.org/stablehlo/spec#not
  """
  return not_p.bind(x)
@export
def bitwise_and(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise AND :math:`x \wedge y`.

  Lowers directly to the `stablehlo.and`_ operation.

  Args:
    x, y: arrays of identical boolean or integer dtype; unless one is a
      scalar, both must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise bitwise AND, with the common input dtype.

  See also:
    :func:`jax.numpy.bitwise_and` (the ``x & y`` operator on JAX arrays),
    :func:`jax.lax.bitwise_not`, :func:`jax.lax.bitwise_or`,
    :func:`jax.lax.bitwise_xor`.

  .. _stablehlo.and: https://openxla.org/stablehlo/spec#and
  """
  return and_p.bind(*core.standard_insert_pvary(x, y))
@export
def bitwise_or(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Elementwise OR: :math:`x \vee y`.

  This function lowers directly to the `stablehlo.or`_ operation.

  Args:
    x, y: Input arrays. Must have matching boolean or integer dtypes.
      If neither is a scalar, ``x`` and ``y`` must have the same number
      of dimensions and be broadcast compatible.

  Returns:
    An array of the same dtype as ``x`` and ``y`` containing the bitwise
    OR of each pair of broadcasted entries.

  See also:
    - :func:`jax.numpy.bitwise_or`: NumPy wrapper for this API, also
      accessible via the ``x | y`` operator on JAX arrays.
    - :func:`jax.lax.bitwise_not`: Elementwise NOT.
    - :func:`jax.lax.bitwise_and`: Elementwise AND.
    - :func:`jax.lax.bitwise_xor`: Elementwise exclusive OR.

  .. _stablehlo.or: https://openxla.org/stablehlo/spec#or
  """
  x, y = core.standard_insert_pvary(x, y)
  return or_p.bind(x, y)
@export
def bitwise_xor(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise exclusive OR :math:`x \oplus y`.

  Lowers directly to the `stablehlo.xor`_ operation.

  Args:
    x, y: arrays of identical boolean or integer dtype; unless one is a
      scalar, both must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise bitwise XOR, with the common input dtype.

  See also:
    :func:`jax.numpy.bitwise_xor` (the ``x ^ y`` operator on JAX arrays),
    :func:`jax.lax.bitwise_not`, :func:`jax.lax.bitwise_and`,
    :func:`jax.lax.bitwise_or`.

  .. _stablehlo.xor: https://openxla.org/stablehlo/spec#xor
  """
  return xor_p.bind(*core.standard_insert_pvary(x, y))
@export
def population_count(x: ArrayLike) -> Array:
  r"""Count the set bits in each element (popcount).

  Lowers directly to the `stablehlo.popcnt`_ operation.

  Args:
    x: array with integer dtype.

  Returns:
    An array of the same shape and dtype as ``x`` giving the number of set
    bits in each element.

  See also:
    :func:`jax.lax.clz`, :func:`jax.numpy.bitwise_count`.

  .. _stablehlo.popcnt: https://openxla.org/stablehlo/spec#popcnt
  """
  return population_count_p.bind(x)
@export
def clz(x: ArrayLike) -> Array:
  r"""Elementwise count-leading-zeros.

  This function lowers directly to the `stablehlo.count_leading_zeros`_ operation.

  Args:
    x: Input array. Must have integer dtype.

  Returns:
    An array of the same shape and dtype as ``x``, containing the number of
    leading zero bits in each element.

  See also:
    - :func:`jax.lax.population_count`: Count the number of set bits in each element.

  .. _stablehlo.count_leading_zeros: https://openxla.org/stablehlo/spec#count_leading_zeros
  """
  return clz_p.bind(x)
@export
def add(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise sum :math:`x + y`.

  Lowers directly to the `stablehlo.add`_ operation.

  Args:
    x, y: arrays of identical numerical dtype; unless one is a scalar, both
      must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise sum, with the common input dtype.

  See also:
    :func:`jax.numpy.add` for NumPy-style addition with mixed dtypes/ranks.

  .. _stablehlo.add: https://openxla.org/stablehlo/spec#add
  """
  return add_p.bind(*core.standard_insert_pvary(x, y))
@export
def sub(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise difference :math:`x - y`.

  Lowers directly to the `stablehlo.subtract`_ operation.

  Args:
    x, y: arrays of identical numerical dtype; unless one is a scalar, both
      must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise difference, with the common input dtype.

  See also:
    :func:`jax.numpy.subtract` for NumPy-style subtraction with mixed
    dtypes/ranks.

  .. _stablehlo.subtract: https://openxla.org/stablehlo/spec#subtract
  """
  return sub_p.bind(*core.standard_insert_pvary(x, y))
@export
def mul(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise product :math:`x \times y`.

  Lowers directly to the `stablehlo.multiply`_ operation.

  Args:
    x, y: arrays of identical numerical dtype; unless one is a scalar, both
      must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise product, with the common input dtype.

  See also:
    :func:`jax.numpy.multiply` for NumPy-style multiplication with mixed
    dtypes/ranks.

  .. _stablehlo.multiply: https://openxla.org/stablehlo/spec#multiply
  """
  return mul_p.bind(*core.standard_insert_pvary(x, y))
@export
def div(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise quotient :math:`x \over y`.

  Lowers directly to the `stablehlo.divide`_ operation. Integer division
  overflow (division by zero, or signed division of INT_SMIN by -1) yields
  an implementation-defined value.

  Args:
    x, y: arrays of identical numerical dtype; unless one is a scalar, both
      must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise quotient, with the common input dtype; for
    integer inputs any fractional part is discarded.

  See also:
    :func:`jax.numpy.divide` and :func:`jax.numpy.floor_divide` for
    NumPy-style division with mixed dtypes/ranks.

  .. _stablehlo.divide: https://openxla.org/stablehlo/spec#divide
  """
  return div_p.bind(*core.standard_insert_pvary(x, y))
@export
def rem(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise remainder :math:`x \bmod y`.

  Lowers directly to the `stablehlo.remainder`_ operation. The result takes
  its sign from the dividend and its magnitude is strictly less than the
  divisor's. Integer overflow (remainder by zero, or INT_SMIN with -1)
  yields an implementation-defined value.

  Args:
    x, y: arrays of identical integer or floating-point dtype; unless one is
      a scalar, both must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise remainder, with the common input dtype.

  See also:
    :func:`jax.numpy.remainder`, which uses different sign semantics.

  .. _stablehlo.remainder: https://openxla.org/stablehlo/spec#remainder
  """
  return rem_p.bind(*core.standard_insert_pvary(x, y))
@export
def max(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise maximum :math:`\mathrm{max}(x, y)`.

  Non-complex inputs lower directly to the `stablehlo.maximum`_ operation;
  complex numbers are compared lexicographically on (real, imaginary) pairs.

  Args:
    x, y: arrays of identical dtype; unless one is a scalar, both must have
      equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise maximum, with the common input dtype.

  See also:
    :func:`jax.numpy.maximum`, :func:`jax.lax.reduce_max`,
    :func:`jax.lax.min`.

  .. _stablehlo.maximum: https://openxla.org/stablehlo/spec#maximum
  """
  return max_p.bind(*core.standard_insert_pvary(x, y))
@export
def min(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise minimum :math:`\mathrm{min}(x, y)`.

  Non-complex inputs lower directly to the `stablehlo.minimum`_ operation;
  complex numbers are compared lexicographically on (real, imaginary) pairs.

  Args:
    x, y: arrays of identical dtype; unless one is a scalar, both must have
      equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise minimum, with the common input dtype.

  See also:
    :func:`jax.numpy.minimum`, :func:`jax.lax.reduce_min`,
    :func:`jax.lax.max`.

  .. _stablehlo.minimum: https://openxla.org/stablehlo/spec#minimum
  """
  return min_p.bind(*core.standard_insert_pvary(x, y))
@export
def shift_left(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise left shift :math:`x \ll y`.

  Lowers directly to the `stablehlo.shift_left`_ operation.

  Args:
    x, y: arrays of identical integer dtype; unless one is a scalar, both
      must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise left shift, with the common input dtype.

  See also:
    :func:`jax.numpy.left_shift` (the ``x << y`` operator on JAX arrays),
    :func:`jax.lax.shift_right_arithmetic`,
    :func:`jax.lax.shift_right_logical`.

  .. _stablehlo.shift_left: https://openxla.org/stablehlo/spec#shift_left
  """
  return shift_left_p.bind(*core.standard_insert_pvary(x, y))
@export
def shift_right_arithmetic(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise arithmetic right shift :math:`x \gg y`.

  Lowers directly to the `stablehlo.shift_right_arithmetic`_ operation.

  Args:
    x, y: arrays of identical integer dtype; unless one is a scalar, both
      must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise arithmetic right shift, with the common
    input dtype.

  See also:
    :func:`jax.numpy.right_shift` (the ``x >> y`` operator on signed-integer
    JAX arrays), :func:`jax.lax.shift_left`,
    :func:`jax.lax.shift_right_logical`.

  .. _stablehlo.shift_right_arithmetic: https://openxla.org/stablehlo/spec#shift_right_arithmetic
  """
  return shift_right_arithmetic_p.bind(*core.standard_insert_pvary(x, y))
@export
def shift_right_logical(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise logical right shift :math:`x \gg y`.

  Lowers directly to the `stablehlo.shift_right_logical`_ operation.

  Args:
    x, y: arrays of identical integer dtype; unless one is a scalar, both
      must have equal rank and be broadcast compatible.

  Returns:
    The broadcasted elementwise logical right shift, with the common input
    dtype.

  See also:
    :func:`jax.numpy.right_shift` (the ``x >> y`` operator on
    unsigned-integer JAX arrays), :func:`jax.lax.shift_left`,
    :func:`jax.lax.shift_right_arithmetic`.

  .. _stablehlo.shift_right_logical: https://openxla.org/stablehlo/spec#shift_right_logical
  """
  return shift_right_logical_p.bind(*core.standard_insert_pvary(x, y))
@export
def eq(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise equality comparison :math:`x = y`.

  Lowers directly to the `stablehlo.compare`_ operation with
  ``comparison_direction=EQ`` and a ``compare_type`` chosen from the input
  dtype.

  Args:
    x, y: arrays of identical dtype; unless one is a scalar, both must have
      equal rank and be broadcast compatible.

  Returns:
    A boolean array of shape ``lax.broadcast_shapes(x.shape, y.shape)``
    holding the elementwise equality comparison.

  See also:
    :func:`jax.numpy.equal` (the ``x == y`` operator on JAX arrays),
    :func:`jax.lax.ne`, :func:`jax.lax.ge`, :func:`jax.lax.gt`,
    :func:`jax.lax.le`, :func:`jax.lax.lt`.

  .. _stablehlo.compare: https://openxla.org/stablehlo/spec#compare
  """
  return eq_p.bind(*core.standard_insert_pvary(x, y))
@export
def ne(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Compute the elementwise inequality comparison :math:`x \neq y`.

  Lowers directly to the `stablehlo.compare`_ operation with
  ``comparison_direction=NE`` and a ``compare_type`` chosen from the input
  dtype.

  Args:
    x, y: arrays of identical dtype; unless one is a scalar, both must have
      equal rank and be broadcast compatible.

  Returns:
    A boolean array of shape ``lax.broadcast_shapes(x.shape, y.shape)``
    holding the elementwise not-equal comparison.

  See also:
    :func:`jax.numpy.not_equal` (the ``x != y`` operator on JAX arrays),
    :func:`jax.lax.eq`, :func:`jax.lax.ge`, :func:`jax.lax.gt`,
    :func:`jax.lax.le`, :func:`jax.lax.lt`.

  .. _stablehlo.compare: https://openxla.org/stablehlo/spec#compare
  """
  return ne_p.bind(*core.standard_insert_pvary(x, y))
@export
def ge(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Elementwise greater-than-or-equals: :math:`x \geq y`.

  Lowers directly to the `stablehlo.compare`_ operation with
  ``comparison_direction=GE`` and a ``compare_type`` derived from the
  input dtype.

  Args:
    x, y: input arrays with matching non-complex dtypes. If neither is a
      scalar, they must have the same rank and be broadcast compatible.

  Returns:
    A boolean array of shape ``lax.broadcast_shapes(x.shape, y.shape)``
    holding the elementwise greater-than-or-equal comparison.

  See also:
    - :func:`jax.numpy.greater_equal`: NumPy wrapper (the ``x >= y``
      operator on JAX arrays).
    - :func:`jax.lax.eq`, :func:`jax.lax.ne`, :func:`jax.lax.gt`,
      :func:`jax.lax.le`, :func:`jax.lax.lt`: the other elementwise
      comparisons.

  .. _stablehlo.compare: https://openxla.org/stablehlo/spec#compare
  """
  return ge_p.bind(*core.standard_insert_pvary(x, y))
@export
def gt(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Elementwise greater-than: :math:`x > y`.

  Lowers directly to the `stablehlo.compare`_ operation with
  ``comparison_direction=GT`` and a ``compare_type`` derived from the
  input dtype.

  Args:
    x, y: input arrays with matching non-complex dtypes. If neither is a
      scalar, they must have the same rank and be broadcast compatible.

  Returns:
    A boolean array of shape ``lax.broadcast_shapes(x.shape, y.shape)``
    holding the elementwise greater-than comparison.

  See also:
    - :func:`jax.numpy.greater`: NumPy wrapper (the ``x > y`` operator on
      JAX arrays).
    - :func:`jax.lax.eq`, :func:`jax.lax.ne`, :func:`jax.lax.ge`,
      :func:`jax.lax.le`, :func:`jax.lax.lt`: the other elementwise
      comparisons.

  .. _stablehlo.compare: https://openxla.org/stablehlo/spec#compare
  """
  return gt_p.bind(*core.standard_insert_pvary(x, y))
@export
def le(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Elementwise less-than-or-equals: :math:`x \leq y`.

  Lowers directly to the `stablehlo.compare`_ operation with
  ``comparison_direction=LE`` and a ``compare_type`` derived from the
  input dtype.

  Args:
    x, y: input arrays with matching non-complex dtypes. If neither is a
      scalar, they must have the same rank and be broadcast compatible.

  Returns:
    A boolean array of shape ``lax.broadcast_shapes(x.shape, y.shape)``
    holding the elementwise less-than-or-equal comparison.

  See also:
    - :func:`jax.numpy.less_equal`: NumPy wrapper (the ``x <= y`` operator
      on JAX arrays).
    - :func:`jax.lax.eq`, :func:`jax.lax.ne`, :func:`jax.lax.ge`,
      :func:`jax.lax.gt`, :func:`jax.lax.lt`: the other elementwise
      comparisons.

  .. _stablehlo.compare: https://openxla.org/stablehlo/spec#compare
  """
  return le_p.bind(*core.standard_insert_pvary(x, y))
@export
def lt(x: ArrayLike, y: ArrayLike) -> Array:
  r"""Elementwise less-than: :math:`x < y`.

  Lowers directly to the `stablehlo.compare`_ operation with
  ``comparison_direction=LT`` and a ``compare_type`` derived from the
  input dtype.

  Args:
    x, y: input arrays with matching non-complex dtypes. If neither is a
      scalar, they must have the same rank and be broadcast compatible.

  Returns:
    A boolean array of shape ``lax.broadcast_shapes(x.shape, y.shape)``
    holding the elementwise less-than comparison.

  See also:
    - :func:`jax.numpy.less`: NumPy wrapper (the ``x < y`` operator on
      JAX arrays).
    - :func:`jax.lax.eq`, :func:`jax.lax.ne`, :func:`jax.lax.ge`,
      :func:`jax.lax.gt`, :func:`jax.lax.le`: the other elementwise
      comparisons.

  .. _stablehlo.compare: https://openxla.org/stablehlo/spec#compare
  """
  return lt_p.bind(*core.standard_insert_pvary(x, y))
@export
def convert_element_type(operand: ArrayLike,
                         new_dtype: DTypeLike | dtypes.ExtendedDType) -> Array:
  """Elementwise cast.

  Lowers directly to the `stablehlo.convert`_ operation: an elementwise
  value conversion from one dtype to another, similar to a C++
  ``static_cast``.

  Args:
    operand: an array or scalar value to be cast.
    new_dtype: a dtype-like object (a :class:`numpy.dtype`, a scalar type,
      or a valid dtype name) naming the target dtype.

  Returns:
    An array with the same shape as ``operand``, cast elementwise to
    ``new_dtype``.

  .. note::
    If ``new_dtype`` is a 64-bit type and `x64 mode`_ is not enabled, the
    matching 32-bit type is used in its place. A JAX array whose dtype
    already equals the (canonicalized) target is returned unmodified.

  See also:
    - :func:`jax.numpy.astype`: NumPy-style dtype casting API.
    - :meth:`jax.Array.astype`: dtype casting as an array method.
    - :func:`jax.lax.bitcast_convert_type`: reinterpret bits as a new dtype.

  .. _stablehlo.convert: https://openxla.org/stablehlo/spec#convert
  .. _x64 mode: https://docs.jax.dev/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
  """
  canonical_dtype = dtypes.check_and_canonicalize_user_dtype(
      new_dtype, 'convert_element_type')
  return _convert_element_type(operand, canonical_dtype, weak_type=False)  # type: ignore[unused-ignore,bad-return-type]
def _convert_element_type(
    operand: ArrayLike | literals.TypedNdArray,
    new_dtype: DType | None = None,
    weak_type: bool = False,
    sharding: Sharding | None = None,
    warn_on_complex_to_real_cast: bool = True):
  """Internal dtype-conversion helper behind `convert_element_type`.

  Unlike the public API, this also threads ``weak_type`` and ``sharding``
  through to the primitive, handles extended (opaque) dtypes, and can
  suppress the complex->real warning.
  """
  if hasattr(operand, '__jax_array__'):
    operand = operand.__jax_array__()
  old_dtype = dtypes.dtype(operand)
  # Extended (opaque) dtype handling: only the identity conversion or a
  # conversion to/from the physical representation dtype is supported;
  # direct extended->extended casts get an actionable error below.
  if (isinstance(new_dtype, dtypes.ExtendedDType) or
      isinstance(old_dtype, dtypes.ExtendedDType)):
    if new_dtype == old_dtype:
      if sharding is None:
        return operand
      if isinstance(operand, core.Tracer) and operand.aval.sharding == sharding:
        return operand
    if sharding is not None or weak_type:
      raise NotImplementedError
    if (isinstance(new_dtype, dtypes.ExtendedDType) and
        isinstance(old_dtype, dtypes.ExtendedDType)):
      old_rep_dtype = core.physical_element_aval(old_dtype).dtype
      new_rep_dtype = core.physical_element_aval(new_dtype).dtype
      raise ValueError(
          "cannot directly convert between extended dtypes: from "
          f"{dtype_to_string(old_dtype)} to {dtype_to_string(new_dtype)}. "
          "Instead, convert to and from their representation dtypes, e.g.:\n"
          f"{dtype_to_string(old_dtype)} -> {dtype_to_string(old_rep_dtype)} "
          f"-> {dtype_to_string(new_rep_dtype)} -> {dtype_to_string(new_dtype)}")
    if isinstance(new_dtype, dtypes.ExtendedDType):
      return to_edtype_p.bind(operand, edtype=new_dtype)
    return from_edtype_p.bind(operand, dtype=np.dtype(new_dtype))
  old_weak_type = dtypes.is_weakly_typed(operand)
  if new_dtype is None:
    new_dtype = old_dtype
  else:
    assert isinstance(new_dtype, DType), new_dtype
  if sharding is not None and not isinstance(sharding, Sharding):
    raise ValueError(f'{sharding=} must be an instance of jax.sharding.Sharding')
  if (warn_on_complex_to_real_cast and
      dtypes.issubdtype(old_dtype, np.complexfloating) and
      not dtypes.issubdtype(new_dtype, np.complexfloating)):
    msg = "Casting complex values to real discards the imaginary part"
    warnings.warn(msg, np.exceptions.ComplexWarning, stacklevel=2)
  # Python has big integers, but convert_element_type(2 ** 100, np.float32) need
  # not be an error since the target dtype fits the value. Handle this case by
  # converting to a NumPy array before calling bind. Without this step, we'd
  # first canonicalize the input to a value of dtype int32 or int64, leading to
  # an overflow error.
  if type(operand) is int and new_dtype != dtypes.float0:
    operand = literals.TypedNdArray(np.asarray(operand).astype(new_dtype),
                                    weak_type)
  # No-op fast path: a committed Array whose dtype/weak_type already match
  # (and whose sharding matches, when one was requested) is returned as-is.
  if ((old_dtype, old_weak_type) == (new_dtype, weak_type) and
      isinstance(operand, Array) and
      not (isinstance(operand, core.Tracer) and core.is_concrete(operand)) and
      (sharding is None or
       (sharding._is_concrete and getattr(operand, 'sharding', None) == sharding))):
    return operand
  else:
    return convert_element_type_p.bind(
        operand, new_dtype=new_dtype, weak_type=bool(weak_type),
        sharding=sharding)
@export
def bitcast_convert_type(operand: ArrayLike, new_dtype: DTypeLike) -> Array:
  """Elementwise bitcast.

  Lowers directly to the `stablehlo.bitcast_convert`_ operation. The output
  shape follows from the relative element sizes::

    if new_dtype.itemsize == operand.dtype.itemsize:
      output_shape = operand.shape
    if new_dtype.itemsize < operand.dtype.itemsize:
      output_shape = (*operand.shape, operand.dtype.itemsize // new_dtype.itemsize)
    if new_dtype.itemsize > operand.dtype.itemsize:
      assert operand.shape[-1] * operand.dtype.itemsize == new_dtype.itemsize
      output_shape = operand.shape[:-1]

  Args:
    operand: an array or scalar value whose bits are reinterpreted.
    new_dtype: the new type. Should be a NumPy type.

  Returns:
    An array of shape `output_shape` (see above) and type `new_dtype`,
    constructed from the same bits as operand.

  See also:
    - :func:`jax.lax.convert_element_type`: value-preserving dtype conversion.
    - :func:`jax.Array.view`: NumPy-style API for bitcast type conversion.

  .. _stablehlo.bitcast_convert: https://openxla.org/stablehlo/spec#bitcast_convert
  """
  target_dtype = dtypes.check_and_canonicalize_user_dtype(
      new_dtype, 'bitcast_convert_type')
  return bitcast_convert_type_p.bind(operand, new_dtype=target_dtype)
def clamp(min: ArrayLike, x: ArrayLike, max: ArrayLike) -> Array:
  r"""Elementwise clamp.

  Returns :math:`\mathrm{clamp}(x) = \begin{cases}
  \mathit{min} & \text{if } x < \mathit{min},\\
  \mathit{max} & \text{if } x > \mathit{max},\\
  x & \text{otherwise}
  \end{cases}`.
  """
  return clamp_p.bind(*core.standard_insert_pvary(min, x, max))
@weakref_lru_cache
def _trace_composite_to_jaxpr(fun: Callable,
                              in_tree: tree_util.PyTreeDef,
                              in_avals: Sequence[core.AbstractValue],
                              name: str,
                              debug_info: core.DebugInfo):
  """Trace a composite's decomposition function to a closed jaxpr.

  Cached via ``weakref_lru_cache`` so repeated composite calls with the
  same decomposition and avals do not re-trace.

  Returns:
    A ``(closed_jaxpr, consts, out_tree)`` triple.

  Raises:
    UnexpectedTracerError: if tracing produces a constant that is itself a
      live ``Tracer`` (i.e. the decomposition closed over a transformed
      value instead of taking it as an explicit argument).
  """
  flat_fun, out_tree = api_util.flatten_fun_nokwargs(
      lu.wrap_init(fun, debug_info=debug_info), in_tree)
  jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals)
  if any(isinstance(c, core.Tracer) for c in consts):
    raise UnexpectedTracerError(
        "Found a JAX Tracer as a constant in the decomposition for the "
        f"composite op '{name}'. This means that the decomposition function "
        "closes over a value that is involved in a JAX transformation. "
        "Any values that aren't explicitly known at compile time must be "
        "explicitly passed as arguments to the composite.")
  # Convert captured constants into explicit invars so the jaxpr is closed.
  closed_jaxpr = pe.close_jaxpr(pe.convert_constvars_jaxpr(jaxpr))
  return closed_jaxpr, consts, out_tree
def composite(
    decomposition: Callable,
    name: str,
    version: int = 0,
):
  """Composite with semantics defined by the decomposition function.

  A composite is a higher-order JAX function that encapsulates an operation made
  up (composed) of other JAX functions. The semantics of the op are implemented
  by the ``decomposition`` function. In other words, the defined composite
  function can be replaced with its decomposed implementation without changing
  the semantics of the encapsulated operation.

  The compiler can recognize specific composite operations by their ``name``,
  ``version``, ``kwargs``, and dtypes to emit more efficient code, potentially
  leveraging hardware-specific instructions or optimizations. If the compiler
  doesn't recognize the composite, it falls back to compiling the
  ``decomposition`` function.

  Consider a "tangent" composite operation. Its ``decomposition`` function could
  be implemented as ``sin(x) / cos(x)``. A hardware-aware compiler could
  recognize the "tangent" composite and emit a single ``tangent`` instruction
  instead of three separate instructions (``sin``, ``divide``, and ``cos``).
  For hardware without dedicated tangent support, it would fall back to
  compiling the decomposition.

  This is useful for preserving high-level abstractions that would otherwise be
  lost while lowering, which allows for easier pattern-matching in low-level IR.

  Args:
    decomposition: function that implements the semantics of the composite op.
    name: name of the encapsulated operation.
    version: optional int to indicate semantic changes to the composite.

  Returns:
    Callable: Returns a composite function. Note that positional arguments to
    this function should be interpreted as inputs and keyword arguments should
    be interpreted as attributes of the op. Any keyword arguments that are
    passed with ``None`` as a value will be omitted from the
    ``composite_attributes``.

  Examples:
    Tangent kernel:

    >>> def my_tangent_composite(x):
    ...   return lax.composite(
    ...       lambda x: lax.sin(x) / lax.cos(x), name="my.tangent"
    ...   )(x)
    >>>
    >>> pi = jnp.pi
    >>> x = jnp.array([0.0, pi / 4, 3 * pi / 4, pi])
    >>> with jnp.printoptions(precision=3, suppress=True):
    ...   print(my_tangent_composite(x))
    ...   print(lax.tan(x))
    [ 0. 1. -1. 0.]
    [ 0. 1. -1. 0.]

    The recommended way to create composites is via a decorator. Use ``/`` and
    ``*`` in the function signature to be explicit about positional and keyword
    arguments, respectively:

    >>> @partial(lax.composite, name="my.softmax")
    ... def my_softmax_composite(x, /, *, axis):
    ...   return jax.nn.softmax(x, axis)
  """
  @functools.wraps(decomposition)
  def _decorator(*args, **kwargs):
    debug_info = api_util.debug_info("composite", decomposition,
                                     args, {})
    flat_args, in_tree = tree_util.tree_flatten(args)
    in_avals = tuple(core.get_aval(x) for x in flat_args)
    # Attributes must be compile-time constants: reject traced kwargs early
    # with an actionable error rather than failing deep inside tracing.
    if any(isinstance(v, core.Tracer) for v in kwargs.values()):
      raise UnexpectedTracerError(
          "Found a JAX Tracer as an attribute in the decomposition for the "
          f"composite op '{name}'. This means that the decomposition function "
          "closes over a value that is involved in a JAX transformation. "
          "Any values that aren't explicitly known at compile time must be "
          "explicitly passed as arguments to the composite."
          "\n\nNote: If you are passing jax arrays as attributes, use numpy "
          "arrays instead.")
    closed_jaxpr, consts, out_tree = _trace_composite_to_jaxpr(
        partial(decomposition, **kwargs), in_tree, in_avals, name, debug_info
    )
    # Flatten each attribute pytree; ndarray leaves are wrapped so the
    # attributes tuple stays hashable (primitive params must be hashable).
    attributes = []
    for k, v in kwargs.items():
      leaves, treedef = tree_util.tree_flatten(v)
      leaves = tuple(
          HashableArray(v) if isinstance(v, np.ndarray) else v for v in leaves
      )
      attributes.append((k, leaves, treedef))
    flat_consts_and_args = core.standard_insert_pvary(*consts, *flat_args)
    out_flat = composite_p.bind(
        *flat_consts_and_args,
        name=name,
        attributes=tuple(attributes),
        version=version,
    )
    return tree_util.tree_unflatten(out_tree(), out_flat)
  return _decorator
def _composite_lowering(
    ctx: mlir.LoweringRuleContext,
    *args: Any,
    name: str,
    attributes: Sequence[tuple[str, tuple[Any, ...], tree_util.PyTreeDef]],
    version: int,
    jaxpr: core.ClosedJaxpr,
):
  """Makes composite which calls the implementation function.

  Lowering a composite primitive to a ``stablehlo.composite`` op.

  Args:
    ctx: The MLIR context.
    *args: The arguments to the composite.
    name: The name of the composite.
    attributes: The attributes of the composite.
    version: The version of the composite.
    jaxpr: The jaxpr of the underlying composite.

  Returns:
    The results of the composite.
  """
  # Constants captured by the jaxpr are materialized as IR constants and
  # become leading operands of the lowered decomposition function.
  const_args_and_avals = core.jaxpr_const_args(jaxpr.jaxpr)
  const_args, const_avals = util.unzip2(const_args_and_avals)
  const_arg_values = tuple(
      mlir.ir_constant(c, const_lowering=ctx.const_lowering, aval=aval)
      for c, aval in const_args_and_avals
  )
  in_avals = (*const_avals, *ctx.avals_in)
  func_op, _, _ = mlir.lower_called_computation(
      name,
      jaxpr,
      ctx.module_context,
      len(const_args),
      in_avals,
      ctx.avals_out,
      ctx.tokens_in,
  )
  # Rebuild each attribute pytree; attributes whose unflattened value is
  # None are omitted from composite_attributes (documented API contract).
  composite_attrs = {}
  for k, leaves, treedef in attributes:
    v = treedef.unflatten(leaves)
    if v is not None:
      composite_attrs[k] = mlir.ir_attribute(v)
  symbol_name = func_op.name.value
  composite = hlo.CompositeOp(
      func_op.type.results,
      mlir.flatten_ir_values(const_arg_values + args),
      name=ir.StringAttr.get(name),
      decomposition=ir.FlatSymbolRefAttr.get(symbol_name),
      composite_attributes=ir.DictAttr.get(composite_attrs),
      version=mlir.i32_attr(version),
  )
  return composite.results
def _composite_impl(*args, jaxpr, **_):
  """Concrete impl rule: evaluate the decomposition jaxpr on the args."""
  decomposition_fun = core.jaxpr_as_fun(jaxpr)
  return decomposition_fun(*args)
def _composite_abstract_eval(*args, jaxpr, **_):
del args
return jaxpr.out_avals
def composite_jvp(*args, **_):
  """JVP rule placeholder: composites are not differentiable by default."""
  raise ValueError(
      "JVP rule for composite not implemented. You can use `jax.custom_jvp` "
      "to add support. See "
      "https://docs.jax.dev/en/latest/_autosummary/jax.custom_jvp.html"
  )
def composite_transpose(*args, **_):
  """Transpose rule placeholder: composites are not differentiable by default.

  Raises:
    ValueError: always, pointing users at jax.custom_jvp / jax.custom_vjp.
  """
  del args
  # Bug fix: the original string concatenation was missing a space between
  # "use" and "`jax.custom_jvp`", producing "use`jax.custom_jvp`".
  raise ValueError(
      "Transpose rule for composite not implemented. You can use "
      "`jax.custom_jvp` or `jax.custom_vjp` to add support. See "
      "https://docs.jax.dev/en/latest/_autosummary/jax.custom_jvp.html"
  )
# The composite primitive: multiple results; the concrete impl runs the
# decomposition jaxpr, and lowering emits a stablehlo.composite op.
composite_p = core.Primitive("composite")
composite_p.def_impl(_composite_impl)
composite_p.def_abstract_eval(_composite_abstract_eval)
composite_p.multiple_results = True
# Differentiation is intentionally unsupported: these rules always raise,
# directing users to jax.custom_jvp / jax.custom_vjp.
ad.primitive_jvps[composite_p] = composite_jvp
ad.primitive_transposes[composite_p] = composite_transpose
mlir.register_lowering(composite_p, _composite_lowering)
def concatenate(operands: Array | Sequence[ArrayLike], dimension: int) -> Array:
  """Concatenates a sequence of arrays along `dimension`.

  Wraps XLA's `Concatenate
  <https://www.openxla.org/xla/operation_semantics#concatenate>`_
  operator.

  Args:
    operands: the arrays to concatenate. Their shapes must agree everywhere
      except along the `dimension` axis.
    dimension: the axis along which the arrays are joined.

  Returns:
    An array containing the concatenation.
  """
  if not operands:
    raise ValueError("concatenate requires a non-empty sequences of arrays")
  if len(operands) == 1:
    only, = operands
    # A lone Array needs no concatenation; other array-likes fall through
    # so the primitive converts them.
    if isinstance(only, Array):
      return only
  return concatenate_p.bind(*core.standard_insert_pvary(*operands),
                            dimension=dimension)
def split(operand: ArrayLike, sizes: Sequence[int],
          axis: int = 0) -> Sequence[Array]:
  """Splits an array along ``axis``.

  Args:
    operand: the array to split.
    sizes: the sizes of the resulting chunks; they must sum to the size of
      the ``axis`` dimension of ``operand``.
    axis: the axis along which to split the array.

  Returns:
    A sequence of ``len(sizes)`` arrays: consecutive slices of ``operand``
    along ``axis`` with the requested sizes.
  """
  arr = asarray(operand)
  normalized_axis = canonicalize_axis(axis, arr.ndim)
  return split_p.bind(arr, sizes=tuple(sizes), axis=normalized_axis)
# Lookup table from user-facing precision specifiers to Precision values.
# NOTE(review): initialized empty here; presumably populated by the
# Precision setup code elsewhere in this module — verify.
_precision_strings: dict[Any, Precision] = {}
| AccuracyMode |
python | scikit-learn__scikit-learn | sklearn/mixture/_base.py | {
"start": 1235,
"end": 21074
} | class ____(DensityMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for mixture models.
This abstract class specifies an interface for all mixture classes and
provides basic common methods for mixture models.
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0.0, None, closed="left")],
"reg_covar": [Interval(Real, 0.0, None, closed="left")],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"n_init": [Interval(Integral, 1, None, closed="left")],
"init_params": [
StrOptions({"kmeans", "random", "random_from_data", "k-means++"})
],
"random_state": ["random_state"],
"warm_start": ["boolean"],
"verbose": ["verbose"],
"verbose_interval": [Interval(Integral, 1, None, closed="left")],
}
def __init__(
self,
n_components,
tol,
reg_covar,
max_iter,
n_init,
init_params,
random_state,
warm_start,
verbose,
verbose_interval,
):
self.n_components = n_components
self.tol = tol
self.reg_covar = reg_covar
self.max_iter = max_iter
self.n_init = n_init
self.init_params = init_params
self.random_state = random_state
self.warm_start = warm_start
self.verbose = verbose
self.verbose_interval = verbose_interval
@abstractmethod
def _check_parameters(self, X, xp=None):
"""Check initial parameters of the derived class.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
pass
def _initialize_parameters(self, X, random_state, xp=None):
"""Initialize the model parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
random_state : RandomState
A random number generator instance that controls the random seed
used for the method chosen to initialize the parameters.
"""
xp, _, device = get_namespace_and_device(X, xp=xp)
n_samples, _ = X.shape
if self.init_params == "kmeans":
resp = np.zeros((n_samples, self.n_components), dtype=X.dtype)
label = (
cluster.KMeans(
n_clusters=self.n_components, n_init=1, random_state=random_state
)
.fit(X)
.labels_
)
resp[np.arange(n_samples), label] = 1
elif self.init_params == "random":
resp = xp.asarray(
random_state.uniform(size=(n_samples, self.n_components)),
dtype=X.dtype,
device=device,
)
resp /= xp.sum(resp, axis=1)[:, xp.newaxis]
elif self.init_params == "random_from_data":
resp = xp.zeros(
(n_samples, self.n_components), dtype=X.dtype, device=device
)
indices = random_state.choice(
n_samples, size=self.n_components, replace=False
)
# TODO: when array API supports __setitem__ with fancy indexing we
# can use the previous code:
# resp[indices, xp.arange(self.n_components)] = 1
# Until then we use a for loop on one dimension.
for col, index in enumerate(indices):
resp[index, col] = 1
elif self.init_params == "k-means++":
resp = np.zeros((n_samples, self.n_components), dtype=X.dtype)
_, indices = kmeans_plusplus(
X,
self.n_components,
random_state=random_state,
)
resp[indices, np.arange(self.n_components)] = 1
self._initialize(X, resp)
@abstractmethod
def _initialize(self, X, resp):
"""Initialize the model parameters of the derived class.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
pass
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
The method fits the model ``n_init`` times and sets the parameters with
which the model has the largest likelihood or lower bound. Within each
trial, the method iterates between E-step and M-step for ``max_iter``
times until the change of likelihood or lower bound is less than
``tol``, otherwise, a ``ConvergenceWarning`` is raised.
If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
initialization is performed upon the first call. Upon consecutive
calls, training starts where it left off.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
The fitted mixture.
"""
# parameters are validated in fit_predict
self.fit_predict(X, y)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_predict(self, X, y=None):
"""Estimate model parameters using X and predict the labels for X.
The method fits the model ``n_init`` times and sets the parameters with
which the model has the largest likelihood or lower bound. Within each
trial, the method iterates between E-step and M-step for `max_iter`
times until the change of likelihood or lower bound is less than
`tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is
raised. After fitting, it predicts the most probable label for the
input data points.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
labels : array, shape (n_samples,)
Component labels.
"""
xp, _ = get_namespace(X)
X = validate_data(self, X, dtype=[xp.float64, xp.float32], ensure_min_samples=2)
if X.shape[0] < self.n_components:
raise ValueError(
"Expected n_samples >= n_components "
f"but got n_components = {self.n_components}, "
f"n_samples = {X.shape[0]}"
)
self._check_parameters(X, xp=xp)
# if we enable warm_start, we will have a unique initialisation
do_init = not (self.warm_start and hasattr(self, "converged_"))
n_init = self.n_init if do_init else 1
max_lower_bound = -xp.inf
best_lower_bounds = []
self.converged_ = False
random_state = check_random_state(self.random_state)
n_samples, _ = X.shape
for init in range(n_init):
self._print_verbose_msg_init_beg(init)
if do_init:
self._initialize_parameters(X, random_state, xp=xp)
lower_bound = -xp.inf if do_init else self.lower_bound_
current_lower_bounds = []
if self.max_iter == 0:
best_params = self._get_parameters()
best_n_iter = 0
else:
converged = False
for n_iter in range(1, self.max_iter + 1):
prev_lower_bound = lower_bound
log_prob_norm, log_resp = self._e_step(X, xp=xp)
self._m_step(X, log_resp, xp=xp)
lower_bound = self._compute_lower_bound(log_resp, log_prob_norm)
current_lower_bounds.append(lower_bound)
change = lower_bound - prev_lower_bound
self._print_verbose_msg_iter_end(n_iter, change)
if abs(change) < self.tol:
converged = True
break
self._print_verbose_msg_init_end(lower_bound, converged)
if lower_bound > max_lower_bound or max_lower_bound == -xp.inf:
max_lower_bound = lower_bound
best_params = self._get_parameters()
best_n_iter = n_iter
best_lower_bounds = current_lower_bounds
self.converged_ = converged
# Should only warn about convergence if max_iter > 0, otherwise
# the user is assumed to have used 0-iters initialization
# to get the initial means.
if not self.converged_ and self.max_iter > 0:
warnings.warn(
(
"Best performing initialization did not converge. "
"Try different init parameters, or increase max_iter, "
"tol, or check for degenerate data."
),
ConvergenceWarning,
)
self._set_parameters(best_params, xp=xp)
self.n_iter_ = best_n_iter
self.lower_bound_ = max_lower_bound
self.lower_bounds_ = best_lower_bounds
# Always do a final e-step to guarantee that the labels returned by
# fit_predict(X) are always consistent with fit(X).predict(X)
# for any value of max_iter and tol (and any random_state).
_, log_resp = self._e_step(X, xp=xp)
return xp.argmax(log_resp, axis=1)
def _e_step(self, X, xp=None):
"""E step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
log_prob_norm : float
Mean of the logarithms of the probabilities of each sample in X
log_responsibility : array, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
xp, _ = get_namespace(X, xp=xp)
log_prob_norm, log_resp = self._estimate_log_prob_resp(X, xp=xp)
return xp.mean(log_prob_norm), log_resp
@abstractmethod
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
pass
@abstractmethod
def _get_parameters(self):
pass
@abstractmethod
def _set_parameters(self, params):
pass
def score_samples(self, X):
"""Compute the log-likelihood of each sample.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
log_prob : array, shape (n_samples,)
Log-likelihood of each sample in `X` under the current model.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
return _logsumexp(self._estimate_weighted_log_prob(X), axis=1)
def score(self, X, y=None):
"""Compute the per-sample average log-likelihood of the given data X.
Parameters
----------
X : array-like of shape (n_samples, n_dimensions)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
log_likelihood : float
Log-likelihood of `X` under the Gaussian mixture model.
"""
xp, _ = get_namespace(X)
return float(xp.mean(self.score_samples(X)))
def predict(self, X):
"""Predict the labels for the data samples in X using trained model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
labels : array, shape (n_samples,)
Component labels.
"""
check_is_fitted(self)
xp, _ = get_namespace(X)
X = validate_data(self, X, reset=False)
return xp.argmax(self._estimate_weighted_log_prob(X), axis=1)
def predict_proba(self, X):
"""Evaluate the components' density for each sample.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
resp : array, shape (n_samples, n_components)
Density of each Gaussian component for each sample in X.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
xp, _ = get_namespace(X)
_, log_resp = self._estimate_log_prob_resp(X, xp=xp)
return xp.exp(log_resp)
def sample(self, n_samples=1):
"""Generate random samples from the fitted Gaussian distribution.
Parameters
----------
n_samples : int, default=1
Number of samples to generate.
Returns
-------
X : array, shape (n_samples, n_features)
Randomly generated sample.
y : array, shape (nsamples,)
Component labels.
"""
check_is_fitted(self)
xp, _, device_ = get_namespace_and_device(self.means_)
if n_samples < 1:
raise ValueError(
"Invalid value for 'n_samples': %d . The sampling requires at "
"least one sample." % (self.n_components)
)
_, n_features = self.means_.shape
rng = check_random_state(self.random_state)
n_samples_comp = rng.multinomial(
n_samples, _convert_to_numpy(self.weights_, xp)
)
if self.covariance_type == "full":
X = np.vstack(
[
rng.multivariate_normal(mean, covariance, int(sample))
for (mean, covariance, sample) in zip(
_convert_to_numpy(self.means_, xp),
_convert_to_numpy(self.covariances_, xp),
n_samples_comp,
)
]
)
elif self.covariance_type == "tied":
X = np.vstack(
[
rng.multivariate_normal(
mean, _convert_to_numpy(self.covariances_, xp), int(sample)
)
for (mean, sample) in zip(
_convert_to_numpy(self.means_, xp), n_samples_comp
)
]
)
else:
X = np.vstack(
[
mean
+ rng.standard_normal(size=(sample, n_features))
* np.sqrt(covariance)
for (mean, covariance, sample) in zip(
_convert_to_numpy(self.means_, xp),
_convert_to_numpy(self.covariances_, xp),
n_samples_comp,
)
]
)
y = xp.concat(
[
xp.full(int(n_samples_comp[i]), i, dtype=xp.int64, device=device_)
for i in range(len(n_samples_comp))
]
)
max_float_dtype = _max_precision_float_dtype(xp=xp, device=device_)
return xp.asarray(X, dtype=max_float_dtype, device=device_), y
def _estimate_weighted_log_prob(self, X, xp=None):
"""Estimate the weighted log-probabilities, log P(X | Z) + log weights.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
weighted_log_prob : array, shape (n_samples, n_component)
"""
return self._estimate_log_prob(X, xp=xp) + self._estimate_log_weights(xp=xp)
@abstractmethod
def _estimate_log_weights(self, xp=None):
"""Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
Returns
-------
log_weight : array, shape (n_components, )
"""
pass
@abstractmethod
def _estimate_log_prob(self, X, xp=None):
"""Estimate the log-probabilities log P(X | Z).
Compute the log-probabilities per each component for each sample.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
log_prob : array, shape (n_samples, n_component)
"""
pass
def _estimate_log_prob_resp(self, X, xp=None):
"""Estimate log probabilities and responsibilities for each sample.
Compute the log probabilities, weighted log probabilities per
component and responsibilities for each sample in X with respect to
the current state of the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
log_prob_norm : array, shape (n_samples,)
log p(X)
log_responsibilities : array, shape (n_samples, n_components)
logarithm of the responsibilities
"""
xp, _ = get_namespace(X, xp=xp)
weighted_log_prob = self._estimate_weighted_log_prob(X, xp=xp)
log_prob_norm = _logsumexp(weighted_log_prob, axis=1, xp=xp)
# There is no errstate equivalent for warning/error management in array API
context_manager = (
np.errstate(under="ignore") if _is_numpy_namespace(xp) else nullcontext()
)
with context_manager:
# ignore underflow
log_resp = weighted_log_prob - log_prob_norm[:, xp.newaxis]
return log_prob_norm, log_resp
def _print_verbose_msg_init_beg(self, n_init):
"""Print verbose message on initialization."""
if self.verbose == 1:
print("Initialization %d" % n_init)
elif self.verbose >= 2:
print("Initialization %d" % n_init)
self._init_prev_time = time()
self._iter_prev_time = self._init_prev_time
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
"""Print verbose message on initialization."""
if n_iter % self.verbose_interval == 0:
if self.verbose == 1:
print(" Iteration %d" % n_iter)
elif self.verbose >= 2:
cur_time = time()
print(
" Iteration %d\t time lapse %.5fs\t ll change %.5f"
% (n_iter, cur_time - self._iter_prev_time, diff_ll)
)
self._iter_prev_time = cur_time
def _print_verbose_msg_init_end(self, lb, init_has_converged):
"""Print verbose message on the end of iteration."""
converged_msg = "converged" if init_has_converged else "did not converge"
if self.verbose == 1:
print(f"Initialization {converged_msg}.")
elif self.verbose >= 2:
t = time() - self._init_prev_time
print(
f"Initialization {converged_msg}. time lapse {t:.5f}s\t lower bound"
f" {lb:.5f}."
)
| BaseMixture |
python | pypa__warehouse | warehouse/admin/views/organizations.py | {
"start": 17474,
"end": 42120
} | class ____(OrganizationNameMixin, SaveOrganizationForm):
def __init__(self, *args, organization_service, user, **kwargs):
super().__init__(*args, **kwargs)
self.organization_service = organization_service
self.user = user
@view_config(
route_name="admin.organization_application.detail",
require_methods=False,
renderer="warehouse.admin:templates/admin/organization_applications/detail.html",
permission=Permissions.AdminOrganizationsRead,
has_translations=True,
uses_session=True,
require_csrf=True,
)
def organization_application_detail(request):
organization_service = request.find_service(IOrganizationService, context=None)
user_service = request.find_service(IUserService, context=None)
organization_application_id = request.matchdict["organization_application_id"]
organization_application = organization_service.get_organization_application(
organization_application_id
)
if organization_application is None:
raise HTTPNotFound
form = OrganizationApplicationForm(
request.POST if request.method == "POST" else None,
organization_application,
organization_service=organization_service,
user=request.user,
)
if request.method == "POST" and form.validate():
form.populate_obj(organization_application)
request.session.flash(
f"Application for {organization_application.name!r} updated",
queue="success",
)
return HTTPSeeOther(location=request.current_route_path())
parts = organization_application.normalized_name.split("-")
conflicting_applications = (
request.db.query(OrganizationApplication)
.filter(
or_(
*(
[
OrganizationApplication.normalized_name == parts[0],
OrganizationApplication.normalized_name.startswith(
parts[0] + "-"
),
]
+ [
OrganizationApplication.normalized_name.startswith(
"-".join(parts[: i + 1])
)
for i in range(1, len(parts))
]
)
)
)
.filter(OrganizationApplication.id != organization_application.id)
.order_by(
desc(
func.similarity(
OrganizationApplication.normalized_name,
organization_application.normalized_name,
)
)
)
.all()
)
user = user_service.get_user(organization_application.submitted_by_id)
return {
"organization_application": organization_application,
"form": form,
"conflicting_applications": conflicting_applications,
"user": user,
}
@view_config(
route_name="admin.organization_application.approve",
require_methods=["POST"],
permission=Permissions.AdminOrganizationsWrite,
has_translations=True,
uses_session=True,
require_csrf=True,
)
def organization_application_approve(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_application_id = request.matchdict["organization_application_id"]
organization_application = organization_service.get_organization_application(
organization_application_id
)
if organization_application is None:
raise HTTPNotFound
organization = organization_service.approve_organization_application(
organization_application.id, request
)
request.session.flash(
f'Request for "{organization.name}" organization approved', queue="success"
)
if request.params.get("organization_applications_turbo_mode") == "true":
return _turbo_mode(request)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization_application.defer",
require_methods=["POST"],
permission=Permissions.AdminOrganizationsWrite,
has_translations=True,
uses_session=True,
require_csrf=True,
)
def organization_application_defer(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_application_id = request.matchdict["organization_application_id"]
organization_application = organization_service.get_organization_application(
organization_application_id
)
if organization_application is None:
raise HTTPNotFound
organization_service.defer_organization_application(
organization_application.id, request
)
request.session.flash(
f'Request for "{organization_application.name}" organization deferred',
queue="success",
)
if request.params.get("organization_applications_turbo_mode") == "true":
return _turbo_mode(request)
return HTTPSeeOther(
request.route_path(
"admin.organization_application.detail",
organization_application_id=organization_application.id,
)
)
@view_config(
route_name="admin.organization_application.requestmoreinfo",
require_methods=["POST"],
permission=Permissions.AdminOrganizationsWrite,
has_translations=True,
uses_session=True,
require_csrf=True,
)
def organization_application_request_more_information(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_application_id = request.matchdict["organization_application_id"]
organization_application = organization_service.get_organization_application(
organization_application_id
)
if organization_application is None:
raise HTTPNotFound
try:
organization_service.request_more_information(
organization_application.id, request
)
request.session.flash(
(
f'Request for more info from "{organization_application.name}" '
"organization sent"
),
queue="success",
)
if request.params.get("organization_applications_turbo_mode") == "true":
return _turbo_mode(request)
except ValueError:
request.session.flash("No message provided", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization_application.detail",
organization_application_id=organization_application.id,
)
)
@view_config(
route_name="admin.organization_application.decline",
require_methods=["POST"],
permission=Permissions.AdminOrganizationsWrite,
has_translations=True,
uses_session=True,
require_csrf=True,
)
def organization_application_decline(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_application_id = request.matchdict["organization_application_id"]
organization_application = organization_service.get_organization_application(
organization_application_id
)
if organization_application is None:
raise HTTPNotFound
organization_service.decline_organization_application(
organization_application.id, request
)
request.session.flash(
f'Request for "{organization_application.name}" organization declined',
queue="success",
)
if request.params.get("organization_applications_turbo_mode") == "true":
return _turbo_mode(request)
return HTTPSeeOther(
request.route_path(
"admin.organization_application.detail",
organization_application_id=organization_application.id,
)
)
@view_config(
route_name="admin.organization.add_role",
permission=Permissions.AdminRoleAdd,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def add_organization_role(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
username = request.POST.get("username")
if not username:
request.session.flash("Provide a username", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
try:
user = request.db.query(User).filter(User.username == username).one()
except NoResultFound:
request.session.flash(f"Unknown username '{username}'", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
role_name = request.POST.get("role_name")
if not role_name:
request.session.flash("Provide a role", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Check if user already has a role in this organization
already_there = (
request.db.query(OrganizationRole)
.filter(
OrganizationRole.user == user, OrganizationRole.organization == organization
)
.count()
)
if already_there > 0:
request.session.flash(
f"User '{user.username}' already has a role in this organization",
queue="error",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Create the role
organization_role = OrganizationRole(
role_name=OrganizationRoleType(role_name),
user=user,
organization=organization,
)
request.db.add(organization_role)
# Record the event
organization.record_event(
request=request,
tag="admin:organization:role:add",
additional={
"action": f"add {role_name} {user.username}",
"user_id": str(user.id),
"role_name": role_name,
},
)
request.session.flash(
f"Added '{user.username}' as '{role_name}' to '{organization.name}'",
queue="success",
)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization.update_role",
permission=Permissions.AdminRoleUpdate,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def update_organization_role(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
role_id = request.matchdict.get("role_id")
role = request.db.get(OrganizationRole, role_id)
if not role:
request.session.flash("This role no longer exists", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
new_role_name = request.POST.get("role_name")
if not new_role_name:
request.session.flash("Provide a role", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Don't update if it's the same role
if role.role_name.value == new_role_name:
request.session.flash("Role is already set to this value", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
old_role_name = role.role_name.value
# Update the role
role.role_name = OrganizationRoleType(new_role_name)
request.db.add(role)
request.db.flush() # Ensure the role is updated before recording event
# Record the event
organization.record_event(
request=request,
tag="admin:organization:role:change",
additional={
"action": (
f"change {role.user.username} from {old_role_name} to {new_role_name}"
),
"user_id": str(role.user.id),
"old_role_name": old_role_name,
"new_role_name": new_role_name,
},
)
request.session.flash(
f"Changed '{role.user.username}' from '{old_role_name}' to "
f"'{new_role_name}' in '{organization.name}'",
queue="success",
)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization.delete_role",
permission=Permissions.AdminRoleDelete,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def delete_organization_role(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
role_id = request.matchdict.get("role_id")
role = request.db.get(OrganizationRole, role_id)
if not role:
request.session.flash("This role no longer exists", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
confirm = request.POST.get("username")
if not confirm or confirm != role.user.username:
request.session.flash("Confirm the request", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Record the event before deleting
organization.record_event(
request=request,
tag="admin:organization:role:remove",
additional={
"action": f"remove {role.role_name.value} {role.user.username}",
"user_id": str(role.user.id),
"role_name": role.role_name.value,
},
)
request.session.flash(
f"Removed '{role.user.username}' as '{role.role_name.value}' "
f"from '{organization.name}'",
queue="success",
)
request.db.delete(role)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization.add_manual_activation",
permission=Permissions.AdminOrganizationsWrite,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def add_manual_activation(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
# Check if organization already has manual activation
existing_activation = (
request.db.query(OrganizationManualActivation)
.filter(OrganizationManualActivation.organization_id == organization.id)
.first()
)
if existing_activation:
request.session.flash(
f"Organization '{organization.name}' already has manual activation",
queue="error",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
form = ManualActivationForm(request.POST)
if not form.validate():
for field, errors in form.errors.items():
for error in errors:
request.session.flash(f"{field}: {error}", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Create manual activation
manual_activation = OrganizationManualActivation(
organization_id=organization.id,
seat_limit=form.seat_limit.data,
expires=form.expires.data,
created_by_id=request.user.id,
)
request.db.add(manual_activation)
# Record the event
organization.record_event(
request=request,
tag="admin:organization:manual_activation:add",
additional={
"seat_limit": form.seat_limit.data,
"expires": form.expires.data.isoformat(),
},
)
request.session.flash(
f"Manual activation added for '{organization.name}' "
f"(seat limit: {form.seat_limit.data}, expires: {form.expires.data})",
queue="success",
)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization.set_upload_limit",
permission=Permissions.AdminOrganizationsSetLimit,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def set_upload_limit(request):
organization_id = request.matchdict["organization_id"]
organization = request.db.get(Organization, organization_id)
if organization is None:
raise HTTPNotFound
form = SetUploadLimitForm(request.POST)
if not form.validate():
for field, errors in form.errors.items():
for error in errors:
request.session.flash(f"{field}: {error}", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Form validation has already converted to bytes or None
organization.upload_limit = form.upload_limit.data
if organization.upload_limit:
limit_msg = f"{organization.upload_limit / ONE_MIB}MiB"
else:
limit_msg = "(default)"
request.session.flash(
f"Upload limit set to {limit_msg}",
queue="success",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail",
organization_id=organization.id,
)
)
@view_config(
route_name="admin.organization.update_manual_activation",
permission=Permissions.AdminOrganizationsWrite,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def update_manual_activation(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
manual_activation = (
request.db.query(OrganizationManualActivation)
.filter(OrganizationManualActivation.organization_id == organization.id)
.first()
)
if not manual_activation:
request.session.flash(
f"Organization '{organization.name}' has no manual activation to update",
queue="error",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
form = ManualActivationForm(request.POST)
if not form.validate():
for field, errors in form.errors.items():
for error in errors:
request.session.flash(f"{field}: {error}", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
old_seat_limit = manual_activation.seat_limit
old_expires = manual_activation.expires
# Update manual activation
manual_activation.seat_limit = form.seat_limit.data
manual_activation.expires = form.expires.data
manual_activation.created_by_id = request.user.id
request.db.add(manual_activation)
# Record the event
organization.record_event(
request=request,
tag="admin:organization:manual_activation:update",
additional={
"old_seat_limit": old_seat_limit,
"new_seat_limit": form.seat_limit.data,
"old_expires": old_expires.isoformat(),
"new_expires": form.expires.data.isoformat(),
},
)
request.session.flash(
f"Manual activation updated for '{organization.name}' "
f"(seat limit: {form.seat_limit.data}, expires: {form.expires.data})",
queue="success",
)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization.delete_manual_activation",
permission=Permissions.AdminOrganizationsWrite,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def delete_manual_activation(request):
organization_service = request.find_service(IOrganizationService, context=None)
organization_id = request.matchdict["organization_id"]
organization = organization_service.get_organization(organization_id)
if organization is None:
raise HTTPNotFound
manual_activation = (
request.db.query(OrganizationManualActivation)
.filter(OrganizationManualActivation.organization_id == organization.id)
.first()
)
if not manual_activation:
request.session.flash(
f"Organization '{organization.name}' has no manual activation to delete",
queue="error",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
confirm = request.POST.get("confirm")
if not confirm or confirm != organization.name:
request.session.flash("Confirm the request", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Record the event before deleting
organization.record_event(
request=request,
tag="admin:organization:manual_activation:delete",
additional={
"seat_limit": manual_activation.seat_limit,
"expires": manual_activation.expires.isoformat(),
},
)
request.session.flash(
f"Manual activation removed from '{organization.name}'",
queue="success",
)
request.db.delete(manual_activation)
return HTTPSeeOther(
request.route_path("admin.organization.detail", organization_id=organization.id)
)
@view_config(
route_name="admin.organization.set_total_size_limit",
permission=Permissions.AdminOrganizationsSetLimit,
request_method="POST",
uses_session=True,
require_csrf=True,
require_methods=False,
)
def set_total_size_limit(request):
organization_id = request.matchdict["organization_id"]
organization = request.db.get(Organization, organization_id)
if organization is None:
raise HTTPNotFound
form = SetTotalSizeLimitForm(request.POST)
if not form.validate():
for field, errors in form.errors.items():
for error in errors:
request.session.flash(f"{field}: {error}", queue="error")
return HTTPSeeOther(
request.route_path(
"admin.organization.detail", organization_id=organization.id
)
)
# Form validation has already converted to bytes or None
organization.total_size_limit = form.total_size_limit.data
if organization.total_size_limit:
limit_msg = f"{organization.total_size_limit / ONE_GIB}GiB"
else:
limit_msg = "(default)"
request.session.flash(
f"Total size limit set to {limit_msg}",
queue="success",
)
return HTTPSeeOther(
request.route_path(
"admin.organization.detail",
organization_id=organization.id,
)
)
| OrganizationApplicationForm |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 35165,
"end": 35252
} | class ____(Operator):
__slots__ = ()
_description = "logical or"
_op = any
| Or |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 17766,
"end": 18372
} | class ____(AbstractTemplate):
key = "static_getitem"
def generic(self, args, kws):
tup, idx = args
ret = None
if not isinstance(tup, types.BaseTuple):
return
if isinstance(idx, int):
try:
ret = tup.types[idx]
except IndexError:
raise errors.NumbaIndexError("tuple index out of range")
elif isinstance(idx, slice):
ret = types.BaseTuple.from_types(tup.types[idx])
if ret is not None:
sig = signature(ret, *args)
return sig
@infer
| StaticGetItemTuple |
python | ray-project__ray | rllib/evaluation/tests/test_rollout_worker.py | {
"start": 2055,
"end": 2403
} | class ____(RandomPolicy):
@override(RandomPolicy)
def compute_actions(
self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs
):
raise Exception("intentional error")
| BadPolicy |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/utils.py | {
"start": 2765,
"end": 3481
} | class ____(QFileIconProvider):
"""Project tree widget icon provider"""
@Slot(int)
@Slot(QFileInfo)
def icon(self, icontype_or_qfileinfo):
"""Reimplement Qt method"""
if isinstance(icontype_or_qfileinfo, QFileIconProvider.IconType):
return super().icon(icontype_or_qfileinfo)
else:
qfileinfo = icontype_or_qfileinfo
fname = osp.normpath(str(qfileinfo.absoluteFilePath()))
if osp.isfile(fname) or osp.isdir(fname):
icon = ima.get_icon_by_extension_or_type(
fname, scale_factor=1.0
)
else:
icon = ima.icon('binary')
return icon
| IconProvider |
python | pallets__flask | src/flask/json/provider.py | {
"start": 318,
"end": 3966
} | class ____:
"""A standard set of JSON operations for an application. Subclasses
of this can be used to customize JSON behavior or use different
JSON libraries.
To implement a provider for a specific library, subclass this base
class and implement at least :meth:`dumps` and :meth:`loads`. All
other methods have default implementations.
To use a different provider, either subclass ``Flask`` and set
:attr:`~flask.Flask.json_provider_class` to a provider class, or set
:attr:`app.json <flask.Flask.json>` to an instance of the class.
:param app: An application instance. This will be stored as a
:class:`weakref.proxy` on the :attr:`_app` attribute.
.. versionadded:: 2.2
"""
def __init__(self, app: App) -> None:
self._app: App = weakref.proxy(app)
def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
"""Serialize data as JSON.
:param obj: The data to serialize.
:param kwargs: May be passed to the underlying JSON library.
"""
raise NotImplementedError
def dump(self, obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
"""Serialize data as JSON and write to a file.
:param obj: The data to serialize.
:param fp: A file opened for writing text. Should use the UTF-8
encoding to be valid JSON.
:param kwargs: May be passed to the underlying JSON library.
"""
fp.write(self.dumps(obj, **kwargs))
def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
"""Deserialize data as JSON.
:param s: Text or UTF-8 bytes.
:param kwargs: May be passed to the underlying JSON library.
"""
raise NotImplementedError
def load(self, fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
"""Deserialize data as JSON read from a file.
:param fp: A file opened for reading text or UTF-8 bytes.
:param kwargs: May be passed to the underlying JSON library.
"""
return self.loads(fp.read(), **kwargs)
def _prepare_response_obj(
self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any]
) -> t.Any:
if args and kwargs:
raise TypeError("app.json.response() takes either args or kwargs, not both")
if not args and not kwargs:
return None
if len(args) == 1:
return args[0]
return args or kwargs
def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
"""Serialize the given arguments as JSON, and return a
:class:`~flask.Response` object with the ``application/json``
mimetype.
The :func:`~flask.json.jsonify` function calls this method for
the current application.
Either positional or keyword arguments can be given, not both.
If no arguments are given, ``None`` is serialized.
:param args: A single value to serialize, or multiple values to
treat as a list to serialize.
:param kwargs: Treat as a dict to serialize.
"""
obj = self._prepare_response_obj(args, kwargs)
return self._app.response_class(self.dumps(obj), mimetype="application/json")
def _default(o: t.Any) -> t.Any:
if isinstance(o, date):
return http_date(o)
if isinstance(o, (decimal.Decimal, uuid.UUID)):
return str(o)
if dataclasses and dataclasses.is_dataclass(o):
return dataclasses.asdict(o) # type: ignore[arg-type]
if hasattr(o, "__html__"):
return str(o.__html__())
raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
| JSONProvider |
python | RaRe-Technologies__gensim | gensim/test/test_bm25model.py | {
"start": 271,
"end": 551
} | class ____(BM25ABC):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def precompute_idfs(self, dfs, num_docs):
return dict()
def get_term_weights(self, num_tokens, term_frequencies, idfs):
return term_frequencies
| BM25Stub |
python | astropy__astropy | astropy/units/quantity.py | {
"start": 4393,
"end": 5456
} | class ____(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {"dtype", "unit"} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return f"{val.value}"
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
| QuantityInfoBase |
python | django__django | django/db/models/functions/datetime.py | {
"start": 404,
"end": 1010
} | class ____:
tzinfo = None
def get_tzname(self):
# Timezone conversions must happen to the input datetime *before*
# applying a function. 2015-12-31 23:00:00 -02:00 is stored in the
# database as 2016-01-01 01:00:00 +00:00. Any results should be
# based on the input datetime not the stored datetime.
tzname = None
if settings.USE_TZ:
if self.tzinfo is None:
tzname = timezone.get_current_timezone_name()
else:
tzname = timezone._get_timezone_name(self.tzinfo)
return tzname
| TimezoneMixin |
python | pennersr__django-allauth | allauth/core/internal/ratelimit.py | {
"start": 1207,
"end": 1536
} | class ____:
cache_key: str
cache_duration: Union[float, int]
timestamp: float
def rollback(self) -> None:
history = cache.get(self.cache_key, [])
history = [ts for ts in history if ts != self.timestamp]
cache.set(self.cache_key, history, self.cache_duration)
@dataclass
| SingleRateLimitUsage |
python | doocs__leetcode | solution/2500-2599/2588.Count the Number of Beautiful Subarrays/Solution.py | {
"start": 0,
"end": 247
} | class ____:
def beautifulSubarrays(self, nums: List[int]) -> int:
cnt = Counter({0: 1})
ans = mask = 0
for x in nums:
mask ^= x
ans += cnt[mask]
cnt[mask] += 1
return ans
| Solution |
python | ray-project__ray | rllib/utils/metrics/metrics_logger.py | {
"start": 566,
"end": 65156
} | class ____:
"""A generic class collecting and processing metrics in RL training and evaluation.
This class represents the main API used by all of RLlib's components (internal and
user facing) in order to log, collect, and process (reduce) stats during training
and evaluation/inference.
It supports:
- Logging of simple float/int values (for example a loss) over time or from
parallel runs (n Learner workers, each one reporting a loss from their respective
data shard).
- Logging of images, videos, or other more complex data structures over time.
- Reducing these collected values using a user specified reduction method (for
example "min" or "mean") and other settings controlling the reduction and internal
data, such as sliding windows or EMA coefficients.
- Optionally clearing all logged values after a `reduce()` call to make space for
new data.
- Tracking throughputs of logged values.
.. testcode::
import time
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.test_utils import check
logger = MetricsLogger(root=True)
# 1) Logging float values (mean over window):
# Log some loss under the "loss" key. By default, all logged values
# under that key are averaged and reported back, once `reduce()` is called.
logger.log_value("loss", 0.001, reduce="mean", window=10)
logger.log_value("loss", 0.002, reduce="mean", window=10)
# Peek at the current (reduced) value of "loss":
check(logger.peek("loss"), 0.0015) # <- expect average value
# Actually reduce the underlying Stats object(s).
results = logger.reduce()
check(results["loss"], 0.0015)
# 2) Logging float values (minimum over window):
# Log the minimum of loss values under the "min_loss" key.
logger.log_value("min_loss", 0.1, reduce="min", window=2)
logger.log_value("min_loss", 0.01, reduce="min", window=2)
logger.log_value("min_loss", 0.1, reduce="min", window=2)
logger.log_value("min_loss", 0.02, reduce="min", window=2)
# Peek at the current (reduced) value of "min_loss":
check(logger.peek("min_loss"), 0.02) # <- expect min value (over window=2)
# Actually reduce the underlying Stats object(s).
results = logger.reduce()
check(results["min_loss"], 0.02)
# 3) Log n counts in different (remote?) components and merge them on the
# controller side.
remote_logger_1 = MetricsLogger()
remote_logger_2 = MetricsLogger()
main_logger = MetricsLogger()
remote_logger_1.log_value("count", 2, reduce="sum", clear_on_reduce=True)
remote_logger_2.log_value("count", 3, reduce="sum", clear_on_reduce=True)
# Reduce the two remote loggers ..
remote_results_1 = remote_logger_1.reduce()
remote_results_2 = remote_logger_2.reduce()
# .. then merge the two results into the controller logger.
main_logger.aggregate([remote_results_1, remote_results_2])
check(main_logger.peek("count"), 5)
# 4) Time blocks of code using EMA (coeff=0.1). Note that the higher the coeff
# (the closer to 1.0), the more short term the EMA turns out.
logger = MetricsLogger()
# First delta measurement:
with logger.log_time("my_block_to_be_timed", reduce="mean", ema_coeff=0.1):
time.sleep(1.0)
# EMA should be ~1sec.
assert 1.1 > logger.peek("my_block_to_be_timed") > 0.9
# Second delta measurement (note that we don't have to repeat the args again, as
# the stats under that name have already been created above with the correct
# args).
with logger.log_time("my_block_to_be_timed"):
time.sleep(2.0)
# EMA should be ~1.1sec.
assert 1.15 > logger.peek("my_block_to_be_timed") > 1.05
# When calling `reduce()`, the internal values list gets cleaned up (reduced)
# and reduction results are returned.
results = logger.reduce()
# EMA should be ~1.1sec.
assert 1.15 > results["my_block_to_be_timed"] > 1.05
# 5) Keeping track of throughputs and compiling all metrics
logger = MetricsLogger()
logger.log_value("samples", 1.0, reduce="sum", with_throughput=True, throughput_ema_coeff=1.0)
time.sleep(1.0)
logger.log_value("samples", 2.0, reduce="sum", with_throughput=True, throughput_ema_coeff=1.0)
results = logger.compile()
check(results["samples"], 3.0)
# Since we have an ema_coeff of 1.0, the throughput should be the same as the last value we logged (after 1 second)
check(results["samples_throughput"], 2.0, rtol=0.1)
"""
def __init__(self, root=False):
"""Initializes a MetricsLogger instance.
Args:
root: Whether this logger is a root logger. If True, lifetime stats
(clear_on_reduce=False and reduction="sum") will not be cleared on reduce().
"""
self.stats = {}
self._tensor_mode = False
# TODO (sven): We use a dummy RLock here for most RLlib algos, however, APPO
# and IMPALA require this to be an actual RLock (b/c of thread safety reasons).
# An actual RLock, however, breaks our current OfflineData and
# OfflinePreLearner logic, in which the Learner (which contains a
# MetricsLogger) is serialized and deserialized. We will have to fix this
# offline RL logic first, then can remove this hack here and return to always
# using the RLock.
self._threading_lock = _DummyRLock()
# Is this a root logger?
self._is_root_logger = root
def __contains__(self, key: Union[str, Tuple[str, ...]]) -> bool:
"""Returns True, if `key` can be found in self.stats.
Args:
key: The key to find in self.stats. This must be either a str (single,
top-level key) or a tuple of str (nested key).
Returns:
Whether `key` could be found in self.stats.
"""
return self._key_in_stats(key)
def peek(
self,
key: Union[str, Tuple[str, ...], None] = None,
default=None,
compile: bool = True,
throughput: bool = False,
) -> Any:
"""Returns the reduced values found in this MetricsLogger.
Note that calling this method does NOT cause an actual underlying value list
reduction, even though reduced values are being returned. It'll keep all
internal structures as-is. By default, this returns a single reduced value or, if
the Stats object has no reduce method, a list of values. When when compile is False,
the result is a list of one or more values.
Args:
key: The key/key sequence of the sub-structure of `self`, whose (reduced)
values to return.
default: An optional default value in case `key` cannot be found in `self`.
If default is not provided and `key` cannot be found, throws a KeyError.
compile: If True, the result is compiled into a single value if possible.
throughput: If True, the throughput is returned instead of the
actual (reduced) value.
.. testcode::
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.test_utils import check
logger = MetricsLogger()
ema = 0.01
# Log some (EMA reduced) values.
key = ("some", "nested", "key", "sequence")
logger.log_value(key, 2.0, ema_coeff=ema)
logger.log_value(key, 3.0)
# Expected reduced value:
expected_reduced = (1.0 - ema) * 2.0 + ema * 3.0
# Peek at the (reduced) value under `key`.
check(logger.peek(key), expected_reduced)
# Peek at the (reduced) nested struct under ("some", "nested").
check(
logger.peek(("some", "nested")),
{"key": {"sequence": expected_reduced}},
)
# Log some more, check again.
logger.log_value(key, 4.0)
expected_reduced = (1.0 - ema) * expected_reduced + ema * 4.0
check(logger.peek(key=key), expected_reduced)
Returns:
The (reduced) values of the (possibly nested) sub-structure found under
the given key or key sequence.
"""
if throughput:
return self._get_throughputs(key=key, default=default)
# Create a reduced view of the entire stats structure.
def _nested_peek(stats):
return tree.map_structure(
# If the Stats object has a reduce method, we need to convert the list to a single value
lambda s: (
s.peek(compile=compile)
if s._reduce_method is not None
else s.peek(compile=compile)[0]
)
if isinstance(s, Stats)
else s,
stats.copy(),
)
with self._threading_lock:
if key is None:
return _nested_peek(self.stats)
else:
if default is None:
stats = self._get_key(key, key_error=True)
else:
stats = self._get_key(key, key_error=False)
if isinstance(stats, Stats):
# If the Stats object has a reduce method, we need to convert the list to a single value
return stats.peek(compile=compile)
elif isinstance(stats, dict) and stats:
return _nested_peek(stats)
else:
return default
@staticmethod
def peek_results(results: Any, compile: bool = True) -> Any:
"""Performs `peek()` on any leaf element of an arbitrarily nested Stats struct.
Args:
results: The nested structure of Stats-leafs to be peek'd and returned.
compile: If True, the result is compiled into a single value if possible.
Returns:
A corresponding structure of the peek'd `results` (reduced float/int values;
no Stats objects).
"""
return tree.map_structure(
lambda s: s.peek(compile=compile) if isinstance(s, Stats) else s, results
)
def log_value(
self,
key: Union[str, Tuple[str, ...]],
value: Any,
*,
reduce: Optional[str] = "mean",
window: Optional[Union[int, float]] = None,
ema_coeff: Optional[float] = None,
percentiles: Union[List[int], bool] = False,
clear_on_reduce: bool = False,
with_throughput: bool = False,
throughput_ema_coeff: Optional[float] = None,
reduce_per_index_on_aggregate: bool = False,
) -> None:
"""Logs a new value under a (possibly nested) key to the logger.
.. testcode::
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.test_utils import check
logger = MetricsLogger(root=True)
# Log n simple float values under the "loss" key. By default, all logged
# values under that key are averaged, once `reduce()` is called.
logger.log_value("loss", 0.01, window=10)
logger.log_value("loss", 0.02, window=10)
logger.log_value("loss", 0.03, window=10)
# Peek at the current (reduced) value.
# Note that in the underlying structure, the internal values list still
# contains all logged values (0.01, 0.02, and 0.03).
check(logger.peek("loss"), 0.02)
# Log 10x (window size) the same value.
for _ in range(10):
logger.log_value("loss", 0.05, window=10)
check(logger.peek("loss"), 0.05)
# Internals check (note that users should not be concerned with accessing
# these). Len should always be 10, since the underlying struct is a
# `deque(max_len=10)`.
check(len(logger.stats["loss"].values), 10)
# Only, when we call `reduce` does the underlying structure get "cleaned
# up". In this case, the list is shortened to 10 items (window size).
results = logger.reduce()
check(results, {"loss": 0.05})
check(len(logger.stats["loss"].values), 10)
# Log a value under a deeper nested key.
logger.log_value(("some", "nested", "key"), -1.0)
check(logger.peek(("some", "nested", "key")), -1.0)
# Log n values without reducing them (we want to just collect some items).
logger.log_value("some_items", 5.0, reduce=None)
logger.log_value("some_items", 6.0, reduce=None)
logger.log_value("some_items", 7.0, reduce=None)
# Peeking at these returns the full list of items (no reduction set up).
check(logger.peek("some_items"), [5.0, 6.0, 7.0])
# If you don't want the internal list to grow indefinitely, you should set
# `clear_on_reduce=True`:
logger.log_value("some_more_items", -5.0, reduce=None, clear_on_reduce=True)
logger.log_value("some_more_items", -6.0, reduce=None, clear_on_reduce=True)
logger.log_value("some_more_items", -7.0, reduce=None, clear_on_reduce=True )
# Peeking at these returns the full list of items (no reduction set up).
check(logger.peek("some_more_items"), [-5.0, -6.0, -7.0])
# Reducing everything (and return plain values, not `Stats` objects).
results = logger.reduce()
check(results, {
"loss": 0.05,
"some": {
"nested": {
"key": -1.0,
},
},
"some_items": [5.0, 6.0, 7.0], # reduce=None; list as-is
"some_more_items": [-5.0, -6.0, -7.0], # reduce=None; list as-is
})
# However, the `reduce()` call did empty the `some_more_items` list
# (b/c we set `clear_on_reduce=True`).
check(logger.peek("some_more_items"), [])
# ... but not the "some_items" list (b/c `clear_on_reduce=False`).
check(logger.peek("some_items"), [])
Args:
key: The key (or nested key-tuple) to log the `value` under.
value: The value to log. This should be a numeric value.
reduce: The reduction method to apply, once `self.reduce()` is called.
If None, will collect all logged values under `key` in a list (and
also return that list upon calling `self.reduce()`).
window: An optional window size to reduce over.
If not None, then the reduction operation is only applied to the most
recent `window` items, and - after reduction - the internal values list
under `key` is shortened to hold at most `window` items (the most
recent ones).
Must be None if `ema_coeff` is provided.
If None (and `ema_coeff` is None), reduction must not be "mean".
ema_coeff: An optional EMA coefficient to use if `reduce` is "mean"
and no `window` is provided. Note that if both `window` and `ema_coeff`
are provided, an error is thrown. Also, if `ema_coeff` is provided,
`reduce` must be "mean".
The reduction formula for EMA is:
EMA(t1) = (1.0 - ema_coeff) * EMA(t0) + ema_coeff * new_value
percentiles: If reduce is `None`, we can compute the percentiles of the
values list given by `percentiles`. Defaults to [0, 0.5, 0.75, 0.9, 0.95,
0.99, 1] if set to True. When using percentiles, a window must be provided.
This window should be chosen carefully. RLlib computes exact percentiles and
the computational complexity is O(m*n*log(n/m)) where n is the window size
and m is the number of parallel metrics loggers involved (for example,
m EnvRunners).
clear_on_reduce: If True, all values under `key` will be emptied after
`self.reduce()` is called. Setting this to True is useful for cases,
in which the internal values list would otherwise grow indefinitely,
for example if reduce is None and there is no `window` provided.
with_throughput: Whether to track a throughput estimate together with this
metric. This is only supported for `reduce=sum` and
`clear_on_reduce=False` metrics (aka. "lifetime counts"). The `Stats`
object under the logged key then keeps track of the time passed
between two consecutive calls to `reduce()` and update its throughput
estimate. The current throughput estimate of a key can be obtained
through: <MetricsLogger>.peek(key, throughput=True).
throughput_ema_coeff: The EMA coefficient to use for throughput tracking.
Only used if with_throughput=True. Defaults to 0.05 if with_throughput is True.
reduce_per_index_on_aggregate: If True, when merging Stats objects in parallel, we reduce
incoming values per index such that the new value at index `n` will be
the reduced value of all incoming values at index `n`.
If False, when reducing `n` Stats, the first `n` merged values will be
the reduced value of all incoming values at index `0`, the next `n` merged
values will be the reduced values of all incoming values at index `1`, etc.
"""
# No reduction (continue appending to list) AND no window.
# -> We'll force-reset our values upon `reduce()`.
if reduce is None and (window is None or window == float("inf")):
clear_on_reduce = True
# Set default ema_coeff to 0.01 if reduce is "mean" and no window is provided
if reduce == "mean" and window is None and ema_coeff is None:
ema_coeff = 0.01
if with_throughput and throughput_ema_coeff is None:
throughput_ema_coeff = 0.05
value = self._detach_tensor_if_necessary(value)
with self._threading_lock:
# `key` doesn't exist -> Automatically create it.
if not self._key_in_stats(key):
self._set_key(
key,
(
Stats(
value,
reduce=reduce,
percentiles=percentiles,
window=window,
ema_coeff=ema_coeff,
clear_on_reduce=clear_on_reduce,
throughput=with_throughput,
throughput_ema_coeff=throughput_ema_coeff,
reduce_per_index_on_aggregate=reduce_per_index_on_aggregate,
)
),
)
else:
stats = self._get_key(key)
if reduce != stats._reduce_method and log_once(f"reduce_warning_{key}"):
logger.warning(
f"reduce should be the same for all logged values under the same key, "
f"but got argument reduce={reduce} while the existing Stats object {key} "
f"has reduce={stats._reduce_method}."
)
if clear_on_reduce != stats._clear_on_reduce and log_once(
f"clear_on_reduce_warning_{key}"
):
logger.warning(
f"clear_on_reduce should be the same for all logged values under the same key, "
f"but got argument clear_on_reduce={clear_on_reduce} while the existing Stats object {key} "
f"has clear_on_reduce={stats._clear_on_reduce}."
)
if with_throughput != bool(stats.has_throughput) and log_once(
f"with_throughput_warning_{key}"
):
logger.warning(
f"with_throughput should be the same for all logged values under the same key, "
f"but got argument with_throughput={with_throughput} while the existing Stats object {key} "
f"has has_throughput={stats.has_throughput}. This warning will always be shown if you are using an older checkpoint."
)
if throughput_ema_coeff != stats._throughput_ema_coeff and log_once(
f"throughput_ema_coeff_warning_{key}"
):
logger.warning(
f"throughput_ema_coeff should be the same for all logged values under the same key, "
f"but got argument throughput_ema_coeff={throughput_ema_coeff} while the existing Stats object {key} "
f"has throughput_ema_coeff={stats._throughput_ema_coeff}. This warning will always be shown if you are using an older checkpoint."
)
if window != stats._window and log_once(f"window_warning_{key}"):
logger.warning(
f"window should be the same for all logged values under the same key, "
f"but got argument window={window} while the existing Stats object {key} "
f"has window={stats._window}."
)
if percentiles != getattr(stats, "_percentiles", False) and log_once(
f"percentiles_warning_{key}"
):
logger.warning(
"percentiles should be the same for all logged values under the same key, "
f"but got argument percentiles={percentiles} while the existing Stats object {key} "
f"has percentiles={getattr(stats, '_percentiles', False)}."
)
if (
reduce_per_index_on_aggregate
!= stats._reduce_per_index_on_aggregate
and log_once(f"reduce_per_index_on_aggregate_warning_{key}")
):
logger.warning(
f"reduce_per_index_on_aggregate should be the same for all logged values under the same key, "
f"but got argument reduce_per_index_on_aggregate={reduce_per_index_on_aggregate} while the existing Stats object {key} "
f"has reduce_per_index_on_aggregate={stats._reduce_per_index_on_aggregate}."
)
stats.push(value)
def log_dict(
self,
value_dict,
*,
key: Optional[Union[str, Tuple[str, ...]]] = None,
reduce: Optional[str] = "mean",
window: Optional[Union[int, float]] = None,
ema_coeff: Optional[float] = None,
percentiles: Union[List[int], bool] = False,
clear_on_reduce: bool = False,
with_throughput: bool = False,
throughput_ema_coeff: Optional[float] = None,
reduce_per_index_on_aggregate: bool = False,
) -> None:
"""Logs all leafs of a possibly nested dict of values to this logger.
To aggregate logs from upstream components, use `aggregate`.
This is a convinience function that is equivalent to:
```
tree.map_structure_with_path(lambda path, value: logger.log_value(path, value, ...), value_dict)
```
Traverses through all leafs of `stats_dict` and - if a path cannot be found in
this logger yet, will add the `Stats` found at the leaf under that new key.
If a path already exists, will merge the found leaf (`Stats`) with the ones
already logged before. This way, `stats_dict` does NOT have to have
the same structure as what has already been logged to `self`, but can be used to
log values under new keys or nested key paths.
.. testcode::
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.test_utils import check
logger = MetricsLogger()
# Log n dicts with keys "a" and (some) "b". By default, all logged values
# under that key are averaged, once `reduce()` is called.
logger.log_dict(
{
"a": 0.1,
"b": -0.1,
},
window=10,
)
logger.log_dict({
"b": -0.2,
}) # don't have to repeat `window` arg if key already exists
logger.log_dict({
"a": 0.2,
"c": {"d": 5.0}, # can also introduce an entirely new (nested) key
})
# Peek at the current (reduced) values under "a" and "b".
check(logger.peek("a"), 0.15)
check(logger.peek("b"), -0.15)
check(logger.peek(("c", "d")), 5.0)
# Reduced all stats.
results = logger.reduce()
check(results, {
"a": 0.15,
"b": -0.15,
"c": {"d": 5.0},
})
Args:
value_dict: The (possibly nested) dict with individual values as
leafs to be logged to this logger.
key: An additional key (or tuple of keys) to prepend to all the keys
(or tuples of keys in case of nesting) found inside `stats_dict`.
Useful to log the entire contents of `stats_dict` in a more organized
fashion under one new key, for example logging the results returned by
an EnvRunner under key
reduce: The reduction method to apply, once `self.reduce()` is called.
If None, will collect all logged values under `key` in a list (and
also return that list upon calling `self.reduce()`).
window: An optional window size to reduce over.
If not None, then the reduction operation is only applied to the most
recent `window` items, and - after reduction - the internal values list
under `key` is shortened to hold at most `window` items (the most
recent ones).
Must be None if `ema_coeff` is provided.
If None (and `ema_coeff` is None), reduction must not be "mean".
ema_coeff: An optional EMA coefficient to use if `reduce` is "mean"
and no `window` is provided. Note that if both `window` and `ema_coeff`
are provided, an error is thrown. Also, if `ema_coeff` is provided,
`reduce` must be "mean".
The reduction formula for EMA is:
EMA(t1) = (1.0 - ema_coeff) * EMA(t0) + ema_coeff * new_value
percentiles: If reduce is `None`, we can compute the percentiles of the
values list given by `percentiles`. Defaults to [0, 0.5, 0.75, 0.9, 0.95,
0.99, 1] if set to True. When using percentiles, a window must be provided.
This window should be chosen carefully. RLlib computes exact percentiles and
the computational complexity is O(m*n*log(n/m)) where n is the window size
and m is the number of parallel metrics loggers involved (for example,
m EnvRunners).
clear_on_reduce: If True, all values under `key` will be emptied after
`self.reduce()` is called. Setting this to True is useful for cases,
in which the internal values list would otherwise grow indefinitely,
for example if reduce is None and there is no `window` provided.
with_throughput: Whether to track a throughput estimate together with this
metric. This is only supported for `reduce=sum` and
`clear_on_reduce=False` metrics (aka. "lifetime counts"). The `Stats`
object under the logged key then keeps track of the time passed
between two consecutive calls to `reduce()` and update its throughput
estimate. The current throughput estimate of a key can be obtained
through: <MetricsLogger>.peek(key, throughput=True).
throughput_ema_coeff: The EMA coefficient to use for throughput tracking.
Only used if with_throughput=True. Defaults to 0.05 if with_throughput is True.
reduce_per_index_on_aggregate: If True, when merging Stats objects, we reduce
incoming values per index such that the new value at index `n` will be
the reduced value of all incoming values at index `n`.
If False, when reducing `n` Stats, the first `n` merged values will be
the reduced value of all incoming values at index `0`, the next `n` merged
values will be the reduced values of all incoming values at index `1`, etc.
"""
assert isinstance(
value_dict, dict
), f"`stats_dict` ({value_dict}) must be dict!"
prefix_key = force_tuple(key)
def _map(path, stat_or_value):
extended_key = prefix_key + force_tuple(tree.flatten(path))
if isinstance(stat_or_value, Stats):
deprecation_warning(
old="MetricsLogger.log_dict() for Stats objects",
new="MetricsLogger.aggregate()",
error=True,
)
self.log_value(
extended_key,
value=stat_or_value,
reduce=reduce,
window=window,
ema_coeff=ema_coeff,
percentiles=percentiles,
clear_on_reduce=clear_on_reduce,
with_throughput=with_throughput,
throughput_ema_coeff=throughput_ema_coeff,
reduce_per_index_on_aggregate=reduce_per_index_on_aggregate,
)
with self._threading_lock:
tree.map_structure_with_path(_map, value_dict)
@Deprecated(new="aggregate", error=False)
def merge_and_log_n_dicts(self, *args, **kwargs):
return self.aggregate(*args, **kwargs)
def aggregate(
self,
stats_dicts: List[Dict[str, Any]],
*,
key: Optional[Union[str, Tuple[str, ...]]] = None,
) -> None:
"""Merges n stats_dicts and logs result by merging on the time axis with existing stats.
The n stats_dicts should be generated by n parallel components such that merging their
respective stats in parallel is meaningful. This lets us aggregate stats in a tree structure of
MetricsLoggers.
If you want to log a dictionary of values (not Stats objects), use `log_dict`.
.. testcode::
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.test_utils import check
# Example: n Learners logging loss stats to be merged.
# Note that losses should usually be logged with a window=1 so they don't
# get smeared over time and instead provide an accurate picture of the
# current situation.
main_logger = MetricsLogger()
logger_learner1 = MetricsLogger()
logger_learner1.log_value("loss", 0.1, window=1)
learner1_results = logger_learner1.reduce()
logger_learner2 = MetricsLogger()
logger_learner2.log_value("loss", 0.2, window=1)
learner2_results = logger_learner2.reduce()
# Merge the stats from both Learners.
main_logger.aggregate(
[learner1_results, learner2_results],
key="learners",
)
check(main_logger.peek(("learners", "loss")), 0.15)
# Example: m EnvRunners logging episode returns to be merged.
main_logger = MetricsLogger()
logger_env_runner1 = MetricsLogger()
logger_env_runner1.log_value("mean_ret", 100.0, window=3)
logger_env_runner1.log_value("mean_ret", 200.0, window=3)
logger_env_runner1.log_value("mean_ret", 300.0, window=3)
logger_env_runner1.log_value("mean_ret", 400.0, window=3)
env_runner1_results = logger_env_runner1.reduce()
logger_env_runner2 = MetricsLogger()
logger_env_runner2.log_value("mean_ret", 150.0, window=3)
logger_env_runner2.log_value("mean_ret", 250.0, window=3)
logger_env_runner2.log_value("mean_ret", 350.0, window=3)
logger_env_runner2.log_value("mean_ret", 450.0, window=3)
env_runner2_results = logger_env_runner2.reduce()
# Merge the stats from both EnvRunners.
main_logger.aggregate(
[env_runner1_results, env_runner2_results],
key="env_runners",
)
# The expected procedure is as follows:
# The individual internal values lists of the two loggers are as follows:
# env runner 1: [200, 300, 400]
# env runner 2: [250, 350, 450]
# Move backwards from index=-1 (each time, loop through both env runners)
# index=-1 -> [400, 450] -> mean -> [425] -> repeat 2 times (number
# of env runners) -> [425, 425]
# index=-2 -> [300, 350] -> mean -> [325] -> repeat 2 times
# -> append -> [425, 425, 325, 325] -> STOP b/c we have reached >= window.
# reverse the list -> [325, 325, 425, 425]
# deque(max_len=3) -> [325, 425, 425]
check(
main_logger.stats["env_runners"]["mean_ret"].values,
[325, 425, 425],
)
check(main_logger.peek(("env_runners", "mean_ret")), (325 + 425 + 425) / 3)
# Example: Lifetime sum over n parallel components' stats.
main_logger = MetricsLogger()
logger1 = MetricsLogger()
logger1.log_value("some_stat", 50, reduce="sum", window=None)
logger1.log_value("some_stat", 25, reduce="sum", window=None)
logger1_results = logger1.reduce()
logger2 = MetricsLogger()
logger2.log_value("some_stat", 75, reduce="sum", window=None)
logger2_results = logger2.reduce()
# Merge the stats from both Learners.
main_logger.aggregate([logger1_results, logger2_results])
check(main_logger.peek("some_stat"), 150)
# Example: Sum over n parallel components' stats with a window of 3.
main_logger = MetricsLogger()
logger1 = MetricsLogger()
logger1.log_value("some_stat", 50, reduce="sum", window=3)
logger1.log_value("some_stat", 25, reduce="sum", window=3)
logger1.log_value("some_stat", 10, reduce="sum", window=3)
logger1.log_value("some_stat", 5, reduce="sum", window=3)
logger1_results = logger1.reduce()
logger2 = MetricsLogger()
logger2.log_value("some_stat", 75, reduce="sum", window=3)
logger2.log_value("some_stat", 100, reduce="sum", window=3)
logger2_results = logger2.reduce()
# Merge the stats from both Learners.
main_logger.aggregate([logger1_results, logger2_results])
# The expected procedure is as follows:
# The individual internal values lists of the two loggers are as follows:
# env runner 1: [50, 25, 10, 5]
# env runner 2: [75, 100]
# Move backwards from index=-1 (each time, loop through both loggers)
# index=-1 -> [5, 100] -> reduce over both two indices -> [(5 + 100) / 2, (5 + 100) / 2] = [52.5, 52.5]
# Result = [52.5, 52.5]
# len() = 2 < window = 3
# index=-2 -> [10, 75] -> reduce over both two indices -> [(10 + 75) / 2, (10 + 75) / 2] = [42.5, 42.5]
# result = [42.5, 42.5, 52.5, 52.5]
# len() = 4 >= window = 3
check(main_logger.peek("some_stat"), 147.5) # last 3 items (window) get sum'd
Args:
stats_dicts: List of n stats dicts to be merged and then logged.
key: Optional top-level key under which to log all keys/key sequences
found in the n `stats_dicts`.
"""
all_keys = set()
def traverse_and_add_paths(d, path=()):
if isinstance(d, dict):
new_dict = {}
for key, value in d.items():
new_dict[key] = traverse_and_add_paths(value, path + (key,))
return new_dict
elif isinstance(d, list):
all_keys.add(path)
if len(d) == 1:
return d[0]
return d
else:
# For lists and values, we add the path to the set of all keys
all_keys.add(path)
return d
def build_nested_dict(stats_dict, key):
if isinstance(key, str):
return {key: stats_dict}
elif len(key) > 1:
# Key is tuple of keys so we build a nested dict recursively
return {key[0]: build_nested_dict(stats_dict, key[1:])}
else:
return {key[0]: stats_dict}
# We do one pass over all the stats_dicts_or_loggers to 1. prepend the key if provided and 2. collect all the keys that lead to leaves (which may be lists or values).
incoming_stats_dicts_with_key = []
for stats_dict in stats_dicts:
if key is not None:
stats_dict = build_nested_dict(stats_dict, key)
stats_dict = traverse_and_add_paths(stats_dict)
incoming_stats_dicts_with_key.append(stats_dict)
tree.map_structure_with_path(
lambda path, _: all_keys.add(force_tuple(path)),
self.stats,
)
for key in all_keys:
# Get all incoming Stats objects for this key
incoming_stats = [
self._get_key(key, stats=s)
for s in incoming_stats_dicts_with_key
if self._key_in_stats(key, stats=s)
]
structure_under_key = self._get_key(key, stats=self.stats, key_error=False)
# self._get_key returns {} if the key is not found
own_stats = (
None if isinstance(structure_under_key, dict) else structure_under_key
)
merged_stats = merge_stats(
base_stats=own_stats, incoming_stats=incoming_stats
)
self._set_key(key, merged_stats)
def log_time(
self,
key: Union[str, Tuple[str, ...]],
*,
reduce: str = "mean",
window: Optional[Union[int, float]] = None,
ema_coeff: Optional[float] = None,
percentiles: Union[List[int], bool] = False,
clear_on_reduce: bool = False,
with_throughput: bool = False,
throughput_ema_coeff: float = 0.05,
reduce_per_index_on_aggregate: bool = False,
) -> Stats:
"""Measures and logs a time delta value under `key` when used with a with-block.
.. testcode::
import time
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
logger = MetricsLogger()
# First delta measurement:
with logger.log_time("my_block_to_be_timed", ema_coeff=0.1):
time.sleep(1.0)
# EMA should be ~1sec.
assert 1.1 > logger.peek("my_block_to_be_timed") > 0.9
# Second delta measurement (note that we don't have to repeat the args
# again, as the stats under that name have already been created above with
# the correct args).
with logger.log_time("my_block_to_be_timed"):
time.sleep(2.0)
# EMA should be ~1.1sec.
assert 1.15 > logger.peek("my_block_to_be_timed") > 1.05
# When calling `reduce()`, the latest, reduced value is returned.
results = logger.reduce()
# EMA should be ~1.1sec.
assert 1.15 > results["my_block_to_be_timed"] > 1.05
Args:
key: The key (or tuple of keys) to log the measured time delta under.
reduce: The reduction method to apply, once `self.reduce()` is called.
If None, will collect all logged values under `key` in a list (and
also return that list upon calling `self.reduce()`).
window: An optional window size to reduce over.
If not None, then the reduction operation is only applied to the most
recent `window` items, and - after reduction - the internal values list
under `key` is shortened to hold at most `window` items (the most
recent ones).
Must be None if `ema_coeff` is provided.
If None (and `ema_coeff` is None), reduction must not be "mean".
ema_coeff: An optional EMA coefficient to use if `reduce` is "mean"
and no `window` is provided. Note that if both `window` and `ema_coeff`
are provided, an error is thrown. Also, if `ema_coeff` is provided,
`reduce` must be "mean".
The reduction formula for EMA is:
EMA(t1) = (1.0 - ema_coeff) * EMA(t0) + ema_coeff * new_value
percentiles: If reduce is `None`, we can compute the percentiles of the
values list given by `percentiles`. Defaults to [0, 0.5, 0.75, 0.9, 0.95,
0.99, 1] if set to True. When using percentiles, a window must be provided.
This window should be chosen carefully. RLlib computes exact percentiles and
the computational complexity is O(m*n*log(n/m)) where n is the window size
and m is the number of parallel metrics loggers involved (for example,
m EnvRunners).
clear_on_reduce: If True, all values under `key` will be emptied after
`self.reduce()` is called. Setting this to True is useful for cases,
in which the internal values list would otherwise grow indefinitely,
for example if reduce is None and there is no `window` provided.
with_throughput: Whether to track a throughput estimate together with this
metric. This is only supported for `reduce=sum` and
`clear_on_reduce=False` metrics (aka. "lifetime counts"). The `Stats`
object under the logged key then keeps track of the time passed
between two consecutive calls to `reduce()` and update its throughput
estimate. The current throughput estimate of a key can be obtained
through: <MetricsLogger>.peek(key, throughput=True).
throughput_ema_coeff: The EMA coefficient to use for throughput tracking.
Only used if with_throughput=True. Defaults to 0.05.
reduce_per_index_on_aggregate: If True, when merging Stats objects, we reduce
incoming values per index such that the new value at index `n` will be
the reduced value of all incoming values at index `n`.
If False, when reducing `n` Stats, the first `n` merged values will be
the reduced value of all incoming values at index `0`, the next `n` merged
values will be the reduced values of all incoming values at index `1`, etc.
"""
# No reduction (continue appending to list) AND no window.
# -> We'll force-reset our values upon `reduce()`.
if reduce is None and (window is None or window == float("inf")):
clear_on_reduce = True
# Set default ema_coeff to 0.01 if reduce is "mean" and no window is provided
if reduce == "mean" and window is None and ema_coeff is None:
ema_coeff = 0.01
if not self._key_in_stats(key):
self._set_key(
key,
Stats(
init_values=None,
reduce=reduce,
percentiles=percentiles,
window=window,
ema_coeff=ema_coeff,
clear_on_reduce=clear_on_reduce,
throughput=with_throughput,
throughput_ema_coeff=throughput_ema_coeff,
reduce_per_index_on_aggregate=reduce_per_index_on_aggregate,
),
)
# Return the Stats object, so a `with` clause can enter and exit it.
return self._get_key(key)
def reduce(self) -> Dict:
"""Reduces all logged values based on their settings and returns a result dict.
DO NOT CALL THIS METHOD under normal circumstances! RLlib's components call it
right before a distinct step has been completed and the (MetricsLogger-based)
results of that step need to be passed upstream to other components for further
processing.
The returned result dict has the exact same structure as the logged keys (or
nested key sequences) combined. Values are Stats objects if this MetricsLogger
is not a root logger. If this MetricsLogger is a root logger, the values are
the actual reduced values.
For example, imagine component A (e.g. an Algorithm) containing a MetricsLogger
and n remote components (e.g. n EnvRunners), each with their own
MetricsLogger object. Component A calls its n remote components, each of
which returns an equivalent, reduced dict with values at the leafs.
Component A can then further log these n result dicts through its own
MetricsLogger through:
`logger.aggregate([n returned result dicts from n subcomponents])`.
.. testcode::
from ray.rllib.utils.metrics.metrics_logger import MetricsLogger
from ray.rllib.utils.test_utils import check
logger = MetricsLogger(root=True)
# Log some values under different keys.
logger.log_value("loss", 0.1, window=2)
logger.log_value("loss", 0.2, window=2)
logger.log_value("min_loss", 0.3, reduce="min", window=2)
logger.log_value("min_loss", 0.1, reduce="min", window=2)
# reduce() returns the reduced values.
results = logger.reduce()
check(results["loss"], 0.15) # mean of [0.1, 0.2]
check(results["min_loss"], 0.1) # min of [0.3, 0.1]
# We can also reduce a specific key using indexing.
check(logger.reduce()["loss"], 0.15) # mean of [0.1, 0.2]
# Or reduce a nested key structure.
logger.log_value(("nested", "key"), 1.0)
check(logger.reduce()["nested"]["key"], 1.0)
Returns:
A (nested) dict matching the structure of `self.stats` (contains all ever
logged keys to this MetricsLogger) with the leafs being (reduced) Stats
objects if this MetricsLogger is not a root logger. If this MetricsLogger
is a root logger, the leafs are the actual (reduced) values.
"""
# For better error message, catch the last key-path (reducing of which might
# throw an error).
PATH = None
def _reduce(path, stats: Stats):
nonlocal PATH
PATH = path
return stats.reduce(compile=self._is_root_logger)
try:
with self._threading_lock:
reduced_stats_to_return = tree.map_structure_with_path(
_reduce, self.stats
)
# Provide proper error message if reduction fails due to bad data.
except Exception as e:
raise ValueError(
"There was an error while reducing the Stats object under key="
f"{PATH}! Check, whether you logged invalid or incompatible "
"values into this key over time in your custom code."
f"\nThe values under this key are: {self._get_key(PATH).values}."
f"\nThe original error was {str(e)}"
)
return reduced_stats_to_return
def activate_tensor_mode(self):
self._tensor_mode = True
def deactivate_tensor_mode(self):
self._tensor_mode = False
@property
def tensor_mode(self):
return self._tensor_mode
def _detach_tensor_if_necessary(self, value):
if self.tensor_mode:
if torch and torch.is_tensor(value):
return value.detach()
elif tf and tf.is_tensor(value):
return tf.stop_gradient(value)
return value
def set_value(
self,
key: Union[str, Tuple[str, ...]],
value: Any,
*,
reduce: Optional[str] = "mean",
window: Optional[Union[int, float]] = None,
ema_coeff: Optional[float] = None,
percentiles: Union[List[int], bool] = False,
clear_on_reduce: bool = False,
with_throughput: bool = False,
throughput_ema_coeff: float = 0.05,
reduce_per_index_on_aggregate: bool = False,
) -> None:
"""Overrides the logged values under `key` with `value`.
The internal values list under `key` is cleared and reset to [`value`]. If
`key` already exists, this method will NOT alter the reduce settings. Otherwise,
it will apply the provided reduce settings (`reduce`, `window`, `ema_coeff`,
and `clear_on_reduce`).
Args:
key: The key to override.
value: The new value to set the internal values list to (will be set to
a list containing a single item `value`).
reduce: The reduction method to apply, once `self.reduce()` is called.
If None, will collect all logged values under `key` in a list (and
also return that list upon calling `self.reduce()`).
Note that this is only applied if `key` does not exist in `self` yet.
window: An optional window size to reduce over.
If not None, then the reduction operation is only applied to the most
recent `window` items, and - after reduction - the internal values list
under `key` is shortened to hold at most `window` items (the most
recent ones).
Must be None if `ema_coeff` is provided.
If None (and `ema_coeff` is None), reduction must not be "mean".
Note that this is only applied if `key` does not exist in `self` yet.
ema_coeff: An optional EMA coefficient to use if `reduce` is "mean"
and no `window` is provided. Note that if both `window` and `ema_coeff`
are provided, an error is thrown. Also, if `ema_coeff` is provided,
`reduce` must be "mean".
The reduction formula for EMA is:
EMA(t1) = (1.0 - ema_coeff) * EMA(t0) + ema_coeff * new_value
Note that this is only applied if `key` does not exist in `self` yet.
percentiles: If reduce is `None`, we can compute the percentiles of the
values list given by `percentiles`. Defaults to [0, 0.5, 0.75, 0.9, 0.95,
0.99, 1] if set to True. When using percentiles, a window must be provided.
This window should be chosen carefully. RLlib computes exact percentiles and
the computational complexity is O(m*n*log(n/m)) where n is the window size
and m is the number of parallel metrics loggers involved (for example,
m EnvRunners).
clear_on_reduce: If True, all values under `key` will be emptied after
`self.reduce()` is called. Setting this to True is useful for cases,
in which the internal values list would otherwise grow indefinitely,
for example if reduce is None and there is no `window` provided.
Note that this is only applied if `key` does not exist in `self` yet.
with_throughput: Whether to track a throughput estimate together with this
metric. This is only supported for `reduce=sum` and
`clear_on_reduce=False` metrics (aka. "lifetime counts"). The `Stats`
object under the logged key then keeps track of the time passed
between two consecutive calls to `reduce()` and update its throughput
estimate. The current throughput estimate of a key can be obtained
through: peeked_value, throuthput_per_sec =
<MetricsLogger>.peek([key], throughput=True).
throughput_ema_coeff: The EMA coefficient to use for throughput tracking.
Only used if with_throughput=True. Defaults to 0.05.
reduce_per_index_on_aggregate: If True, when merging Stats objects, we reduce
incoming values per index such that the new value at index `n` will be
the reduced value of all incoming values at index `n`.
If False, when reducing `n` Stats, the first `n` merged values will be
the reduced value of all incoming values at index `0`, the next `n` merged
values will be the reduced values of all incoming values at index `1`, etc.
Note that this is only applied if `key` does not exist in `self` yet.
"""
# Key already in self -> Erase internal values list with [`value`].
if self._key_in_stats(key):
with self._threading_lock:
stats = self._get_key(key)
stats.values = [value]
# Key cannot be found in `self` -> Simply log as a (new) value.
else:
# Set default ema_coeff to 0.01 if reduce is "mean" and no window is provided
if reduce == "mean" and window is None and ema_coeff is None:
ema_coeff = 0.01
self.log_value(
key,
value,
reduce=reduce,
window=window,
ema_coeff=ema_coeff,
percentiles=percentiles,
clear_on_reduce=clear_on_reduce,
with_throughput=with_throughput,
throughput_ema_coeff=throughput_ema_coeff,
reduce_per_index_on_aggregate=reduce_per_index_on_aggregate,
)
def reset(self) -> None:
"""Resets all data stored in this MetricsLogger."""
with self._threading_lock:
self.stats = {}
def delete(self, *key: Tuple[str, ...], key_error: bool = True) -> None:
"""Deletes the given `key` from this metrics logger's stats.
Args:
key: The key or key sequence (for nested location within self.stats),
to delete from this MetricsLogger's stats.
key_error: Whether to throw a KeyError if `key` cannot be found in `self`.
Raises:
KeyError: If `key` cannot be found in `self` AND `key_error` is True.
"""
self._del_key(key, key_error)
def get_state(self) -> Dict[str, Any]:
"""Returns the current state of `self` as a dict.
Note that the state is merely the combination of all states of the individual
`Stats` objects stored under `self.stats`.
"""
stats_dict = {}
def _map(path, stats):
# Convert keys to strings for msgpack-friendliness.
stats_dict["--".join(path)] = stats.get_state()
with self._threading_lock:
tree.map_structure_with_path(_map, self.stats)
return {"stats": stats_dict}
def set_state(self, state: Dict[str, Any]) -> None:
"""Sets the state of `self` to the given `state`.
Args:
state: The state to set `self` to.
"""
with self._threading_lock:
# Reset all existing stats to ensure a clean state transition
self.stats = {}
for flat_key, stats_state in state["stats"].items():
self._set_key(flat_key.split("--"), Stats.from_state(stats_state))
def _key_in_stats(self, flat_key, *, stats=None):
flat_key = force_tuple(tree.flatten(flat_key))
_dict = stats if stats is not None else self.stats
for key in flat_key:
if key not in _dict:
return False
_dict = _dict[key]
return True
def _get_key(self, flat_key, *, stats=None, key_error=True):
flat_key = force_tuple(tree.flatten(flat_key))
_dict = stats if stats is not None else self.stats
for key in flat_key:
try:
_dict = _dict[key]
except KeyError as e:
if key_error:
raise e
else:
return {}
return _dict
def _set_key(self, flat_key, stats):
flat_key = force_tuple(tree.flatten(flat_key))
with self._threading_lock:
_dict = self.stats
for i, key in enumerate(flat_key):
# If we are at the end of the key sequence, set
# the key, no matter, whether it already exists or not.
if i == len(flat_key) - 1:
_dict[key] = stats
return
# If an intermediary key in the sequence is missing,
# add a sub-dict under this key.
if key not in _dict:
_dict[key] = {}
_dict = _dict[key]
def _del_key(self, flat_key, key_error=False):
flat_key = force_tuple(tree.flatten(flat_key))
with self._threading_lock:
# Erase the key from the (nested) `self.stats` dict.
_dict = self.stats
try:
for i, key in enumerate(flat_key):
if i == len(flat_key) - 1:
del _dict[key]
return
_dict = _dict[key]
except KeyError as e:
if key_error:
raise e
def _get_throughputs(
self, key: Optional[Union[str, Tuple[str, ...]]] = None, default=None
) -> Union[Dict, float]:
"""Returns throughput values for Stats that have throughput tracking enabled.
If no key is provided, returns a nested dict containing throughput values for all Stats
that have throughput tracking enabled. If a key is provided, returns the throughput value
for that specific key or nested structure.
The throughput values represent the rate of change of the corresponding metrics per second.
For example, if a metric represents the number of steps taken, its throughput value would
represent steps per second.
Args:
key: Optional key or nested key path to get throughput for. If provided, returns just
the throughput value for that key or nested structure. If None, returns a nested dict
with throughputs for all metrics.
default: Default value to return if no throughput values are found.
Returns:
If key is None: A nested dict with the same structure as self.stats but with "_throughput"
appended to leaf keys and throughput values as leaf values. Only includes entries for
Stats objects that have throughput tracking enabled.
If key is provided: The throughput value for that specific key or nested structure.
"""
def _nested_throughputs(stats):
"""Helper function to calculate throughputs for a nested structure."""
def _transform(path, value):
if isinstance(value, Stats) and value.has_throughput:
# Convert path to tuple for consistent key handling
key = force_tuple(path)
# Add "_throughput" to the last key in the path
return key[:-1] + (key[-1] + "_throughput",), value.throughput
return path, value
result = {}
for path, value in tree.flatten_with_path(stats):
new_path, new_value = _transform(path, value)
if isinstance(new_value, float): # Only include throughput values
_dict = result
for k in new_path[:-1]:
if k not in _dict:
_dict[k] = {}
_dict = _dict[k]
_dict[new_path[-1]] = new_value
return result
with self._threading_lock:
if key is not None:
# Get the Stats object or nested structure for the key
stats = self._get_key(key, key_error=False)
if isinstance(stats, Stats):
if not stats.has_throughput:
raise ValueError(
f"Key '{key}' does not have throughput tracking enabled"
)
return stats.throughput
elif stats == {}:
# If the key is not found, return the default value
return default
else:
# stats is a non-empty dictionary
return _nested_throughputs(stats)
throughputs = {}
def _map(path, stats):
if isinstance(stats, Stats) and stats.has_throughput:
# Convert path to tuple for consistent key handling
key = force_tuple(path)
# Add "_throughput" to the last key in the path
key = key[:-1] + (key[-1] + "_throughput",)
# Set the throughput value in the nested structure
_dict = throughputs
for k in key[:-1]:
if k not in _dict:
_dict[k] = {}
_dict = _dict[k]
_dict[key[-1]] = stats.throughput
tree.map_structure_with_path(_map, self.stats)
return throughputs if throughputs else default
def compile(self) -> Dict:
"""Compiles all current values and throughputs into a single dictionary.
This method combines the results of all stats and throughputs into a single
dictionary, with throughput values having a "_throughput" suffix. This is useful
for getting a complete snapshot of all metrics and their throughputs in one call.
Returns:
A nested dictionary containing both the current values and throughputs for all
metrics. The structure matches self.stats, with throughput values having
"_throughput" suffix in their keys.
"""
# Get all current values
values = self.reduce()
# Get all throughputs
throughputs = self._get_throughputs()
deep_update(values, throughputs or {}, new_keys_allowed=True)
def traverse_dict(d):
if isinstance(d, dict):
new_dict = {}
for key, value in d.items():
new_dict[key] = traverse_dict(value)
return new_dict
elif isinstance(d, list):
if len(d) == 1:
return d[0]
# If value is a longer list, we should just return the list because there is no reduction method applied
return d
else:
# If the value is not a list, it is a single value and we can yield it
return d
return traverse_dict(values)
| MetricsLogger |
python | dask__dask | dask/_task_spec.py | {
"start": 24875,
"end": 27057
} | class ____(Task, Iterable):
constructor: Callable
klass: type
__slots__ = tuple(__annotations__)
def __init__(
self,
/,
*args: Any,
**kwargs: Any,
):
if len(args) == 1 and isinstance(args[0], self.klass):
args = args[0] # type: ignore[assignment]
super().__init__(
None,
self.to_container,
*args,
constructor=self.constructor,
**kwargs,
)
def __getstate__(self):
state = super().__getstate__()
state = list(state)
slots = self.__class__.get_all_slots()
ix = slots.index("kwargs")
# The constructor as a kwarg is redundant since this is encoded in the
# class itself. Serializing the builtin types is not trivial
# This saves about 15% of overhead
state[ix] = state[ix].copy()
state[ix].pop("constructor", None)
return state
def __setstate__(self, state):
super().__setstate__(state)
self.kwargs["constructor"] = self.__class__.constructor
return self
def __repr__(self):
return f"{type(self).__name__}({self.args})"
def substitute(
self, subs: dict[KeyType, KeyType | GraphNode], key: KeyType | None = None
) -> NestedContainer:
subs_filtered = {
k: v for k, v in subs.items() if k in self.dependencies and k != v
}
if not subs_filtered:
return self
return type(self)(
*(
(
a.substitute(subs_filtered)
if isinstance(a, (GraphNode, TaskRef))
else a
)
for a in self.args
)
)
def __dask_tokenize__(self):
from dask.tokenize import tokenize
return (
type(self).__name__,
self.klass,
sorted(tokenize(a) for a in self.args),
)
return super().__dask_tokenize__()
@staticmethod
def to_container(*args, constructor):
return constructor(args)
def __iter__(self):
yield from self.args
| NestedContainer |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 17213,
"end": 17878
} | class ____:
"""Test vi_VN currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.vi_VN import Provider as ViVNCurrencyProvider
cls.provider = ViVNCurrencyProvider
cls.currencies = cls.provider.currencies
def test_currency(self, faker, num_samples):
for _ in range(num_samples):
cur = faker.currency()
assert isinstance(cur, tuple) and cur in self.currencies
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
| TestViVn |
python | celery__celery | t/unit/backends/test_azureblockblob.py | {
"start": 7934,
"end": 8718
} | class ____:
def setup_method(self):
self.url = (
"azureblockblob://"
"DefaultEndpointsProtocol=protocol;"
"AccountName=name;"
"AccountKey=account_key;"
"EndpointSuffix=suffix"
)
self.backend = AzureBlockBlobBackend(
app=self.app,
url=self.url
)
def test_as_uri_include_password(self):
assert self.backend.as_uri(include_password=True) == self.url
def test_as_uri_exclude_password(self):
assert self.backend.as_uri(include_password=False) == (
"azureblockblob://"
"DefaultEndpointsProtocol=protocol;"
"AccountName=name;"
"AccountKey=**;"
"EndpointSuffix=suffix"
)
| test_as_uri |
python | huggingface__transformers | tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py | {
"start": 5023,
"end": 8777
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Prompt Depth Anything does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (PromptDepthAnythingForDepthEstimation,) if is_torch_available() else ()
pipeline_model_mapping = (
{"depth-estimation": PromptDepthAnythingForDepthEstimation} if is_torch_available() else {}
)
test_resize_embeddings = False
def setUp(self):
self.model_tester = PromptDepthAnythingModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=PromptDepthAnythingConfig,
has_text_modality=False,
hidden_size=37,
common_properties=["patch_size"],
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(
reason="Prompt Depth Anything with AutoBackbone does not have a base model and hence no input_embeddings"
)
def test_inputs_embeds(self):
pass
def test_for_depth_estimation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
@unittest.skip(reason="Prompt Depth Anything does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="Prompt Depth Anything does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="Prompt Depth Anything with AutoBackbone does not have a base model and hence no input_embeddings"
)
def test_model_get_set_embeddings(self):
pass
@unittest.skip(
reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "depth-anything/prompt-depth-anything-vits-hf"
model = PromptDepthAnythingForDepthEstimation.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_backbone_selection(self):
def _validate_backbone_init():
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
self.assertEqual(len(model.backbone.out_indices), 2)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.backbone = "facebook/dinov2-small"
config.use_pretrained_backbone = True
config.use_timm_backbone = False
config.backbone_config = None
config.backbone_kwargs = {"out_indices": [-2, -1]}
_validate_backbone_init()
def prepare_img():
url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw)
return image
def prepare_prompt_depth():
prompt_depth_url = (
"https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true"
)
prompt_depth = Image.open(requests.get(prompt_depth_url, stream=True).raw)
return prompt_depth
@require_torch
@require_vision
@slow
| PromptDepthAnythingModelTest |
python | lepture__authlib | authlib/jose/rfc7518/jwe_algs.py | {
"start": 1610,
"end": 2857
} | class ____(JWEAlgorithm):
#: A key of size 2048 bits or larger MUST be used with these algorithms
#: RSA1_5, RSA-OAEP, RSA-OAEP-256
key_size = 2048
def __init__(self, name, description, pad_fn):
self.name = name
self.description = description
self.padding = pad_fn
def prepare_key(self, raw_data):
return RSAKey.import_key(raw_data)
def generate_preset(self, enc_alg, key):
cek = enc_alg.generate_cek()
return {"cek": cek}
def wrap(self, enc_alg, headers, key, preset=None):
if preset and "cek" in preset:
cek = preset["cek"]
else:
cek = enc_alg.generate_cek()
op_key = key.get_op_key("wrapKey")
if op_key.key_size < self.key_size:
raise ValueError("A key of size 2048 bits or larger MUST be used")
ek = op_key.encrypt(cek, self.padding)
return {"ek": ek, "cek": cek}
def unwrap(self, enc_alg, ek, headers, key):
# it will raise ValueError if failed
op_key = key.get_op_key("unwrapKey")
cek = op_key.decrypt(ek, self.padding)
if len(cek) * 8 != enc_alg.CEK_SIZE:
raise ValueError('Invalid "cek" length')
return cek
| RSAAlgorithm |
python | ray-project__ray | python/ray/data/_internal/execution/streaming_executor_state.py | {
"start": 5713,
"end": 6205
} | class ____:
"""The scheduling status of an operator.
This will be updated each time when StreamingExecutor makes
a scheduling decision, i.e., in each `select_operator_to_run`
call.
"""
# Whether the op was considered runnable in the last scheduling
# decision.
runnable: bool = False
# Whether the resources were sufficient for the operator to run
# in the last scheduling decision.
under_resource_limits: bool = False
@dataclass
| OpSchedulingStatus |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_stats.py | {
"start": 963,
"end": 1094
} | class ____(BaseModel):
"""DagStatsState serializer for responses."""
state: DagRunState
count: int
| DagStatsStateResponse |
python | bokeh__bokeh | tests/unit/bokeh/core/test_serialization.py | {
"start": 32051,
"end": 45654
} | class ____:
def setup_method(self, test_method):
from json import loads
from bokeh.core.json_encoder import serialize_json
self.serialize = serialize_json
self.deserialize = loads
def test_with_basic(self) -> None:
assert self.serialize({'test': [1, 2, 3]}) == '{"test":[1,2,3]}'
def test_pretty(self) -> None:
assert self.serialize({'test': [1, 2, 3]}, pretty=True) == '{\n "test": [\n 1,\n 2,\n 3\n ]\n}'
def test_with_np_array(self) -> None:
a = np.arange(5)
assert self.serialize(a) == '[0,1,2,3,4]'
def test_with_pd_series(self) -> None:
s = pd.Series([0, 1, 2, 3, 4])
assert self.serialize(s) == '[0,1,2,3,4]'
def test_nans_and_infs(self) -> None:
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
def test_nans_and_infs_pandas(self) -> None:
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
def test_pandas_datetime_types(self) -> None:
''' should convert to millis '''
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {
"vals": [
978307200000,
978393600000,
978480000000,
978566400000,
978652800000,
],
"idx": [
978307200000,
978393600000,
978480000000,
978566400000,
978652800000,
],
}
assert deserialized == baseline
def test_builtin_datetime_types(self) -> None:
''' should convert to millis as-is '''
DT_EPOCH = dt.datetime.fromtimestamp(0, tz=dt.timezone.utc)
a = dt.date(2016, 4, 28)
b = dt.datetime(2016, 4, 28, 2, 20, 50)
serialized = self.serialize({'a' : [a],
'b' : [b]})
deserialized = self.deserialize(serialized)
baseline = {'a': ['2016-04-28'],
'b': [(b - DT_EPOCH).total_seconds() * 1000. + b.microsecond / 1000.],
}
assert deserialized == baseline
# test pre-computed values too
assert deserialized == {
'a': ['2016-04-28'], 'b': [1461810050000.0]
}
def test_builtin_timedelta_types(self) -> None:
''' should convert time delta to a dictionary '''
delta = dt.timedelta(days=42, seconds=1138, microseconds=1337)
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == delta.total_seconds() * 1000
def test_numpy_timedelta_types(self) -> None:
delta = np.timedelta64(3000, 'ms')
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == 3000
delta = np.timedelta64(3000, 's')
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == 3000000
def test_pandas_timedelta_types(self) -> None:
delta = pd.Timedelta("3000ms")
serialized = self.serialize(delta)
deserialized = self.deserialize(serialized)
assert deserialized == 3000
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
def test_encode_base64_dict(dt, shape) -> None:
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__ndarray__' in d
b64 = base64.b64decode(d['__ndarray__'])
aa = np.frombuffer(b64, dtype=d['dtype'])
assert np.array_equal(a, aa)
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
def test_decode_base64_dict(dt, shape) -> None:
a = np.arange(12, dtype=dt)
a.reshape(shape)
data = base64.b64encode(a).decode('utf-8')
d = {
'__ndarray__' : data,
'dtype' : a.dtype.name,
'shape' : a.shape
}
aa = bus.decode_base64_dict(d)
assert aa.shape == a.shape
assert aa.dtype.name == a.dtype.name
assert np.array_equal(a, aa)
assert aa.flags['WRITEABLE']
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
def test_encode_decode_roundtrip(dt, shape) -> None:
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
aa = bus.decode_base64_dict(d)
assert np.array_equal(a, aa)
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
def test_encode_binary_dict(dt, shape) -> None:
a = np.arange(12, dtype=dt)
a.reshape(shape)
bufs = []
d = bus.encode_binary_dict(a, buffers=bufs)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == a.tobytes()
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__buffer__' in d
@pytest.mark.parametrize('cols', [None, [], ['a'], ['a', 'b'], ['a', 'b', 'c']])
@pytest.mark.parametrize('dt1', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('dt2', [np.float32, np.float64, np.int64])
def test_transform_column_source_data_with_buffers(pd, cols, dt1, dt2) -> None:
    """transform_column_source_data: binary-capable dtypes become buffer
    descriptor dicts, everything else becomes plain lists; ``cols`` restricts
    which columns appear in the output.
    """
    d = dict(a=[1,2,3], b=np.array([4,5,6], dtype=dt1), c=pd.Series([7,8,9], dtype=dt2))
    bufs = []
    out = bus.transform_column_source_data(d, buffers=bufs, cols=cols)
    # cols=None means "all columns"; otherwise only the requested subset.
    assert set(out) == (set(d) if cols is None else set(cols))
    if 'a' in out:
        # Plain Python lists pass through unchanged.
        assert out['a'] == [1,2,3]
    for x in ['b', 'c']:
        dt = d[x].dtype
        if x in out:
            if dt in bus.BINARY_ARRAY_TYPES:
                # Binary-encodable dtype: expect a buffer descriptor dict.
                assert isinstance(out[x], dict)
                assert 'shape' in out[x]
                assert out[x]['shape'] == d[x].shape
                assert 'dtype' in out[x]
                assert out[x]['dtype'] == d[x].dtype.name
                assert '__buffer__' in out[x]
            else:
                # Non-binary dtype falls back to a plain Python list.
                assert isinstance(out[x], list)
                assert out[x] == list(d[x])
def _check_buffer_encoded(series, expected_bytes, expected_dtype) -> None:
    """Run bus.transform_series with a buffer list and assert that the series
    was encoded as a single binary buffer with the expected payload, shape
    and dtype name."""
    bufs = []
    out = bus.transform_series(series, buffers=bufs)
    assert isinstance(out, dict)
    assert len(bufs) == 1
    assert len(bufs[0]) == 2
    # The buffer header carries only the generated buffer id.
    assert isinstance(bufs[0][0], dict)
    assert list(bufs[0][0]) == ["id"]
    assert bufs[0][1] == expected_bytes
    assert 'shape' in out
    assert out['shape'] == series.shape
    assert 'dtype' in out
    assert out['dtype'] == expected_dtype
    assert '__buffer__' in out

def test_transform_series_force_list_default_with_buffers() -> None:
    """transform_series: int64 falls back to a plain list, while
    binary-encodable dtypes (int32, float64, datetime-like indexes) are
    emitted as binary buffers.

    REFACTOR: the six identical assertion blocks were folded into
    _check_buffer_encoded; behavior of each case is unchanged.
    """
    # default int seems to be int64, can't be converted to buffer!
    df = pd.Series([1, 3, 5, 6, 8])
    out = bus.transform_series(df)
    assert isinstance(out, list)
    assert out == [1, 3, 5, 6, 8]

    # int32 is binary-encodable.
    df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
    _check_buffer_encoded(df, np.array(df).tobytes(), df.dtype.name)

    # float64 is binary-encodable.
    df = pd.Series([1.0, 3, 5, 6, 8])
    _check_buffer_encoded(df, np.array(df).tobytes(), df.dtype.name)

    # Non-finite floats still encode as a raw buffer.
    df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
    _check_buffer_encoded(df, np.array(df).tobytes(), df.dtype.name)

    # PeriodIndex: converted via timestamps, encoded as float64.
    df = pd.period_range('1900-01-01','2000-01-01', freq='A')
    _check_buffer_encoded(
        df, bus.convert_datetime_array(df.to_timestamp().values).tobytes(), 'float64'
    )

    # DatetimeIndex: encoded as float64.
    df = pd.period_range('1900-01-01','2000-01-01', freq='A').to_timestamp()
    _check_buffer_encoded(
        df, bus.convert_datetime_array(df.values).tobytes(), 'float64'
    )

    # TimeDeltaIndex: encoded as float64.
    df = pd.to_timedelta(np.arange(5), unit='s')
    _check_buffer_encoded(
        df, bus.convert_datetime_array(df.values).tobytes(), 'float64'
    )
def test_to_json(self) -> None:
child_obj = SomeModelToJson(foo=57, bar="hello")
obj = SomeModelToJson(child=child_obj, foo=42, bar="world")
json = obj.to_json(include_defaults=True)
json_string = serialize_json(json)
assert json == {
"child": {"id": child_obj.id},
"null_child": None,
"id": obj.id,
"name": None,
"tags": [],
'js_property_callbacks': dict(type="map", entries=[]),
"js_event_callbacks": dict(type="map", entries=[]),
"subscribed_events": dict(type="set", entries=[]),
"syncable": True,
"foo": 42,
"bar": "world",
}
assert (
'{"bar":"world",' +
'"child":{"id":"%s"},' +
'"foo":42,' +
'"id":"%s",' +
'"js_event_callbacks":{"entries":[],"type":"map"},' +
'"js_property_callbacks":{"entries":[],"type":"map"},' +
'"name":null,' +
'"null_child":null,' +
'"subscribed_events":{"entries":[],"type":"set"},' +
'"syncable":true,' +
'"tags":[]}'
) % (child_obj.id, obj.id) == json_string
json = obj.to_json(include_defaults=False)
json_string = serialize_json(json)
assert json == {
"child": {"id": child_obj.id},
"id": obj.id,
"foo": 42,
"bar": "world",
}
assert (
'{"bar":"world",' +
'"child":{"id":"%s"},' +
'"foo":42,' +
'"id":"%s"}'
) % (child_obj.id, obj.id) == json_string
def test_no_units_in_json(self) -> None:
from bokeh.models import AnnularWedge
obj = AnnularWedge()
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert 'outer_radius' in json
assert 'outer_radius_units' not in json
def test_dataspec_field_in_json(self) -> None:
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = "fieldname"
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert json["start_angle"] == dict(type="map", entries=[["field", "fieldname"]]) # TODO: dict(type="field", field="fieldname")
def test_dataspec_value_in_json(self) -> None:
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = 60
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert json["start_angle"] == dict(type="map", entries=[["value", 60]]) # TODO: dict(type="value", value=60)
"""
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| TestSerializeJson |
python | kamyu104__LeetCode-Solutions | Python/the-number-of-weak-characters-in-the-game.py | {
"start": 563,
"end": 1044
class ____(object):
    def numberOfWeakCharacters(self, properties):
        """
        :type properties: List[List[int]]
        :rtype: int

        Group defenses by attack value, then sweep attack values in
        descending order: a character is weak iff some already-seen character
        (which necessarily has strictly higher attack) has strictly higher
        defense. O(n log n) time, O(n) space.
        """
        lookup = collections.defaultdict(list)
        for a, d in properties:
            lookup[a].append(d)
        result = max_d = 0
        # BUG FIX: dict.iterkeys() is Python-2-only; iterating the dict
        # directly is equivalent and also works on Python 3.
        for a in sorted(lookup, reverse=True):
            # max_d only covers strictly larger attack values at this point,
            # so equal-attack characters never count each other as weak.
            result += sum(d < max_d for d in lookup[a])
            max_d = max(max_d, max(lookup[a]))
        return result
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/chat_engine/types.py | {
"start": 3192,
"end": 14020
class ____:
    """Streaming chat response to user and writing to chat history."""

    # Final formatted response text (filled in once streaming completes).
    response: str = ""
    sources: List[ToolOutput] = field(default_factory=list)
    chat_stream: Optional[ChatResponseGen] = None
    achat_stream: Optional[ChatResponseAsyncGen] = None
    source_nodes: List[NodeWithScore] = field(default_factory=list)
    unformatted_response: str = ""
    queue: Queue = field(default_factory=Queue)
    aqueue: Optional[asyncio.Queue] = None
    # flag when chat message is a function call
    is_function: Optional[bool] = None
    # flag when processing done
    is_done = False
    # signal when a new item is added to the queue
    new_item_event: Optional[asyncio.Event] = None
    # NOTE: async code uses two events rather than one since it yields
    # control when waiting for queue item
    # signal when the OpenAI functions stop executing
    is_function_false_event: Optional[asyncio.Event] = None
    # signal when an OpenAI function is being executed
    is_function_not_none_thread_event: Event = field(default_factory=Event)
    is_writing_to_memory: bool = True
    # Track if an exception occurred
    exception: Optional[Exception] = None
    awrite_response_to_history_task: Optional[asyncio.Task] = None

    def set_source_nodes(self) -> None:
        """Populate source_nodes from tool outputs that wrap query responses."""
        if self.sources and not self.source_nodes:
            for tool_output in self.sources:
                if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
                    self.source_nodes.extend(tool_output.raw_output.source_nodes)

    def __post_init__(self) -> None:
        # Derive source_nodes eagerly at construction time.
        self.set_source_nodes()

    def __str__(self) -> str:
        """Drain any remaining queued deltas into the response text, then
        return the (stripped) response."""
        if self.is_done and not self.queue.empty() and not self.is_function:
            while self.queue.queue:
                delta = self.queue.queue.popleft()
                self.unformatted_response += delta
            self.response = self.unformatted_response.strip()
        return self.response

    def _ensure_async_setup(self) -> None:
        """Lazily create asyncio primitives so they bind to the running loop."""
        if self.aqueue is None:
            self.aqueue = asyncio.Queue()
        if self.new_item_event is None:
            self.new_item_event = asyncio.Event()
        if self.is_function_false_event is None:
            self.is_function_false_event = asyncio.Event()

    def put_in_queue(self, delta: Optional[str]) -> None:
        """Enqueue a streamed delta (sync path) and wake any waiting reader."""
        self.queue.put_nowait(delta)
        self.is_function_not_none_thread_event.set()

    def aput_in_queue(self, delta: Optional[str]) -> None:
        """Enqueue a streamed delta (async path) and signal the new-item event."""
        assert self.aqueue is not None
        assert self.new_item_event is not None

        self.aqueue.put_nowait(delta)
        self.new_item_event.set()

    @dispatcher.span
    def write_response_to_history(
        self,
        memory: BaseMemory,
        on_stream_end_fn: Optional[Callable] = None,
    ) -> None:
        """Consume chat_stream, forwarding deltas to the queue and writing the
        final assembled message into ``memory``. Runs on the producer thread."""
        if self.chat_stream is None:
            raise ValueError(
                "chat_stream is None. Cannot write to history without chat_stream."
            )

        # try/except to prevent hanging on error
        dispatcher.event(StreamChatStartEvent())
        try:
            final_text = ""
            for chat in self.chat_stream:
                self.is_function = is_function(chat.message)
                if chat.delta:
                    dispatcher.event(
                        StreamChatDeltaReceivedEvent(
                            delta=chat.delta,
                        )
                    )
                    self.put_in_queue(chat.delta)
                    final_text += chat.delta or ""
            if self.is_function is not None:  # if loop has gone through iteration
                # NOTE: this is to handle the special case where we consume some of the
                # chat stream, but not all of it (e.g. in react agent)
                chat.message.content = final_text.strip()  # final message
                memory.put(chat.message)
        except Exception as e:
            dispatcher.event(StreamChatErrorEvent(exception=e))
            self.exception = e

            # This act as is_done events for any consumers waiting
            self.is_function_not_none_thread_event.set()

            # force the queue reader to see the exception
            self.put_in_queue("")
            raise
        dispatcher.event(StreamChatEndEvent())

        self.is_done = True

        # This act as is_done events for any consumers waiting
        self.is_function_not_none_thread_event.set()
        if on_stream_end_fn is not None and not self.is_function:
            on_stream_end_fn()

    @dispatcher.span
    async def awrite_response_to_history(
        self,
        memory: BaseMemory,
        on_stream_end_fn: Optional[Callable] = None,
    ) -> None:
        """Async twin of write_response_to_history, consuming achat_stream."""
        self._ensure_async_setup()
        assert self.aqueue is not None
        assert self.is_function_false_event is not None
        assert self.new_item_event is not None

        if self.achat_stream is None:
            raise ValueError(
                "achat_stream is None. Cannot asynchronously write to "
                "history without achat_stream."
            )

        # try/except to prevent hanging on error
        dispatcher.event(StreamChatStartEvent())
        try:
            final_text = ""
            async for chat in self.achat_stream:
                self.is_function = is_function(chat.message)
                if chat.delta:
                    dispatcher.event(
                        StreamChatDeltaReceivedEvent(
                            delta=chat.delta,
                        )
                    )
                    self.aput_in_queue(chat.delta)
                    final_text += chat.delta or ""
                self.new_item_event.set()
                if self.is_function is False:
                    self.is_function_false_event.set()
            if self.is_function is not None:  # if loop has gone through iteration
                # NOTE: this is to handle the special case where we consume some of the
                # chat stream, but not all of it (e.g. in react agent)
                chat.message.content = final_text.strip()  # final message
                await memory.aput(chat.message)
        except Exception as e:
            dispatcher.event(StreamChatErrorEvent(exception=e))
            self.exception = e

            # These act as is_done events for any consumers waiting
            self.is_function_false_event.set()
            self.new_item_event.set()

            # force the queue reader to see the exception
            self.aput_in_queue("")
            raise
        dispatcher.event(StreamChatEndEvent())
        self.is_done = True

        # These act as is_done events for any consumers waiting
        self.is_function_false_event.set()
        self.new_item_event.set()
        if on_stream_end_fn is not None and not self.is_function:
            # Support both plain and async stream-end callbacks (incl. partials).
            if iscoroutinefunction(
                on_stream_end_fn.func
                if isinstance(on_stream_end_fn, partial)
                else on_stream_end_fn
            ):
                await on_stream_end_fn()
            else:
                on_stream_end_fn()

    @property
    def response_gen(self) -> Generator[str, None, None]:
        """Yield streamed deltas (sync). Reads from the queue while a writer
        thread fills it, or directly from chat_stream when not writing to
        memory."""
        yielded_once = False
        if self.is_writing_to_memory:
            while not self.is_done or not self.queue.empty():
                if self.exception is not None:
                    raise self.exception

                try:
                    delta = self.queue.get(block=False)
                    self.unformatted_response += delta
                    yield delta
                    yielded_once = True
                except Empty:
                    # Queue is empty, but we're not done yet. Sleep for 0 secs to release the GIL and allow other threads to run.
                    time.sleep(0)
        else:
            if self.chat_stream is None:
                raise ValueError("chat_stream is None!")

            for chat_response in self.chat_stream:
                self.unformatted_response += chat_response.delta or ""
                yield chat_response.delta or ""
                yielded_once = True

        self.response = self.unformatted_response.strip()

        # edge case where the stream was exhausted before yielding anything
        if not yielded_once:
            yield self.response

    async def async_response_gen(self) -> AsyncGenerator[str, None]:
        """Yield streamed deltas (async); awaits the background history-writer
        task on exit so its exceptions are not swallowed."""
        try:
            yielded_once = False
            self._ensure_async_setup()
            assert self.aqueue is not None

            if self.is_writing_to_memory:
                while True:
                    if not self.aqueue.empty() or not self.is_done:
                        if self.exception is not None:
                            raise self.exception

                        try:
                            delta = await asyncio.wait_for(
                                self.aqueue.get(), timeout=0.1
                            )
                        except asyncio.TimeoutError:
                            # Break only when the stream is done and the queue is empty
                            if self.is_done and self.aqueue.empty():
                                break
                            continue
                        if delta is not None:
                            self.unformatted_response += delta
                            yield delta
                            yielded_once = True
                    else:
                        break
            else:
                if self.achat_stream is None:
                    raise ValueError("achat_stream is None!")

                async for chat_response in self.achat_stream:
                    self.unformatted_response += chat_response.delta or ""
                    yield chat_response.delta or ""
                    yielded_once = True

            self.response = self.unformatted_response.strip()

            # edge case where the stream was exhausted before yielding anything
            if not yielded_once:
                yield self.response
        finally:
            if self.awrite_response_to_history_task:
                # Make sure that the background task ran to completion, retrieve any exceptions
                await self.awrite_response_to_history_task
                self.awrite_response_to_history_task = (
                    None  # No need to keep the reference to the finished task
                )

    def print_response_stream(self) -> None:
        """Print streamed tokens to stdout as they arrive (sync)."""
        for token in self.response_gen:
            print(token, end="", flush=True)

    async def aprint_response_stream(self) -> None:
        """Print streamed tokens to stdout as they arrive (async)."""
        async for token in self.async_response_gen():
            print(token, end="", flush=True)
# Union of the two chat-response flavors an agent may return.
AGENT_CHAT_RESPONSE_TYPE = Union[AgentChatResponse, StreamingAgentChatResponse]
| StreamingAgentChatResponse |
python | TheAlgorithms__Python | data_structures/linked_list/swap_nodes.py | {
"start": 216,
"end": 4157
class ____:
    """A minimal singly linked list supporting iteration, length, prepend,
    and swapping the payloads of two nodes identified by their data."""

    # Reference to the first node; None for an empty list.
    head: Node | None = None

    def __iter__(self) -> Iterator:
        """
        >>> linked_list = LinkedList()
        >>> list(linked_list)
        []
        >>> linked_list.push(0)
        >>> tuple(linked_list)
        (0,)
        """
        current = self.head
        while current is not None:
            yield current.data
            current = current.next_node

    def __len__(self) -> int:
        """
        >>> linked_list = LinkedList()
        >>> len(linked_list)
        0
        >>> linked_list.push(0)
        >>> len(linked_list)
        1
        """
        count = 0
        for _ in self:
            count += 1
        return count

    def push(self, new_data: Any) -> None:
        """
        Add a new node with the given data to the beginning of the Linked List.

        Args:
            new_data (Any): The data to be added to the new node.

        Returns:
            None

        Examples:
        >>> linked_list = LinkedList()
        >>> linked_list.push(5)
        >>> linked_list.push(4)
        >>> linked_list.push(3)
        >>> linked_list.push(2)
        >>> linked_list.push(1)
        >>> list(linked_list)
        [1, 2, 3, 4, 5]
        """
        node = Node(new_data)
        # Link the new node in front of the current head (RHS evaluated first).
        node.next_node, self.head = self.head, node

    def _find(self, target: Any) -> Node | None:
        """Return the first node whose data equals *target*, or None."""
        node = self.head
        while node is not None and node.data != target:
            node = node.next_node
        return node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """
        Swap the positions of two nodes in the Linked List based on their data values.

        Args:
            node_data_1: Data value of the first node to be swapped.
            node_data_2: Data value of the second node to be swapped.

        Note:
            If either of the specified data values isn't found then, no swapping occurs.

        Examples:
        When both values are present in a linked list.
        >>> linked_list = LinkedList()
        >>> linked_list.push(5)
        >>> linked_list.push(4)
        >>> linked_list.push(3)
        >>> linked_list.push(2)
        >>> linked_list.push(1)
        >>> list(linked_list)
        [1, 2, 3, 4, 5]
        >>> linked_list.swap_nodes(1, 5)
        >>> tuple(linked_list)
        (5, 2, 3, 4, 1)

        When one value is present and the other isn't in the linked list.
        >>> second_list = LinkedList()
        >>> second_list.push(6)
        >>> second_list.push(7)
        >>> second_list.push(8)
        >>> second_list.push(9)
        >>> second_list.swap_nodes(1, 6) is None
        True

        When both values are absent in the linked list.
        >>> second_list = LinkedList()
        >>> second_list.push(10)
        >>> second_list.push(9)
        >>> second_list.push(8)
        >>> second_list.push(7)
        >>> second_list.swap_nodes(1, 3) is None
        True

        When linkedlist is empty.
        >>> second_list = LinkedList()
        >>> second_list.swap_nodes(1, 3) is None
        True

        Returns:
            None
        """
        # Identical values: nothing to do.
        if node_data_1 == node_data_2:
            return

        first = self._find(node_data_1)
        second = self._find(node_data_2)
        if first is None or second is None:
            return

        # Swap the data values of the two nodes (the nodes stay in place).
        first.data, second.data = second.data, first.data
if __name__ == "__main__":
    """
    Python script that outputs the swap of nodes in a linked list.
    """
    from doctest import testmod

    # Run the doctests embedded in the class docstrings first.
    testmod()
    # Build the list 1 -> 2 -> 3 -> 4 -> 5 (push prepends, so insert 5..1).
    linked_list = LinkedList()
    for i in range(5, 0, -1):
        linked_list.push(i)

    print(f"Original Linked List: {list(linked_list)}")
    linked_list.swap_nodes(1, 4)
    print(f"Modified Linked List: {list(linked_list)}")
    print("After swapping the nodes whose data is 1 and 4.")
| LinkedList |
python | conda__conda | conda/exceptions.py | {
"start": 36098,
"end": 36612
class ____(CondaError):
    """Raised when an operation would modify an environment marked as frozen."""

    def __init__(self, prefix: PathType, message: str = "", **kwargs):
        # Assemble the error text from fixed fragments plus the optional,
        # indented reason supplied by the caller.
        fragments = [f"Cannot modify '{prefix}'. The environment is marked as frozen. "]
        if message:
            fragments.append("Reason:\n\n")
            fragments.append(indent(message, "  "))
            fragments.append("\n\n")
        fragments.append(
            "You can bypass these protections with the `--override-frozen` flag,"
            " at your own risk."
        )
        super().__init__("".join(fragments), **kwargs)
| EnvironmentIsFrozenError |
python | ray-project__ray | python/ray/dashboard/modules/aggregator/tests/test_multi_consumer_event_buffer.py | {
"start": 887,
"end": 10322
class ____:
    """Tests for MultiConsumerEventBuffer: add/consume, overflow eviction,
    independent per-consumer cursors, blocking waits, and concurrency."""

    @pytest.mark.asyncio
    async def test_add_and_consume_event_basic(self):
        """Test basic event addition."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)
        assert await buffer.size() == 0

        event = _create_test_event(b"event1")
        await buffer.add_event(event)
        assert await buffer.size() == 1

        batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0)
        assert len(batch) == 1
        assert batch[0] == event

    @pytest.mark.asyncio
    async def test_add_event_buffer_overflow(self):
        """Test buffer overflow behavior and eviction logic."""
        buffer = MultiConsumerEventBuffer(max_size=3, max_batch_size=2)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)

        # Add events to fill buffer
        events = []
        event_types = [
            RayEvent.EventType.TASK_DEFINITION_EVENT,
            RayEvent.EventType.TASK_LIFECYCLE_EVENT,
            RayEvent.EventType.ACTOR_TASK_DEFINITION_EVENT,
        ]
        for i in range(3):
            event = _create_test_event(f"event{i}".encode(), event_types[i])
            events.append(event)
            await buffer.add_event(event)
        assert await buffer.size() == 3

        # Add one more event to trigger eviction
        overflow_event = _create_test_event(
            b"overflow", RayEvent.EventType.TASK_PROFILE_EVENT
        )
        await buffer.add_event(overflow_event)
        assert await buffer.size() == 3  # Still max size

    @pytest.mark.asyncio
    async def test_wait_for_batch_multiple_events(self):
        """Test waiting for batch when multiple events are immediately available and when when not all events are available."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=3)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)

        # Add multiple events
        events = []
        for i in range(5):
            event = _create_test_event(f"event{i}".encode())
            events.append(event)
            await buffer.add_event(event)

        # Should get max_batch_size events immediately
        batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0.1)
        assert len(batch) == 3  # max_batch_size
        assert batch == events[:3]

        # should now get the leftover events (< max_batch_size)
        batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0.1)
        assert len(batch) == 2
        assert batch == events[3:]

    @pytest.mark.asyncio
    async def test_wait_for_batch_unknown_consumer(self):
        """Test error handling for unknown consumer."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)

        with pytest.raises(KeyError, match="unknown consumer"):
            await buffer.wait_for_batch("nonexistent_consumer", timeout_seconds=0)

    @pytest.mark.asyncio
    async def test_register_consumer_duplicate(self):
        """Test error handling for duplicate consumer registration."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)

        with pytest.raises(
            ValueError, match="consumer 'test_consumer' already registered"
        ):
            await buffer.register_consumer(consumer_name)

    @pytest.mark.asyncio
    async def test_multiple_consumers_independent_cursors(self):
        """Test that multiple consumers have independent cursors."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=2)
        consumer_name_1 = "test_consumer_1"
        consumer_name_2 = "test_consumer_2"
        await buffer.register_consumer(consumer_name_1)
        await buffer.register_consumer(consumer_name_2)

        # Add events
        events = []
        for i in range(10):
            event = _create_test_event(f"event{i}".encode())
            events.append(event)
            await buffer.add_event(event)

        # Consumer 1 reads first batch
        batch1 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch1 == events[:2]

        # Consumer 2 reads from beginning
        batch2 = await buffer.wait_for_batch(consumer_name_2, timeout_seconds=0.1)
        assert batch2 == events[:2]

        # consumer 1 reads another batch
        batch3 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch3 == events[2:4]

        # more events are added leading to events not consumed by consumer 2 getting evicted
        # 4 events get evicted, consumer 1 has processed all 4 evicted events previously
        # but consumer 2 has only processed 2 out of the 4 evicted events
        for i in range(4):
            event = _create_test_event(f"event{i + 10}".encode())
            events.append(event)
            await buffer.add_event(event)

        # Just ensure buffer remains at max size
        assert await buffer.size() == 10

        # consumer 1 will read the next 2 events, not affected by the evictions
        # consumer 1's cursor is adjusted internally to account for the evicted events
        batch4 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch4 == events[4:6]

        # consumer 2 will read 2 events, skipping the evicted events
        batch5 = await buffer.wait_for_batch(consumer_name_2, timeout_seconds=0.1)
        assert batch5 == events[4:6]  # events[2:4] are lost

    @pytest.mark.asyncio
    async def test_wait_for_batch_blocks_until_event_available(self):
        """Test that wait_for_batch blocks until at least one event is available."""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=5)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)

        # Start waiting for batch (should block)
        async def wait_for_batch():
            return await buffer.wait_for_batch(consumer_name, timeout_seconds=2.0)

        wait_task = asyncio.create_task(wait_for_batch())

        # Wait a bit to ensure the task is waiting
        # NOTE(review): sleeping 4.0s after starting a wait with
        # timeout_seconds=2.0 looks inconsistent — if timeout_seconds is an
        # overall deadline the task would already be done here; confirm the
        # intended semantics of timeout_seconds and the sleep duration.
        await asyncio.sleep(4.0)
        assert not wait_task.done()

        # Add an event
        event = _create_test_event(b"event1")
        await buffer.add_event(event)

        # Now the task should complete
        batch = await wait_task
        assert len(batch) == 1
        assert batch[0] == event

    @pytest.mark.asyncio
    async def test_concurrent_producer_consumer_random_sleeps_with_overall_timeout(
        self,
    ):
        """Producer with random sleeps and consumer reading until all events are received.

        Uses an overall asyncio timeout to ensure the test fails if it hangs
        before consuming all events.
        """
        total_events = 40
        max_batch_size = 2
        buffer = MultiConsumerEventBuffer(max_size=100, max_batch_size=max_batch_size)
        consumer_name = "test_consumer"
        await buffer.register_consumer(consumer_name)

        produced_events = []
        consumed_events = []
        # Seed for reproducible sleep patterns across runs.
        random.seed(0)

        async def producer():
            for i in range(total_events):
                event = _create_test_event(f"e{i}".encode())
                produced_events.append(event)
                await buffer.add_event(event)
                await asyncio.sleep(random.uniform(0.0, 0.02))

        async def consumer():
            while len(consumed_events) < total_events:
                batch = await buffer.wait_for_batch(consumer_name, timeout_seconds=0.1)
                consumed_events.extend(batch)

        # The test should fail if this times out before all events are consumed
        await asyncio.wait_for(asyncio.gather(producer(), consumer()), timeout=5.0)

        assert len(consumed_events) == total_events
        assert consumed_events == produced_events

    @pytest.mark.asyncio
    async def test_events_are_evicted_once_consumed_by_all_consumers(self):
        """Test events are evicted from the buffer once they are consumed by all consumers"""
        buffer = MultiConsumerEventBuffer(max_size=10, max_batch_size=2)
        consumer_name_1 = "test_consumer_1"
        consumer_name_2 = "test_consumer_2"
        await buffer.register_consumer(consumer_name_1)
        await buffer.register_consumer(consumer_name_2)

        # Add events
        events = []
        for i in range(10):
            event = _create_test_event(f"event{i}".encode())
            events.append(event)
            await buffer.add_event(event)
        assert await buffer.size() == 10

        # Consumer 1 reads first batch
        batch1 = await buffer.wait_for_batch(consumer_name_1, timeout_seconds=0.1)
        assert batch1 == events[:2]
        # buffer size does not change as consumer 2 is yet to consume these events
        assert await buffer.size() == 10

        # Consumer 2 reads from beginning
        batch2 = await buffer.wait_for_batch(consumer_name_2, timeout_seconds=0.1)
        assert batch2 == events[:2]
        # size reduces by 2 as both consumers have consumed 2 events
        assert await buffer.size() == 8
if __name__ == "__main__":
    # Allow running this test module directly; mirror pytest's exit code.
    sys.exit(pytest.main(["-v", __file__]))
| TestMultiConsumerEventBuffer |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 8060,
"end": 8905
class ____(VOTableSpecWarning):
    """Array uses commas rather than whitespace.

    The VOTable spec states:

        If a cell contains an array or complex number, it should be
        encoded as multiple numbers separated by whitespace.

    Many VOTable files in the wild use commas as a separator instead,
    and ``astropy.io.votable`` can support this convention depending on the
    :ref:`astropy:verifying-votables` setting.

    ``astropy.io.votable`` always outputs files using only spaces, regardless of
    how they were input.

    **References**: `1.1
    <http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#toc-header-35>`__,
    `1.2
    <http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:TABLEDATA>`__
    """

    # Fixed warning text; this warning takes no format arguments.
    message_template = "Array uses commas rather than whitespace"
| W01 |
python | streamlit__streamlit | lib/streamlit/elements/lib/image_utils.py | {
"start": 2007,
"end": 15964
class ____(IntEnum):
    """
    Special values that are recognized by the frontend and allow us to change the
    behavior of the displayed image.
    """

    # Negative sentinels so they can never collide with a real pixel width.
    ORIGINAL = -1
    COLUMN = -2
    AUTO = -3
    MIN_IMAGE_OR_CONTAINER = -4
    MAX_IMAGE_OR_CONTAINER = -5
# Attach per-member docstrings after the class body; enum members cannot
# carry docstrings inline.
WidthBehavior.ORIGINAL.__doc__ = """Display the image at its original width"""
WidthBehavior.COLUMN.__doc__ = (
    """Display the image at the width of the column it's in."""
)
WidthBehavior.AUTO.__doc__ = """Display the image at its original width, unless it
would exceed the width of its column in which case clamp it to
its column width"""
def _image_may_have_alpha_channel(image: PILImage) -> bool:
return image.mode in ("RGBA", "LA", "P")
def _image_is_gif(image: PILImage) -> bool:
return image.format == "GIF"
def _validate_image_format_string(
image_data: bytes | PILImage, format: str
) -> ImageFormat:
"""Return either "JPEG", "PNG", or "GIF", based on the input `format` string.
- If `format` is "JPEG" or "JPG" (or any capitalization thereof), return "JPEG"
- If `format` is "PNG" (or any capitalization thereof), return "PNG"
- For all other strings, return "PNG" if the image has an alpha channel,
"GIF" if the image is a GIF, and "JPEG" otherwise.
"""
img_format = format.upper()
if img_format in {"JPEG", "PNG"}:
return cast("ImageFormat", img_format)
# We are forgiving on the spelling of JPEG
if img_format == "JPG":
return "JPEG"
pil_image: PILImage
if isinstance(image_data, bytes):
from PIL import Image
pil_image = Image.open(io.BytesIO(image_data))
else:
pil_image = image_data
if _image_is_gif(pil_image):
return "GIF"
if _image_may_have_alpha_channel(pil_image):
return "PNG"
return "JPEG"
def _pil_to_bytes(
image: PILImage,
format: ImageFormat = "JPEG",
quality: int = 100,
) -> bytes:
"""Convert a PIL image to bytes."""
tmp = io.BytesIO()
# User must have specified JPEG, so we must convert it
if format == "JPEG" and _image_may_have_alpha_channel(image):
image = image.convert("RGB")
image.save(tmp, format=format, quality=quality)
return tmp.getvalue()
def _bytesio_to_bytes(data: io.BytesIO) -> bytes:
data.seek(0)
return data.getvalue()
def _np_array_to_bytes(array: npt.NDArray[Any], output_format: str = "JPEG") -> bytes:
    """Encode a numpy array as image bytes in (a validated form of) *output_format*."""
    import numpy as np
    from PIL import Image

    pil_img = Image.fromarray(array.astype(np.uint8))
    resolved_format = _validate_image_format_string(pil_img, output_format)
    return _pil_to_bytes(pil_img, resolved_format)
def _verify_np_shape(array: npt.NDArray[Any]) -> npt.NDArray[Any]:
shape: NumpyShape = array.shape
if len(shape) not in (2, 3):
raise StreamlitAPIException("Numpy shape has to be of length 2 or 3.")
if len(shape) == 3 and shape[-1] not in (1, 3, 4):
raise StreamlitAPIException(
f"Channel can only be 1, 3, or 4 got {shape[-1]}. Shape is {shape}"
)
# If there's only one channel, convert is to x, y
if len(shape) == 3 and shape[-1] == 1:
array = array[:, :, 0]
return array
def _get_image_format_mimetype(image_format: ImageFormat) -> str:
"""Get the mimetype string for the given ImageFormat."""
return f"image/{image_format.lower()}"
def _ensure_image_size_and_format(
    image_data: bytes, layout_config: LayoutConfig, image_format: ImageFormat
) -> bytes:
    """Resize an image if it exceeds the given width, or if exceeds
    MAXIMUM_CONTENT_WIDTH. Ensure the image's format corresponds to the given
    ImageFormat. Return the (possibly resized and reformatted) image bytes.
    """
    from PIL import Image

    pil_image: PILImage = Image.open(io.BytesIO(image_data))
    actual_width, actual_height = pil_image.size

    # An int layout width is an explicit pixel width; anything else falls back
    # to the app-wide maximum content width.
    target_width = (
        layout_config.width
        if isinstance(layout_config.width, int)
        else MAXIMUM_CONTENT_WIDTH
    )

    # Resizing the image down if the embedded width is greater than
    # the target width.
    if target_width > 0 and actual_width > target_width:
        # We need to resize the image.
        # Preserve the aspect ratio when computing the new height.
        new_height = int(1.0 * actual_height * target_width / actual_width)
        # pillow reexports Image.Resampling.BILINEAR as Image.BILINEAR for backwards
        # compatibility reasons, so we use the reexport to support older pillow
        # versions. The types don't seem to reflect this, though, hence the type: ignore
        # below.
        pil_image = pil_image.resize(
            (target_width, new_height),
            resample=Image.BILINEAR,  # type: ignore[attr-defined]
        )
        return _pil_to_bytes(pil_image, format=image_format, quality=90)

    if pil_image.format != image_format:
        # We need to reformat the image.
        return _pil_to_bytes(pil_image, format=image_format, quality=90)

    # No resizing or reformatting necessary - return the original bytes.
    return image_data
def _clip_image(image: npt.NDArray[Any], clamp: bool) -> npt.NDArray[Any]:
import numpy as np
data = image
if issubclass(image.dtype.type, np.floating):
if clamp:
data = np.clip(image, 0, 1.0)
elif np.amin(image) < 0.0 or np.amax(image) > 1.0:
raise RuntimeError("Data is outside [0.0, 1.0] and clamp is not set.")
data = data * 255
elif clamp:
data = np.clip(image, 0, 255)
elif np.amin(image) < 0 or np.amax(image) > 255:
raise RuntimeError("Data is outside [0, 255] and clamp is not set.")
return data
def image_to_url(
    image: AtomicImage,
    layout_config: LayoutConfig,
    clamp: bool,
    channels: Channels,
    output_format: ImageFormatOrAuto,
    image_id: str,
) -> str:
    """Return a URL that an image can be served from.
    If `image` is already a URL, return it unmodified.
    Otherwise, add the image to the MediaFileManager and return the URL.
    (When running in "raw" mode, we won't actually load data into the
    MediaFileManager, and we'll return an empty URL).

    Parameters
    ----------
    image
        The image to serve: a URL/path/SVG string, a PIL image, raw bytes,
        a BytesIO buffer, or a numpy array.
    layout_config
        Layout configuration (desired width) used when resizing.
    clamp
        For numpy images, clamp pixel values into the valid range instead
        of raising on out-of-range data.
    channels
        Channel order ("RGB" or "BGR") for numpy array images.
    output_format
        Target encoding ("auto", or a concrete format such as "PNG"/"JPEG").
    image_id
        Stable identifier used to register the image with MediaFileManager.
    """
    import numpy as np
    from PIL import Image, ImageFile
    image_data: bytes
    # Convert Path to string if necessary
    if isinstance(image, Path):
        image = str(image)
    # Strings
    if isinstance(image, str):
        if not os.path.isfile(image) and url_util.is_url(
            image, allowed_schemas=("http", "https", "data")
        ):
            # If it's a url, return it directly.
            return image
        if image.endswith(".svg") and os.path.isfile(image):
            # Unpack local SVG image file to an SVG string
            with open(image) as textfile:
                image = textfile.read()
        # Following regex allows svg image files to start either via a "<?xml...>" tag
        # eventually followed by a "<svg...>" tag or directly starting with a "<svg>" tag
        if re.search(r"(^\s?(<\?xml[\s\S]*<svg\s)|^\s?<svg\s|^\s?<svg>\s)", image):
            if "xmlns" not in image:
                # The xmlns attribute is required for SVGs to render in an img tag.
                # If it's not present, we add to the first SVG tag:
                image = image.replace(
                    "<svg", '<svg xmlns="http://www.w3.org/2000/svg" ', 1
                )
            # Convert to base64 to prevent issues with encoding:
            import base64
            image_b64_encoded = base64.b64encode(image.encode("utf-8")).decode("utf-8")
            # Return SVG as data URI:
            return f"data:image/svg+xml;base64,{image_b64_encoded}"
        # Otherwise, try to open it as a file.
        try:
            with open(image, "rb") as f:
                image_data = f.read()
        except Exception:
            # When we aren't able to open the image file, we still pass the path to
            # the MediaFileManager - its storage backend may have access to files
            # that Streamlit does not.
            import mimetypes
            mimetype, _ = mimetypes.guess_type(image)
            if mimetype is None:
                mimetype = "application/octet-stream"
            url = runtime.get_instance().media_file_mgr.add(image, mimetype, image_id)
            caching.save_media_data(image, mimetype, image_id)
            return url
    # PIL Images
    elif isinstance(image, (ImageFile.ImageFile, Image.Image)):
        img_format = _validate_image_format_string(image, output_format)
        image_data = _pil_to_bytes(image, img_format)
    # BytesIO
    # Note: This doesn't support SVG. We could convert to png (cairosvg.svg2png)
    # or just decode BytesIO to string and handle that way.
    elif isinstance(image, io.BytesIO):
        image_data = _bytesio_to_bytes(image)
    # Numpy Arrays (ie opencv)
    elif isinstance(image, np.ndarray):
        image = _clip_image(_verify_np_shape(image), clamp)
        if channels == "BGR":
            if len(image.shape) == 3:
                # Reorder BGR channel data to RGB before encoding.
                image = image[:, :, [2, 1, 0]]
            else:
                raise StreamlitAPIException(
                    'When using `channels="BGR"`, the input image should '
                    "have exactly 3 color channels"
                )
        image_data = _np_array_to_bytes(array=image, output_format=output_format)
    # Raw bytes
    else:
        image_data = image
    # Determine the image's format, resize it, and get its mimetype
    image_format = _validate_image_format_string(image_data, output_format)
    image_data = _ensure_image_size_and_format(image_data, layout_config, image_format)
    mimetype = _get_image_format_mimetype(image_format)
    if runtime.exists():
        url = runtime.get_instance().media_file_mgr.add(image_data, mimetype, image_id)
        caching.save_media_data(image_data, mimetype, image_id)
        return url
    # When running in "raw mode", we can't access the MediaFileManager.
    return ""
def _4d_to_list_3d(array: npt.NDArray[Any]) -> list[npt.NDArray[Any]]:
return [array[i, :, :, :] for i in range(array.shape[0])]
def marshall_images(
    coordinates: str,
    image: ImageOrImageList,
    caption: str | npt.NDArray[Any] | list[str] | None,
    layout_config: LayoutConfig,
    proto_imgs: ImageListProto,
    clamp: bool,
    channels: Channels = "RGB",
    output_format: ImageFormatOrAuto = "auto",
) -> None:
    """Fill an ImageListProto with a list of images and their captions.
    The images will be resized and reformatted as necessary.
    Parameters
    ----------
    coordinates
        A string identifying the images' location in the frontend.
    image
        The image or images to include in the ImageListProto.
    caption
        Image caption. If displaying multiple images, caption should be a
        list of captions (one for each image).
    layout_config
        Layout configuration (most notably the desired display width),
        forwarded to `image_to_url` when each image is serialized.
    proto_imgs
        The ImageListProto to fill in.
    clamp
        Clamp image pixel values to a valid range ([0-255] per channel).
        This is only meaningful for byte array images; the parameter is
        ignored for image URLs. If this is not set, and an image has an
        out-of-range value, an error will be thrown.
    channels
        If image is an nd.array, this parameter denotes the format used to
        represent color information. Defaults to 'RGB', meaning
        `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and
        `image[:, :, 2]` is blue. For images coming from libraries like
        OpenCV you should set this to 'BGR', instead.
    output_format
        This parameter specifies the format to use when transferring the
        image data. Photos should use the JPEG format for lossy compression
        while diagrams should use the PNG format for lossless compression.
        Defaults to 'auto' which identifies the compression type based
        on the type and format of the image argument.
    """
    import numpy as np
    channels = cast("Channels", channels.upper())
    # Turn single image and caption into one element list.
    images: Sequence[AtomicImage]
    if isinstance(image, (list, set, tuple)):
        images = list(image)
    elif isinstance(image, np.ndarray) and len(image.shape) == 4:
        # A 4D array is a batch of 3D images; split along the first axis.
        images = _4d_to_list_3d(image)
    else:
        images = cast("Sequence[AtomicImage]", [image])
    if isinstance(caption, list):
        captions: Sequence[str | None] = caption
    elif isinstance(caption, str):
        captions = [caption]
    elif isinstance(caption, np.ndarray) and len(caption.shape) == 1:
        captions = caption.tolist()
    elif caption is None:
        captions = [None] * len(images)
    else:
        captions = [str(caption)]
    if not isinstance(captions, list):
        raise StreamlitAPIException(
            "If image is a list then caption should be a list as well."
        )
    if len(captions) != len(images):
        raise StreamlitAPIException(
            f"Cannot pair {len(captions)} captions with {len(images)} images."
        )
    # Each image in an image list needs to be kept track of at its own coordinates.
    for coord_suffix, (single_image, single_caption) in enumerate(
        zip(images, captions, strict=False)
    ):
        proto_img = proto_imgs.imgs.add()
        if single_caption is not None:
            proto_img.caption = str(single_caption)
        # We use the index of the image in the input image list to identify this image inside
        # MediaFileManager. For this, we just add the index to the image's "coordinates".
        image_id = f"{coordinates}-{coord_suffix}"
        proto_img.url = image_to_url(
            single_image, layout_config, clamp, channels, output_format, image_id
        )
| WidthBehavior |
python | pydantic__pydantic | tests/test_main.py | {
"start": 23108,
"end": 25488
} | class ____(str, Enum):
FOO = 'foo'
BAR = 'bar'
@pytest.mark.parametrize('value', [StrFoo.FOO, StrFoo.FOO.value, 'foo', 'hello'])
def test_literal_use_enum_values_multi_type(value) -> None:
    # A Literal mixing an enum member and a plain string should coerce every
    # accepted input to `str` when use_enum_values is enabled.
    class Model(BaseModel):
        baz: Literal[StrFoo.FOO, 'hello']
        model_config = ConfigDict(use_enum_values=True)
    assert isinstance(Model(baz=value).baz, str)
def test_literal_use_enum_values_with_default() -> None:
    # validate_default=True pushes the enum default through validation, so the
    # stored and dumped values must already be plain strings for every path.
    class Model(BaseModel):
        baz: Literal[StrFoo.FOO] = Field(default=StrFoo.FOO)
        model_config = ConfigDict(use_enum_values=True, validate_default=True)
    validated = Model()
    assert type(validated.baz) is str
    assert type(validated.model_dump()['baz']) is str
    validated = Model.model_validate_json('{"baz": "foo"}')
    assert type(validated.baz) is str
    assert type(validated.model_dump()['baz']) is str
    validated = Model.model_validate({'baz': StrFoo.FOO})
    assert type(validated.baz) is str
    assert type(validated.model_dump()['baz']) is str
def test_strict_enum_values():
    class MyEnum(Enum):
        val = 'val'
    class Model(BaseModel):
        model_config = ConfigDict(use_enum_values=True)
        x: MyEnum
    # Even in strict mode, use_enum_values unwraps the member to its raw value.
    assert Model.model_validate({'x': MyEnum.val}, strict=True).x == 'val'
def test_union_enum_values():
    class MyEnum(Enum):
        val = 'val'
    class NormalModel(BaseModel):
        x: Union[MyEnum, int]
    class UseEnumValuesModel(BaseModel):
        model_config = ConfigDict(use_enum_values=True)
        x: Union[MyEnum, int]
    # Without use_enum_values the member itself is kept; with it, the value.
    assert NormalModel(x=MyEnum.val).x != 'val'
    assert UseEnumValuesModel(x=MyEnum.val).x == 'val'
def test_enum_raw():
    # Functional-API enum: raw input validates to the member, not its value.
    FooEnum = Enum('FooEnum', {'foo': 'foo', 'bar': 'bar'})
    class Model(BaseModel):
        foo: FooEnum = None
    m = Model(foo='foo')
    assert isinstance(m.foo, FooEnum)
    assert m.foo != 'foo'
    assert m.foo.value == 'foo'
def test_set_tuple_values():
    # Bare `set`/`tuple` annotations coerce list inputs to those containers.
    class Model(BaseModel):
        foo: set
        bar: tuple
    m = Model(foo=['a', 'b'], bar=['c', 'd'])
    assert m.foo == {'a', 'b'}
    assert m.bar == ('c', 'd')
    assert m.model_dump() == {'foo': {'a', 'b'}, 'bar': ('c', 'd')}
def test_default_copy():
    class User(BaseModel):
        friends: list[int] = Field(default_factory=lambda: [])
    # Each instance must get a fresh list from the factory, not a shared one.
    u1 = User()
    u2 = User()
    assert u1.friends is not u2.friends
| StrFoo |
python | walkccc__LeetCode | solutions/259. 3Sum Smaller/259.py | {
"start": 0,
"end": 525
} | class ____:
def threeSumSmaller(self, nums: list[int], target: int) -> int:
if len(nums) < 3:
return 0
ans = 0
nums.sort()
for i in range(len(nums) - 2):
l = i + 1
r = len(nums) - 1
while l < r:
if nums[i] + nums[l] + nums[r] < target:
# (nums[i], nums[l], nums[r])
# (nums[i], nums[l], nums[r - 1])
# ...,
# (nums[i], nums[l], nums[l + 1])
ans += r - l
l += 1
else:
r -= 1
return ans
| Solution |
python | chroma-core__chroma | chromadb/test/property/strategies.py | {
"start": 8602,
"end": 24087
} | class ____(ExternalCollection):
"""
An internal view of a collection.
This strategy contains all the information Chroma uses internally to manage a
collection. It is a superset of ExternalCollection and should be used to test
internal Chroma logic.
"""
id: uuid.UUID
dimension: int
dtype: npt.DTypeLike
known_metadata_keys: types.Metadata
known_document_keywords: List[str]
has_documents: bool = False
has_embeddings: bool = False
collection_config: Optional[CreateCollectionConfiguration] = None
@st.composite
def collections(
    draw: st.DrawFn,
    add_filterable_data: bool = False,
    with_hnsw_params: bool = False,
    has_embeddings: Optional[bool] = None,
    has_documents: Optional[bool] = None,
    with_persistent_hnsw_params: st.SearchStrategy[bool] = st.just(False),
    max_hnsw_batch_size: int = 2000,
    max_hnsw_sync_threshold: int = 2000,
) -> Collection:
    """Strategy to generate a Collection object. If add_filterable_data is True, then known_metadata_keys and known_document_keywords will be populated with consistent data."""
    # A collection must have at least one of embeddings or documents.
    assert not ((has_embeddings is False) and (has_documents is False))
    name = draw(collection_name())
    metadata = draw(collection_metadata)
    dimension = draw(st.integers(min_value=2, max_value=2048))
    dtype = draw(st.sampled_from(float_types))
    use_persistent_hnsw_params = draw(with_persistent_hnsw_params)
    if use_persistent_hnsw_params and not with_hnsw_params:
        raise ValueError(
            "with_persistent_hnsw_params requires with_hnsw_params to be true"
        )
    if with_hnsw_params:
        if metadata is None:
            metadata = {}
        metadata.update(test_hnsw_config)
        if use_persistent_hnsw_params:
            # batch_size must never exceed sync_threshold, so draw it second.
            metadata["hnsw:sync_threshold"] = draw(
                st.integers(min_value=3, max_value=max_hnsw_sync_threshold)
            )
            metadata["hnsw:batch_size"] = draw(
                st.integers(
                    min_value=3,
                    max_value=min(
                        [metadata["hnsw:sync_threshold"], max_hnsw_batch_size]
                    ),
                )
            )
        # Sometimes, select a space at random
        if draw(st.booleans()):
            # TODO: pull the distance functions from a source of truth that lives not
            # in tests once https://github.com/chroma-core/issues/issues/61 lands
            metadata["hnsw:space"] = draw(st.sampled_from(["cosine", "l2", "ip"]))
    collection_config: Optional[CreateCollectionConfiguration] = None
    # Generate a spann config if in spann mode
    if not is_spann_disabled_mode:
        # Use metadata["hnsw:space"] if it exists, otherwise default to "l2"
        spann_space = metadata.get("hnsw:space", "l2") if metadata else "l2"
        spann_config: CreateSpannConfiguration = {
            "space": spann_space,
            "write_nprobe": 4,
            "reassign_neighbor_count": 4
        }
        collection_config = {
            "spann": spann_config,
        }
    known_metadata_keys: Dict[str, Union[int, str, float]] = {}
    if add_filterable_data:
        # Loop (rather than drawing a dict) so we end up with exactly 5 keys.
        while len(known_metadata_keys) < 5:
            key = draw(safe_text)
            known_metadata_keys[key] = draw(st.one_of(*safe_values))
    if has_documents is None:
        has_documents = draw(st.booleans())
    assert has_documents is not None
    # For cluster tests, we want to avoid generating documents and where_document
    # clauses of length < 3. We also don't want them to contain certain special
    # characters like _ and % that implicitly involve searching for a regex in sqlite.
    if not NOT_CLUSTER_ONLY:
        if has_documents and add_filterable_data:
            known_document_keywords = draw(
                st.lists(safe_text_min_size_3, min_size=5, max_size=5)
            )
        else:
            known_document_keywords = []
    else:
        if has_documents and add_filterable_data:
            known_document_keywords = draw(st.lists(safe_text, min_size=5, max_size=5))
        else:
            known_document_keywords = []
    if not has_documents:
        has_embeddings = True
    else:
        if has_embeddings is None:
            has_embeddings = draw(st.booleans())
    assert has_embeddings is not None
    embedding_function = draw(embedding_function_strategy(dimension, dtype))
    return Collection(
        id=uuid.uuid4(),
        name=name,
        metadata=metadata,
        dimension=dimension,
        dtype=dtype,
        known_metadata_keys=known_metadata_keys,
        has_documents=has_documents,
        known_document_keywords=known_document_keywords,
        has_embeddings=has_embeddings,
        embedding_function=embedding_function,
        collection_config=collection_config
    )
@st.composite
def metadata(
    draw: st.DrawFn,
    collection: Collection,
    min_size: int = 0,
    max_size: Optional[int] = None,
) -> Optional[types.Metadata]:
    """Strategy for generating metadata that could be a part of the given collection"""
    # First draw a random dictionary.
    metadata: types.Metadata = draw(
        st.dictionaries(
            safe_text, st.one_of(*safe_values), min_size=min_size, max_size=max_size
        )
    )
    # Then, remove keys that overlap with the known keys for the coll
    # to avoid type errors when comparing.
    if collection.known_metadata_keys:
        for key in collection.known_metadata_keys.keys():
            if key in metadata:
                del metadata[key]  # type: ignore
    # Finally, add in some of the known keys for the collection
    # (fixed_dictionaries with `optional=` includes each one or not at random).
    sampling_dict: Dict[str, st.SearchStrategy[Union[str, int, float]]] = {
        k: st.just(v) for k, v in collection.known_metadata_keys.items()
    }
    metadata.update(draw(st.fixed_dictionaries({}, optional=sampling_dict)))  # type: ignore
    # We don't allow submitting empty metadata
    if metadata == {}:
        return None
    return metadata
@st.composite
def document(draw: st.DrawFn, collection: Collection) -> types.Document:
    """Strategy for generating documents that could be a part of the given collection"""
    # For cluster tests, we want to avoid generating documents of length < 3.
    # We also don't want them to contain certain special
    # characters like _ and % that implicitly involve searching for a regex in sqlite.
    if not NOT_CLUSTER_ONLY:
        # Blacklist certain unicode characters that affect sqlite processing.
        # For example, the null (/x00) character makes sqlite stop processing a string.
        # Also, blacklist _ and % for cluster tests.
        blacklist_categories = ("Cc", "Cs", "Pc", "Po")
        if collection.known_document_keywords:
            known_words_st = st.sampled_from(collection.known_document_keywords)
        else:
            known_words_st = st.text(
                min_size=3,
                alphabet=st.characters(blacklist_categories=blacklist_categories),  # type: ignore
            )
        random_words_st = st.text(
            min_size=3, alphabet=st.characters(blacklist_categories=blacklist_categories)  # type: ignore
        )
        # Mix known keywords with random words so filters have something to hit.
        words = draw(st.lists(st.one_of(known_words_st, random_words_st), min_size=1))
        return " ".join(words)
    # Blacklist certain unicode characters that affect sqlite processing.
    # For example, the null (/x00) character makes sqlite stop processing a string.
    blacklist_categories = ("Cc", "Cs")  # type: ignore
    if collection.known_document_keywords:
        known_words_st = st.sampled_from(collection.known_document_keywords)
    else:
        known_words_st = st.text(
            min_size=1,
            alphabet=st.characters(blacklist_categories=blacklist_categories),  # type: ignore
        )
    random_words_st = st.text(
        min_size=1, alphabet=st.characters(blacklist_categories=blacklist_categories)  # type: ignore
    )
    words = draw(st.lists(st.one_of(known_words_st, random_words_st), min_size=1))
    return " ".join(words)
@st.composite
def recordsets(
    draw: st.DrawFn,
    collection_strategy: SearchStrategy[Collection] = collections(),
    id_strategy: SearchStrategy[str] = safe_text,
    min_size: int = 1,
    max_size: int = 50,
    # If num_unique_metadata is None, the number of metadata generations
    # will be the size of the record set. If set, the number of metadata
    # generations will be the value of num_unique_metadata (and entries are
    # reused round-robin across records).
    num_unique_metadata: Optional[int] = None,
    min_metadata_size: int = 0,
    max_metadata_size: Optional[int] = None,
) -> RecordSet:
    """Strategy generating a RecordSet (ids/embeddings/metadatas/documents)
    consistent with a drawn Collection."""
    collection = draw(collection_strategy)
    ids = list(
        draw(st.lists(id_strategy, min_size=min_size, max_size=max_size, unique=True))
    )
    embeddings: Optional[Embeddings] = None
    if collection.has_embeddings:
        embeddings = create_embeddings(collection.dimension, len(ids), collection.dtype)
    num_metadata = num_unique_metadata if num_unique_metadata is not None else len(ids)
    generated_metadatas = draw(
        st.lists(
            metadata(
                collection, min_size=min_metadata_size, max_size=max_metadata_size
            ),
            min_size=num_metadata,
            max_size=num_metadata,
        )
    )
    metadatas = []
    for i in range(len(ids)):
        metadatas.append(generated_metadatas[i % len(generated_metadatas)])
    documents: Optional[Documents] = None
    if collection.has_documents:
        documents = draw(
            st.lists(document(collection), min_size=len(ids), max_size=len(ids))
        )
    # in the case where we have a single record, sometimes exercise
    # the code that handles individual values rather than lists.
    # In this case, any field may be a list or a single value.
    if len(ids) == 1:
        single_id: Union[str, List[str]] = ids[0] if draw(st.booleans()) else ids
        single_embedding = (
            embeddings[0]
            if embeddings is not None and draw(st.booleans())
            else embeddings
        )
        single_metadata: Union[Optional[Metadata], List[Optional[Metadata]]] = (
            metadatas[0] if draw(st.booleans()) else metadatas
        )
        single_document = (
            documents[0] if documents is not None and draw(st.booleans()) else documents
        )
        return {
            "ids": single_id,
            "embeddings": single_embedding,
            "metadatas": single_metadata,
            "documents": single_document,
        }
    return {
        "ids": ids,
        "embeddings": embeddings,
        "metadatas": metadatas,
        "documents": documents,
    }
def opposite_value(value: LiteralValue) -> SearchStrategy[Any]:
    """
    Returns a strategy that will generate all valid values except the input value - testing of $nin
    """
    if isinstance(value, float):
        return safe_floats.filter(lambda x: x != value)
    elif isinstance(value, str):
        return safe_text.filter(lambda x: x != value)
    # NOTE: bool must be tested before int - bool is a subclass of int.
    elif isinstance(value, bool):
        return st.booleans().filter(lambda x: x != value)
    elif isinstance(value, int):
        return st.integers(min_value=-(2**31), max_value=2**31 - 1).filter(
            lambda x: x != value
        )
    else:
        # Fallback: let hypothesis infer a strategy from the runtime type.
        return st.from_type(type(value)).filter(lambda x: x != value)
@st.composite
def where_clause(draw: st.DrawFn, collection: Collection) -> types.Where:
    """Generate a filter that could be used in a query against the given collection"""
    known_keys = sorted(collection.known_metadata_keys.keys())
    key = draw(st.sampled_from(known_keys))
    value = collection.known_metadata_keys[key]
    # None means "no operator": emit a bare {key: value} equality filter.
    legal_ops: List[Optional[str]] = [None]
    # NOTE: bool is checked before int - bool is a subclass of int.
    if isinstance(value, bool):
        legal_ops.extend(["$eq", "$ne", "$in", "$nin"])
    elif isinstance(value, float):
        legal_ops.extend(["$gt", "$lt", "$lte", "$gte"])
    elif isinstance(value, int):
        legal_ops.extend(["$gt", "$lt", "$lte", "$gte", "$eq", "$ne", "$in", "$nin"])
    elif isinstance(value, str):
        legal_ops.extend(["$eq", "$ne", "$in", "$nin"])
    else:
        assert False, f"Unsupported type: {type(value)}"
    if isinstance(value, float):
        # Add or subtract a small number to avoid floating point rounding errors
        value = value + draw(st.sampled_from([1e-6, -1e-6]))
        # Truncate to 32 bit
        value = float(np.float32(value))
    op: WhereOperator = draw(st.sampled_from(legal_ops))
    if op is None:
        return {key: value}
    elif op == "$in":  # type: ignore
        if isinstance(value, str) and not value:
            return {}
        return {key: {op: [value, *[draw(opposite_value(value)) for _ in range(3)]]}}
    elif op == "$nin":  # type: ignore
        if isinstance(value, str) and not value:
            return {}
        return {key: {op: [draw(opposite_value(value)) for _ in range(3)]}}
    else:
        return {key: {op: value}}  # type: ignore
@st.composite
def where_doc_clause(draw: st.DrawFn, collection: Collection) -> types.WhereDocument:
    """Generate a where_document filter that could be used against the given collection"""
    # For cluster tests, we want to avoid generating where_document
    # clauses of length < 3. We also don't want them to contain certain special
    # characters like _ and % that implicitly involve searching for a regex in sqlite.
    if not NOT_CLUSTER_ONLY:
        if collection.known_document_keywords:
            word = draw(st.sampled_from(collection.known_document_keywords))
        else:
            word = draw(safe_text_min_size_3)
    else:
        if collection.known_document_keywords:
            word = draw(st.sampled_from(collection.known_document_keywords))
        else:
            word = draw(safe_text)
    # This is hacky, but the distributed system does not support $not_contains
    # so we need to avoid generating these operators for now in that case.
    # TODO: Remove this once the distributed system supports $not_contains
    op = draw(st.sampled_from(["$contains", "$not_contains"]))
    if op == "$contains":
        return {"$contains": word}
    else:
        assert op == "$not_contains"
        return {"$not_contains": word}
def binary_operator_clause(
    base_st: SearchStrategy[types.Where],
) -> SearchStrategy[types.Where]:
    """Combine two where-clause strategies under a random $and/$or operator."""
    op: SearchStrategy[LogicalOperator] = st.sampled_from(["$and", "$or"])
    # Exactly one operator key mapping to exactly two sub-clauses.
    return st.dictionaries(
        keys=op,
        values=st.lists(base_st, max_size=2, min_size=2),
        min_size=1,
        max_size=1,
    )
def binary_document_operator_clause(
    base_st: SearchStrategy[types.WhereDocument],
) -> SearchStrategy[types.WhereDocument]:
    """Combine two where_document strategies under a random $and/$or operator."""
    op: SearchStrategy[LogicalOperator] = st.sampled_from(["$and", "$or"])
    return st.dictionaries(
        keys=op,
        values=st.lists(base_st, max_size=2, min_size=2),
        min_size=1,
        max_size=1,
    )
@st.composite
def recursive_where_clause(draw: st.DrawFn, collection: Collection) -> types.Where:
    """Generate a where filter, possibly nested under $and/$or combinators."""
    base_st = where_clause(collection)
    where: types.Where = draw(st.recursive(base_st, binary_operator_clause))
    return where
@st.composite
def recursive_where_doc_clause(
    draw: st.DrawFn, collection: Collection
) -> types.WhereDocument:
    """Generate a where_document filter, possibly nested under $and/$or."""
    base_st = where_doc_clause(collection)
    where: types.WhereDocument = draw(
        st.recursive(base_st, binary_document_operator_clause)
    )
    return where
python | pypa__pipenv | pipenv/patched/pip/_internal/distributions/wheel.py | {
"start": 425,
"end": 1377
} | class ____(AbstractDistribution):
"""Represents a wheel distribution.
This does not need any preparation as wheels can be directly unpacked.
"""
    @property
    def build_tracker_id(self) -> Optional[str]:
        """Wheels require no build step, so there is no build-tracker entry."""
        return None
    def get_metadata_distribution(self) -> BaseDistribution:
        """Loads the metadata from the wheel file into memory and returns a
        Distribution that uses it, not relying on the wheel file or
        requirement.
        """
        assert self.req.local_file_path, "Set as part of preparation during download"
        assert self.req.name, "Wheels are never unnamed"
        # Read the dist-info metadata straight out of the downloaded wheel.
        wheel = FilesystemWheel(self.req.local_file_path)
        return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
    def prepare_distribution_metadata(
        self,
        finder: "PackageFinder",
        build_isolation: bool,
        check_build_deps: bool,
    ) -> None:
        # Wheels ship their metadata pre-built; nothing to prepare.
        pass
| WheelDistribution |
python | gevent__gevent | src/greentest/3.14/test_socket.py | {
"start": 6787,
"end": 6942
} | class ____(unittest.TestCase):
    @cpython_only
    def test_lazy_import(self):
        # `socket` should defer importing array/selectors until first use.
        ensure_lazy_imports("socket", {"array", "selectors"})
| TestLazyImport |
python | astropy__astropy | astropy/modeling/tests/test_fitters.py | {
"start": 1461,
"end": 3325
} | class ____:
"""Tests for 2D polynomial fitting."""
    def setup_class(self):
        # Fit target: a full 2nd-degree 2D polynomial sampled on a 5x5 grid,
        # with known coefficients [1, 2, 3, 4, 5, 6].
        self.model = models.Polynomial2D(2)
        self.y, self.x = np.mgrid[:5, :5]
        def poly2(x, y):
            return 1 + 2 * x + 3 * x**2 + 4 * y + 5 * y**2 + 6 * x * y
        self.z = poly2(self.x, self.y)
    def test_poly2D_fitting(self):
        fitter = LinearLSQFitter()
        # Compare the fitter against a direct least-squares solve of the
        # design matrix built from the model's parameter derivatives.
        v = self.model.fit_deriv(x=self.x, y=self.y)
        p = linalg.lstsq(v, self.z.ravel(), rcond=-1)[0]
        new_model = fitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model.parameters, p)
    def test_eval(self):
        fitter = LinearLSQFitter()
        new_model = fitter(self.model, self.x, self.y, self.z)
        # The fitted model must reproduce the noiseless data it was fit to.
        assert_allclose(new_model(self.x, self.y), self.z)
    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    @pytest.mark.parametrize("fitter", non_linear_fitters)
    def test_nonlinear_fitting(self, fitter):
        fitter = fitter()
        # Start away from the true coefficients; the fit should recover them.
        self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
        with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
            new_model = fitter(self.model, self.x, self.y, self.z)
        assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
    @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
    def test_compare_nonlinear_fitting(self):
        self.model.parameters = [0.6, 1.8, 2.9, 3.7, 4.9, 6.7]
        fit_models = []
        for fitter in non_linear_fitters:
            fitter = fitter()
            with pytest.warns(
                AstropyUserWarning, match=r"Model is linear in parameters"
            ):
                fit_models.append(fitter(self.model, self.x, self.y, self.z))
        # Every non-linear fitter should agree on the recovered parameters.
        for pair in combinations(fit_models, 2):
            assert_allclose(pair[0].parameters, pair[1].parameters)
| TestPolynomial2D |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 69079,
"end": 70396
} | class ____:
    @xfail_xp_backends(
        'dask.array', reason='https://github.com/dask/dask/issues/11883'
    )
    def test_basic(self, xp):
        # All-pole prototype: scaling the cutoff by 5 scales each pole by 5
        # and the gain by 5**(n_poles - n_zeros) = 25.
        z = xp.asarray([])
        p = xp.asarray([(-1+1j) / math.sqrt(2), (-1-1j) / math.sqrt(2)])
        k = 1
        z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 5)
        xp_assert_equal(z_lp, xp.asarray([]))
        xp_assert_close(_sort_cmplx(p_lp, xp=xp), _sort_cmplx(p, xp=xp) * 5)
        assert k_lp == 25.
        # Pseudo-Chebyshev with both poles and zeros
        z = xp.asarray([-2j, +2j])
        p = xp.asarray([-0.75, -0.5-0.5j, -0.5+0.5j])
        k = 3
        z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 20)
        xp_assert_close(
            _sort_cmplx(z_lp, xp=xp), _sort_cmplx([-40j, +40j], xp=xp)
        )
        xp_assert_close(
            _sort_cmplx(p_lp, xp=xp), _sort_cmplx([-15, -10-10j, -10+10j], xp=xp)
        )
        assert k_lp == 60.
    def test_fs_validation(self):
        z = [-2j, +2j]
        p = [-0.75, -0.5 - 0.5j, -0.5 + 0.5j]
        k = 3
        # The sampling frequency must be a single scalar and may not be None.
        with pytest.raises(ValueError, match="Sampling.*single scalar"):
            bilinear_zpk(z, p, k, fs=np.array([10, 20]))
        with pytest.raises(ValueError, match="Sampling.*be none"):
            bilinear_zpk(z, p, k, fs=None)
@make_xp_test_case(lp2hp_zpk)
| TestLp2lp_zpk |
python | modin-project__modin | modin/tests/pandas/utils.py | {
"start": 16089,
"end": 16317
} | class ____:
    def __init__(self, value: int):
        # Wrapped integer operand used to exercise __add__/__radd__ dispatch.
        self.value = value
    def __add__(self, other):
        # custom + other
        return self.value + other
    def __radd__(self, other):
        # other + custom
        return other + self.value
| CustomIntegerForAddition |
python | pypa__setuptools | setuptools/_vendor/typeguard/_union_transformer.py | {
"start": 585,
"end": 1354
} | class ____(NodeTransformer):
    def __init__(self, union_name: Name | None = None):
        # Default to a bare `Union` name; callers may pass a qualified one.
        self.union_name = union_name or Name(id="Union", ctx=Load())
    def visit_BinOp(self, node: BinOp) -> Any:
        self.generic_visit(node)
        if isinstance(node.op, BitOr):
            # Rewrite PEP 604 `X | Y` into `Union[X, Y]` so the hint can be
            # evaluated on interpreters without native `|`-union support.
            return Subscript(
                value=self.union_name,
                slice=Index(
                    ASTTuple(elts=[node.left, node.right], ctx=Load()), ctx=Load()
                ),
                ctx=Load(),
            )
        return node
def compile_type_hint(hint: str) -> CodeType:
    """Compile a stringified annotation, rewriting PEP 604 unions first."""
    parsed = parse(hint, "<string>", "eval")
    UnionTransformer().visit(parsed)
    # The transformer synthesizes nodes without locations; fill them in.
    fix_missing_locations(parsed)
    return compile(parsed, "<string>", "eval", flags=0)
| UnionTransformer |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 92253,
"end": 92661
} | class ____(
TMATemplateConfigMixin, XPUConfigHeuristic
):
"""Persistent TMA template heuristic for XPU"""
    def __init__(self) -> None:
        super().__init__()
        # Override mm_configs to use persistent_mm_configs for persistent-TMA
        # kernel tuning instead of the default list.
        self.mm_configs = self.persistent_mm_configs
@register_template_heuristic(persistent_tma_mm_template.uid, "xpu", op_name="addmm")
| XPUPersistentTMATemplateConfigHeuristic |
python | PyCQA__pylint | tests/functional/c/class_scope.py | {
"start": 712,
"end": 893
} | class ____:
"""right"""
    class Result1:
        """result one"""
        OK = 0  # sentinel success value returned by work()
    def work(self) -> Result1:
        """good type hint"""
        return self.Result1.OK  # nested class is reachable via self at runtime
| Right |
python | google__jax | jax/_src/custom_transpose.py | {
"start": 5777,
"end": 9204
} | class ____(core.Primitive):
call_primitive = False
map_primitive = False
multiple_results = True
  def bind(self, *args, **params):
    # Delegate to the underlying Primitive bind machinery.
    return self._true_bind(*args, **params)
  def bind_with_trace(self, trace, call_args, params):
    # call_args[0] is the wrapped callable; the rest are the tracer inputs.
    call, tracers = call_args[0], call_args[1:]
    return trace.process_custom_transpose(self, call, tracers, **params)
  # TODO(frostig,mattjj): consider keeping `call` as a named parameter
  # instead of following this "call primitive" convention.
  def get_bind_params(self, params):
    # Reconstruct bind-time params from either the staged (jaxpr) form or
    # the un-staged (callable) form of the primitive's parameters.
    if 'call_jaxpr' in params:
      assert 'transpose_jaxpr_thunk' in params
      new_params: dict[str, Any] = dict(params)
      new_params['transpose'] = make_transpose_from_thunk(
          new_params.pop('transpose_jaxpr_thunk'),
          new_params['lin_tree'])
      call_jaxpr: core.ClosedJaxpr = new_params.pop('call_jaxpr')
      call = lu.wrap_init(core.jaxpr_as_fun(call_jaxpr),
                          debug_info=call_jaxpr.jaxpr.debug_info)
    else:
      assert 'transpose' in params
      new_params: dict[str, Any] = dict(params)
      call = new_params.pop("call")
    return [call], new_params
# TODO(frostig,mattjj): reinstate checks
def custom_transpose_typecheck(_, *in_atoms, out_types, **params):
  """Type rule: outputs are exactly the declared out_types; no effects."""
  del in_atoms, params
  return out_types, core.no_effects
def custom_transpose_transpose_rule(
    cts, *args, out_types, res_tree, lin_tree, out_tree, **params):
  """Transpose rule: apply the user-supplied transpose to the cotangents."""
  transpose: lu.WrappedFun
  if 'transpose_jaxpr_thunk' in params:
    assert 'call_jaxpr' in params
    transpose = make_transpose_from_thunk(
        params['transpose_jaxpr_thunk'], lin_tree)
  else:
    assert 'call' in params
    transpose = params['transpose']
  call_in_tree = treedef_tuple((res_tree, lin_tree))
  # TODO(frostig,mattjj): `lin_arg` indicates the inputs with respect
  # to which we are transposing (via `ad.is_undefined_primal`).
  # Consider passing this information to the custom transpose rule?
  res_arg, lin_arg = tree_unflatten(call_in_tree, args)
  assert all(not ad.is_undefined_primal(x) for x in tree_leaves(res_arg))
  # Materialize symbolic zeros before handing cotangents to the user rule.
  cts = [ad_util.zeros_like_aval(ct.aval) if type(ct) is ad_util.Zero else ct
         for ct in cts]
  ct_out = tree_unflatten(out_tree, cts)
  ct_lin = transpose.call_wrapped(res_arg, ct_out)
  check_transpose_rule_trees(transpose, lin_tree, tree_structure(ct_lin))
  ct_lin = tree_broadcast(lin_tree, ct_lin, is_leaf=lambda x: x is None)
  # When the transpose returns None, we treat that as a Zero, except when the
  # input is also None. In that case, the cotangent corresponding to that input
  # should be dropped.
  zero = object()
  ct_lin = tree_map(lambda l, ct: zero if ct is None and l is not None else ct,
                    lin_arg, ct_lin, is_leaf=ad.is_undefined_primal)
  ct_lin_flat, _ = tree_flatten(ct_lin)
  return [None] * res_tree.num_leaves + [None if ct is zero else ct for ct in ct_lin_flat]
def custom_transpose_lowering(*args, call_jaxpr, **params):
return core.jaxpr_as_fun(call_jaxpr)(*args)
custom_transpose_p = CustomTransposePrimitive('custom_transpose_call')
core.custom_typechecks[custom_transpose_p] = custom_transpose_typecheck
ad.primitive_transposes[custom_transpose_p] = custom_transpose_transpose_rule
mlir.register_lowering(
custom_transpose_p,
mlir.lower_fun(custom_transpose_lowering, multiple_results=True))
pxla.register_initial_style_primitive(custom_transpose_p)
| CustomTransposePrimitive |
python | huggingface__transformers | src/transformers/models/yoso/modeling_yoso.py | {
"start": 17528,
"end": 18137
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.self = YosoSelfAttention(config)
self.output = YosoSelfOutput(config)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
self_outputs = self.self(hidden_states, attention_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
| YosoAttention |
python | cython__cython | Cython/Plex/Errors.py | {
"start": 53,
"end": 100
} | class ____(Exception):
message = ""
| PlexError |
python | keras-team__keras | keras/src/ops/math.py | {
"start": 33912,
"end": 34473
} | class ____(Operation):
def call(self, x):
return backend.math.logdet(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape[:-2], dtype=x.dtype)
@keras_export(["keras.ops.logdet"])
def logdet(x):
"""Computes log of the determinant of a hermitian positive definite matrix.
Args:
x: Input matrix. It must 2D and square.
Returns:
The natural log of the determinant of matrix.
"""
if any_symbolic_tensors((x,)):
return Logdet().symbolic_call(x)
return backend.math.logdet(x)
| Logdet |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 13574,
"end": 19980
} | class ____(
_BigQueryDbHookMixin, SQLValueCheckOperator, _BigQueryOperatorsEncryptionConfigurationMixin
):
"""
Perform a simple value check using sql code.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryValueCheckOperator`
:param sql: SQL to execute.
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:param encryption_configuration: (Optional) Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/PROJECT/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY",
}
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The geographic location of the job. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using
short-term credentials, or chained list of accounts required to get the
access token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the
originating account the Service Account Token Creator IAM role. If set
as a sequence, the identities from the list must grant Service Account
Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account. (templated)
:param labels: a dictionary containing labels for the table, passed to BigQuery.
:param deferrable: Run operator in the deferrable mode.
:param poll_interval: (Deferrable mode only) polling period in seconds to
check for the status of job.
:param project_id: Google Cloud Project where the job is running
"""
template_fields: Sequence[str] = (
"sql",
"gcp_conn_id",
"pass_value",
"impersonation_chain",
"labels",
)
template_ext: Sequence[str] = (".sql",)
ui_color = BigQueryUIColors.CHECK.value
conn_id_field = "gcp_conn_id"
def __init__(
self,
*,
sql: str,
pass_value: Any,
tolerance: Any = None,
encryption_configuration: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
project_id: str = PROVIDE_PROJECT_ID,
use_legacy_sql: bool = True,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
**kwargs,
) -> None:
super().__init__(sql=sql, pass_value=pass_value, tolerance=tolerance, **kwargs)
self.location = location
self.gcp_conn_id = gcp_conn_id
self.use_legacy_sql = use_legacy_sql
self.encryption_configuration = encryption_configuration
self.impersonation_chain = impersonation_chain
self.labels = labels
self.deferrable = deferrable
self.poll_interval = poll_interval
self.project_id = project_id
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
"""Submit a new job and get the job id for polling the status using Triggerer."""
configuration = {
"query": {
"query": self.sql,
"useLegacySql": self.use_legacy_sql,
},
}
self.include_encryption_configuration(configuration, "query")
return hook.insert_job(
configuration=configuration,
project_id=self.project_id,
location=self.location,
job_id=job_id,
nowait=True,
)
def execute(self, context: Context) -> None:
if not self.deferrable:
super().execute(context=context)
else:
hook = BigQueryHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
if self.project_id is None:
self.project_id = hook.project_id
job = self._submit_job(hook, job_id="")
context["ti"].xcom_push(key="job_id", value=job.job_id)
if job.running():
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryValueCheckTrigger(
conn_id=self.gcp_conn_id,
job_id=job.job_id,
project_id=self.project_id,
location=self.location or hook.location,
sql=self.sql,
pass_value=self.pass_value,
tolerance=self.tol,
poll_interval=self.poll_interval,
impersonation_chain=self.impersonation_chain,
),
method_name="execute_complete",
)
self._handle_job_error(job)
# job.result() returns a RowIterator. Mypy expects an instance of SupportsNext[Any] for
# the next() call which the RowIterator does not resemble to. Hence, ignore the arg-type error.
# Row passed to check_value is a collection of values only, without column names.
self.check_value(next(iter(job.result()), [])) # type: ignore[arg-type]
self.log.info("Current state of job %s is %s", job.job_id, job.state)
@staticmethod
def _handle_job_error(job: BigQueryJob | UnknownJob) -> None:
if job.error_result:
raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""
Act as a callback for when the trigger fires.
This returns immediately. It relies on trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
| BigQueryValueCheckOperator |
python | ray-project__ray | doc/source/custom_directives.py | {
"start": 12850,
"end": 14663
} | class ____(Enum):
"""Enum which allows easier enumeration of members for example metadata."""
@classmethod
def items(cls: type) -> Iterable[Tuple["ExampleEnum", str]]:
"""Return an iterable mapping between the enum type and the corresponding value.
Returns
-------
Dict['ExampleEnum', str]
Dictionary of enum type, enum value for the enum class
"""
yield from {entry: entry.value for entry in cls}.items()
@classmethod
def values(cls: type) -> List[str]:
"""Return a list of the values of the enum.
Returns
-------
List[str]
List of values for the enum class
"""
return [entry.value for entry in cls]
@classmethod
def _missing_(cls: type, value: str) -> "ExampleEnum":
"""Allow case-insensitive lookup of enum values.
This shouldn't be called directly. Instead this is called when e.g.,
"SkillLevel('beginner')" is called.
Parameters
----------
value : str
Value for which the associated enum class instance is to be returned.
Spaces are stripped from the beginning and end of the string, and
matching is case-insensitive.
Returns
-------
ExampleEnum
Enum class instance for the requested value
"""
value = value.lstrip().rstrip().lower()
for member in cls:
if member.value.lstrip().rstrip().lower() == value:
return member
return None
@classmethod
def formatted_name(cls: type) -> str:
"""Return the formatted name for the class."""
raise NotImplementedError
@classmethod
def key(cls: type) -> str:
raise NotImplementedError
| ExampleEnum |
python | django__django | tests/test_runner_apps/failures/tests_failures.py | {
"start": 234,
"end": 356
} | class ____(TestCase):
@expectedFailure
def test_sample(self):
self.assertEqual(0, 1)
| ExpectedFailureTestCase |
python | celery__celery | celery/exceptions.py | {
"start": 5928,
"end": 6022
} | class ____(TaskPredicate):
"""A task can raise this to ignore doing state updates."""
| Ignore |
python | wireservice__csvkit | csvkit/utilities/csvstat.py | {
"start": 1530,
"end": 14580
} | class ____(CSVKitUtility):
description = 'Print descriptive statistics for each column in a CSV file.'
def add_arguments(self):
self.argparser.add_argument(
'--csv', dest='csv_output', action='store_true',
help='Output results as a CSV table, rather than plain text.')
self.argparser.add_argument(
'--json', dest='json_output', action='store_true',
help='Output results as JSON text, rather than plain text.')
self.argparser.add_argument(
'-i', '--indent', dest='indent', type=int,
help='Indent the output JSON this many spaces. Disabled by default.')
self.argparser.add_argument(
'-n', '--names', dest='names_only', action='store_true',
help='Display column names and indices from the input CSV and exit.')
self.argparser.add_argument(
'-c', '--columns', dest='columns',
help='A comma-separated list of column indices, names or ranges to be examined, e.g. "1,id,3-5". '
'Defaults to all columns.')
self.argparser.add_argument(
'--type', dest='type_only', action='store_true',
help='Only output data type.')
self.argparser.add_argument(
'--nulls', dest='nulls_only', action='store_true',
help='Only output whether columns contains nulls.')
self.argparser.add_argument(
'--non-nulls', dest='nonnulls_only', action='store_true',
help='Only output counts of non-null values.')
self.argparser.add_argument(
'--unique', dest='unique_only', action='store_true',
help='Only output counts of unique values.')
self.argparser.add_argument(
'--min', dest='min_only', action='store_true',
help='Only output smallest values.')
self.argparser.add_argument(
'--max', dest='max_only', action='store_true',
help='Only output largest values.')
self.argparser.add_argument(
'--sum', dest='sum_only', action='store_true',
help='Only output sums.')
self.argparser.add_argument(
'--mean', dest='mean_only', action='store_true',
help='Only output means.')
self.argparser.add_argument(
'--median', dest='median_only', action='store_true',
help='Only output medians.')
self.argparser.add_argument(
'--stdev', dest='stdev_only', action='store_true',
help='Only output standard deviations.')
self.argparser.add_argument(
'--len', dest='len_only', action='store_true',
help='Only output the length of the longest values.')
self.argparser.add_argument(
'--max-precision', dest='maxprecision_only', action='store_true',
help='Only output the most decimal places.')
self.argparser.add_argument(
'--freq', dest='freq_only', action='store_true',
help='Only output lists of frequent values.')
self.argparser.add_argument(
'--freq-count', dest='freq_count', type=int,
help='The maximum number of frequent values to display.')
self.argparser.add_argument(
'--count', dest='count_only', action='store_true',
help='Only output total row count.')
self.argparser.add_argument(
'--decimal-format', dest='decimal_format', type=str, default='%.3f',
help='%%-format specification for printing decimal numbers. '
'Defaults to locale-specific formatting with "%%.3f".')
self.argparser.add_argument(
'-G', '--no-grouping-separator', dest='no_grouping_separator', action='store_true',
help='Do not use grouping separators in decimal numbers.')
self.argparser.add_argument(
'-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
help='Limit CSV dialect sniffing to the specified number of bytes. '
'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
self.argparser.add_argument(
'-I', '--no-inference', dest='no_inference', action='store_true',
help='Disable type inference (and --locale, --date-format, --datetime-format, --no-leading-zeroes) '
'when parsing the input.')
def main(self):
if self.args.names_only:
self.print_column_names()
return
if self.additional_input_expected():
self.argparser.error('You must provide an input file or piped data.')
operations = [op for op in OPERATIONS.keys() if getattr(self.args, op + '_only')]
if len(operations) > 1:
self.argparser.error('Only one operation argument may be specified (--mean, --median, etc).')
if operations and self.args.csv_output:
self.argparser.error(
'You may not specify --csv and an operation (--mean, --median, etc) at the same time.')
if operations and self.args.json_output:
self.argparser.error(
'You may not specify --json and an operation (--mean, --median, etc) at the same time.')
if operations and self.args.count_only:
self.argparser.error(
'You may not specify --count and an operation (--mean, --median, etc) at the same time.')
if self.args.count_only:
count = len(list(agate.csv.reader(self.skip_lines(), **self.reader_kwargs)))
if not self.args.no_header_row:
count -= 1
self.output_file.write('%i\n' % count)
return
sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
table = agate.Table.from_csv(
self.input_file,
skip_lines=self.args.skip_lines,
sniff_limit=sniff_limit,
column_types=self.get_column_types(),
**self.reader_kwargs,
)
column_ids = parse_column_identifiers(
self.args.columns,
table.column_names,
self.get_column_offset(),
)
kwargs = {}
if self.args.freq_count:
kwargs['freq_count'] = self.args.freq_count
# Output a single stat
if operations:
if len(column_ids) == 1:
self.print_one(table, column_ids[0], operations[0], label=False, **kwargs)
else:
for column_id in column_ids:
self.print_one(table, column_id, operations[0], **kwargs)
else:
stats = {}
for column_id in column_ids:
stats[column_id] = self.calculate_stats(table, column_id, **kwargs)
if self.args.csv_output:
self.print_csv(table, column_ids, stats)
elif self.args.json_output:
self.print_json(table, column_ids, stats)
else:
self.print_stats(table, column_ids, stats)
def is_finite_decimal(self, value):
return isinstance(value, Decimal) and value.is_finite()
def _calculate_stat(self, table, column_id, op_name, op_data, **kwargs):
getter = globals().get(f'get_{op_name}')
with warnings.catch_warnings():
warnings.simplefilter('ignore', agate.NullCalculationWarning)
try:
if getter:
return getter(table, column_id, **kwargs)
op = op_data['aggregation']
v = table.aggregate(op(column_id))
if self.is_finite_decimal(v) and not self.args.json_output:
return format_decimal(v, self.args.decimal_format, self.args.no_grouping_separator)
return v
except Exception:
pass
def print_one(self, table, column_id, op_name, label=True, **kwargs):
"""
Print data for a single statistic.
"""
column_name = table.column_names[column_id]
stat = self._calculate_stat(table, column_id, op_name, OPERATIONS[op_name], **kwargs)
# Formatting
if op_name == 'freq':
stat = ', '.join([f"\"{str(row['value'])}\": {row['count']}" for row in stat])
stat = '{ %s }' % stat
if label:
self.output_file.write('%3i. %s: %s\n' % (column_id + 1, column_name, stat))
else:
self.output_file.write(f'{stat}\n')
def calculate_stats(self, table, column_id, **kwargs):
"""
Calculate stats for all valid operations.
"""
return {
op_name: self._calculate_stat(table, column_id, op_name, op_data, **kwargs)
for op_name, op_data in OPERATIONS.items()
}
def print_stats(self, table, column_ids, stats):
"""
Print data for all statistics.
"""
label_column_width = max(len(op_data['label']) for op_data in OPERATIONS.values())
for column_id in column_ids:
column_name = table.column_names[column_id]
column = table.columns[column_id]
column_stats = stats[column_id]
self.output_file.write('%3i. "%s"\n\n' % (column_id + 1, column_name))
for op_name, op_data in OPERATIONS.items():
if column_stats[op_name] is None:
continue
label = '{label:{label_column_width}}'.format(**{
'label_column_width': label_column_width,
'label': op_data['label'],
})
if op_name == 'freq':
for i, row in enumerate(column_stats['freq']):
if i == 0:
self.output_file.write(f'\t{label} ')
else:
self.output_file.write('\t{label:{label_column_width}} '.format(**{
'label_column_width': label_column_width,
'label': '',
}))
if isinstance(column.data_type, agate.Number):
v = row['value']
if self.is_finite_decimal(v):
v = format_decimal(v, self.args.decimal_format, self.args.no_grouping_separator)
else:
v = str(row['value'])
self.output_file.write(f"{v} ({row['count']}x)\n")
continue
v = column_stats[op_name]
if op_name == 'nulls' and v:
v = f'{v} (excluded from calculations)'
elif op_name == 'len':
v = f'{v} characters'
self.output_file.write(f'\t{label} {v}\n')
self.output_file.write('\n')
self.output_file.write(f'Row count: {len(table.rows)}\n')
def print_csv(self, table, column_ids, stats):
"""
Print data for all statistics as a CSV table.
"""
header = ['column_id', 'column_name'] + list(OPERATIONS)
writer = agate.csv.DictWriter(self.output_file, fieldnames=header)
writer.writeheader()
for row in self._rows(table, column_ids, stats):
if 'freq' in row:
row['freq'] = ', '.join([str(row['value']) for row in row['freq']])
writer.writerow(row)
def print_json(self, table, column_ids, stats):
"""
Print data for all statistics as a JSON text.
"""
data = list(self._rows(table, column_ids, stats))
json.dump(data, self.output_file, default=default_float_decimal, ensure_ascii=False, indent=self.args.indent)
def _rows(self, table, column_ids, stats):
for column_id in column_ids:
column_name = table.column_names[column_id]
column_stats = stats[column_id]
output_row = {'column_id': column_id + 1, 'column_name': column_name}
for op_name, _op_data in OPERATIONS.items():
if column_stats[op_name] is not None:
output_row[op_name] = column_stats[op_name]
yield output_row
def format_decimal(d, f='%.3f', no_grouping_separator=False):
return locale.format_string(f, d, grouping=not no_grouping_separator).rstrip('0').rstrip('.')
# These are accessed via: globals().get(f'get_{op_name}')
def get_type(table, column_id, **kwargs):
return f'{table.columns[column_id].data_type.__class__.__name__}'
def get_unique(table, column_id, **kwargs):
return len(table.columns[column_id].values_distinct())
def get_freq(table, column_id, freq_count=5, **kwargs):
values = table.columns[column_id].values()
return [
{'value': r[0], 'count': r[1]}
for r in Counter(values).most_common(freq_count)
]
def launch_new_instance():
utility = CSVStat()
utility.run()
if __name__ == '__main__':
launch_new_instance()
| CSVStat |
python | dask__distributed | distributed/tests/test_spill.py | {
"start": 6454,
"end": 10007
} | class ____:
def __init__(self, size):
self.size = size
def __getstate__(self):
raise MyError()
def __sizeof__(self):
return self.size
def test_spillbuffer_fail_to_serialize(tmp_path):
buf = SpillBuffer(str(tmp_path), target=200, max_spill=600)
# bad data individually larger than spill threshold target 200
a = Bad(size=201)
# Exception caught in the worker
with pytest.raises(TypeError, match="Failed to pickle 'a'") as e:
with captured_logger("distributed.spill") as logs_bad_key:
buf["a"] = a
assert isinstance(e.value.__cause__.__cause__, MyError)
# spill.py must remain silent because we're already logging in worker.py
assert not logs_bad_key.getvalue()
assert_buf(buf, tmp_path, {}, {})
b = Bad(size=100) # this is small enough to fit in memory/fast
buf["b"] = b
assert_buf(buf, tmp_path, {"b": b}, {})
c = "c" * 100
with captured_logger("distributed.spill") as logs_bad_key_mem:
# This will go to fast and try to kick b out,
# but keep b in fast since it's not pickable
buf["c"] = c
# worker.py won't intercept the exception here, so spill.py must dump the traceback
logs_value = logs_bad_key_mem.getvalue()
assert "Failed to pickle 'b'" in logs_value # from distributed.spill
assert "Traceback" in logs_value # from distributed.spill
assert_buf(buf, tmp_path, {"b": b, "c": c}, {})
@pytest.mark.skipif(WINDOWS, reason="Needs chmod")
def test_spillbuffer_oserror(tmp_path):
buf = SpillBuffer(str(tmp_path), target=200, max_spill=800)
a, b, c, d = (
"a" * 200,
"b" * 100,
"c" * 201,
"d" * 101,
)
# let's have something in fast and something in slow
buf["a"] = a
buf["b"] = b
assert_buf(buf, tmp_path, {"b": b}, {"a": a})
# modify permissions of disk to be read only.
# This causes writes to raise OSError, just like in case of disk full.
os.chmod(tmp_path, 0o555)
# Add key > than target
with captured_logger("distributed.spill") as logs_oserror_slow:
buf["c"] = c
assert "Spill to disk failed" in logs_oserror_slow.getvalue()
assert_buf(buf, tmp_path, {"b": b, "c": c}, {"a": a})
del buf["c"]
assert_buf(buf, tmp_path, {"b": b}, {"a": a})
# add key to fast which is smaller than target but when added it triggers spill,
# which triggers OSError
RateLimiterFilter.reset_timer("distributed.spill")
with captured_logger("distributed.spill") as logs_oserror_evict:
buf["d"] = d
assert "Spill to disk failed" in logs_oserror_evict.getvalue()
assert_buf(buf, tmp_path, {"b": b, "d": d}, {"a": a})
def test_spillbuffer_evict(tmp_path):
buf = SpillBuffer(str(tmp_path), target=300)
bad = Bad(size=100)
a = "a" * 100
buf["a"] = a
assert_buf(buf, tmp_path, {"a": a}, {})
# successful eviction
weight = buf.evict()
assert weight == sizeof(a)
assert_buf(buf, tmp_path, {}, {"a": a})
buf["bad"] = bad
assert_buf(buf, tmp_path, {"bad": bad}, {"a": a})
# unsuccessful eviction
with captured_logger("distributed.spill") as logs_evict_key:
weight = buf.evict()
assert weight == -1
assert "Failed to pickle" in logs_evict_key.getvalue()
# bad keys stays in fast
assert_buf(buf, tmp_path, {"bad": bad}, {"a": a})
def test_no_pop(tmp_path):
buf = SpillBuffer(str(tmp_path), target=100)
with pytest.raises(NotImplementedError):
buf.pop("x", None)
| Bad |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 4842,
"end": 5250
} | class ____(TestRss2Feed):
"""
A feed to test custom context data in templates for title or description.
"""
title_template = "syndication/title_context.html"
description_template = "syndication/description_context.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["foo"] = "bar"
return context
| TemplateContextFeed |
python | spack__spack | lib/spack/spack/util/debug.py | {
"start": 1228,
"end": 2840
} | class ____(pdb.Pdb):
"""
This class allows the python debugger to follow forked processes
and can set tracepoints allowing the Python Debugger Pdb to be used
from a python multiprocessing child process.
This is used the same way one would normally use Pdb, simply import this
class and use as a drop in for Pdb, although the syntax here is slightly different,
requiring the instantiton of this class, i.e. ForkablePdb().set_trace().
This should be used when attempting to call a debugger from a
child process spawned by the python multiprocessing such as during
the run of Spack.install, or any where else Spack spawns a child process.
"""
try:
_original_stdin_fd = sys.stdin.fileno()
except io.UnsupportedOperation:
_original_stdin_fd = None
_original_stdin = None
def __init__(self, stdout_fd=None, stderr_fd=None):
pdb.Pdb.__init__(self, nosigint=True)
self._stdout_fd = stdout_fd
self._stderr_fd = stderr_fd
def _cmdloop(self):
current_stdin = sys.stdin
try:
if not self._original_stdin:
self._original_stdin = os.fdopen(self._original_stdin_fd)
sys.stdin = self._original_stdin
if self._stdout_fd is not None:
os.dup2(self._stdout_fd, sys.stdout.fileno())
os.dup2(self._stdout_fd, self.stdout.fileno())
if self._stderr_fd is not None:
os.dup2(self._stderr_fd, sys.stderr.fileno())
self.cmdloop()
finally:
sys.stdin = current_stdin
| ForkablePdb |
python | pypa__pip | src/pip/_vendor/packaging/_tokenizer.py | {
"start": 184,
"end": 245
} | class ____:
name: str
text: str
position: int
| Token |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 46358,
"end": 47151
} | class ____(_BaseVerifyPhoneView):
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
self.stage = request._login_stage
self.process = flows.phone_verification.PhoneVerificationStageProcess.resume(
self.stage
)
if not self.process:
return self.stage.abort()
return super().dispatch(request, *args, **kwargs)
def respond_process_succeeded(self, form):
return self.stage.exit()
def respond_process_failed(self, form):
adapter = get_adapter(self.request)
adapter.add_message(
self.request,
messages.ERROR,
message=adapter.error_messages["too_many_login_attempts"],
)
return self.stage.abort()
| _VerifyPhoneSignupView |
python | doocs__leetcode | solution/0700-0799/0752.Open the Lock/Solution2.py | {
"start": 0,
"end": 1315
} | class ____:
def openLock(self, deadends: List[str], target: str) -> int:
def next(s):
res = []
s = list(s)
for i in range(4):
c = s[i]
s[i] = '9' if c == '0' else str(int(c) - 1)
res.append(''.join(s))
s[i] = '0' if c == '9' else str(int(c) + 1)
res.append(''.join(s))
s[i] = c
return res
def extend(m1, m2, q):
for _ in range(len(q)):
p = q.popleft()
step = m1[p]
for t in next(p):
if t in s or t in m1:
continue
if t in m2:
return step + 1 + m2[t]
m1[t] = step + 1
q.append(t)
return -1
def bfs():
m1, m2 = {"0000": 0}, {target: 0}
q1, q2 = deque(['0000']), deque([target])
while q1 and q2:
t = extend(m1, m2, q1) if len(q1) <= len(q2) else extend(m2, m1, q2)
if t != -1:
return t
return -1
if target == '0000':
return 0
s = set(deadends)
if '0000' in s:
return -1
return bfs()
| Solution |
python | great-expectations__great_expectations | great_expectations/datasource/datasource_dict.py | {
"start": 5166,
"end": 7464
} | class ____(DatasourceDict):
"""
Extends the capabilites of the DatasourceDict by placing a caching layer in front of the underlying store.
Any retrievals will firstly check an in-memory dictionary before requesting from the store. Other CRUD methods will ensure that
both cache and store are kept in sync.
""" # noqa: E501 # FIXME CoP
def __init__(
self,
context: AbstractDataContext,
datasource_store: DatasourceStore,
):
super().__init__(
context=context,
datasource_store=datasource_store,
)
self._cache: dict[str, FluentDatasource] = {}
@override
@property
def data(self) -> dict[str, FluentDatasource]: # type: ignore[override] # `data` is meant to be a writeable attr (not a read-only property)
return self._cache
@override
def __contains__(self, name: object) -> bool:
if name in self.data:
return True
try:
# Resort to store only if not in cache
_ = self._get_ds_from_store(str(name))
return True
except KeyError:
return False
@override
def set_datasource(self, name: str, ds: FluentDatasource) -> FluentDatasource | None:
self.data[name] = self._add_ids(ds)
return ds
def _add_ids(self, ds: FluentDatasource) -> FluentDatasource:
# File and ephemeral contexts do not use the store, so we need to add IDs here.
# Note that this is used for both `add` and `update` operations.
if ds.id is None:
ds.id = uuid.uuid4()
for asset in ds.assets:
if asset.id is None:
asset.id = uuid.uuid4()
for batch_definition in asset.batch_definitions:
if batch_definition.id is None:
batch_definition.id = uuid.uuid4()
return ds
@override
def __delitem__(self, name: str) -> None:
self.data.pop(name, None)
@override
def __getitem__(self, name: str) -> FluentDatasource:
if name in self.data:
return self.data[name]
# Upon cache miss, retrieve from store and add to cache
ds = super().__getitem__(name)
self.data[name] = ds
return ds
| CacheableDatasourceDict |
python | openai__openai-python | src/openai/resources/responses/input_tokens.py | {
"start": 14006,
"end": 14243
} | class ____:
def __init__(self, input_tokens: InputTokens) -> None:
self._input_tokens = input_tokens
self.count = to_streamed_response_wrapper(
input_tokens.count,
)
| InputTokensWithStreamingResponse |
python | getsentry__sentry | tests/acceptance/test_shared_issue.py | {
"start": 265,
"end": 1341
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
def test_python_event(self) -> None:
data = load_data(platform="python")
data["timestamp"] = before_now(days=1).isoformat()
event = self.store_event(data=data, project_id=self.project.id)
assert event.group is not None
GroupShare.objects.create(project_id=event.group.project_id, group=event.group)
self.browser.get(
f"/organizations/{self.org.slug}/share/issue/{event.group.get_share_id()}/"
)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_not('[data-test-id="event-entries-loading-false"]')
| SharedIssueTest |
python | huggingface__transformers | src/transformers/models/eomt/modular_eomt.py | {
"start": 10768,
"end": 10812
} | class ____(Mask2FormerLoss):
pass
| EomtLoss |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 29514,
"end": 32928
} | class ____(_ConverterData):
"""Container for ConcreteFunction-based conversion data."""
def __init__(self,
func,
lower_control_flow,
aggressive_inlining,
variable_names_allowlist=None,
variable_names_denylist=None):
"""Creates the conversion data for the given function.
Args:
func: ConcreteFunction.
lower_control_flow: Boolean indicating whether or not to lower control
flow ops such as If and While.
aggressive_inlining: Boolean indicating whether or not to do aggressive
function inlining (might be unsafe if function has stateful ops, not
properly connected to control outputs).
variable_names_allowlist: The set of variable names to convert (by
default, all variables are converted).
variable_names_denylist: The set of variable names to omit converting to
constants.
"""
self._func = func
# Inline the graph in order to remove functions when possible.
graph_def = _run_inline_graph_optimization(func, lower_control_flow,
aggressive_inlining)
super(_FunctionConverterData, self).__init__(
graph_def,
variable_names_allowlist=variable_names_allowlist,
variable_names_denylist=variable_names_denylist)
self._build_tensor_data()
def _eval(self, tensor):
"""Returns the value in the tensor. Must be implemented in sub-classes."""
raise errors.UnimplementedError(
"The evaluation method should be implemented in sub-classes.")
def _build_tensor_data(self):
"""Caches the tensor data for all Placeholders in the given function."""
map_index_to_variable = {}
for var in self._func.graph.variables:
for idx, captured_input in enumerate(self._func.captured_inputs):
if var.handle is captured_input: # pylint: disable=protected-access
map_index_to_variable[idx] = var
break
# Iterates through all captures which are represented as Placeholders.
for idx, (val_tensor, name_tensor) in enumerate(self._func.graph.captures):
tensor_name = name_tensor.name.split(":")[0]
if not self._should_convert(tensor_name):
continue
if idx in map_index_to_variable:
data = self._eval(map_index_to_variable[idx])
else:
if val_tensor.dtype == dtypes.resource:
logging.vlog(1, "Skip converting resource tensor %s" % tensor_name)
continue
data = np.array(self._eval(val_tensor))
self._tensor_data[tensor_name] = _TensorData(
numpy=data,
dtype=dtypes.as_dtype(data.dtype).as_datatype_enum,
index=idx)
# Get data for VariableV2 ops (reference variables) that cannot be lifted.
for node in self.node_defs.values():
if node.op == "VariableV2":
if not self._should_convert(node.name):
continue
if node.name not in self.tensor_data:
with self._func.graph.as_default():
identity_node = array_ops.identity(
self._func.graph.as_graph_element(node.name + ":0"))
pruned_graph = self._func.prune([], [identity_node.name])()[0]
self._tensor_data[node.name] = _TensorData(
numpy=pruned_graph.numpy(),
dtype=node.attr["dtype"].type,
index=None)
| _FunctionConverterData |
python | ansible__ansible | test/units/module_utils/basic/test_imports.py | {
"start": 403,
"end": 3448
} | class ____(unittest.TestCase):
def clear_modules(self, mods):
for mod in mods:
if mod in sys.modules:
del sys.modules[mod]
@patch.object(builtins, '__import__')
def test_module_utils_basic_import_syslog(self, mock_import):
def _mock_import(name, *args, **kwargs):
if name == 'syslog':
raise ImportError
return realimport(name, *args, **kwargs)
self.clear_modules(['syslog', 'ansible.module_utils.basic'])
mod = builtins.__import__('ansible.module_utils.basic')
self.assertTrue(mod.module_utils.basic.HAS_SYSLOG)
self.clear_modules(['syslog', 'ansible.module_utils.basic'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.basic')
self.assertFalse(mod.module_utils.basic.HAS_SYSLOG)
@patch.object(builtins, '__import__')
def test_module_utils_basic_import_selinux(self, mock_import):
def _mock_import(name, globals=None, locals=None, fromlist=tuple(), level=0, **kwargs):
if name == 'ansible.module_utils.compat' and fromlist == ('selinux',):
raise ImportError
return realimport(name, globals=globals, locals=locals, fromlist=fromlist, level=level, **kwargs)
try:
self.clear_modules(['ansible.module_utils.compat.selinux', 'ansible.module_utils.basic'])
mod = builtins.__import__('ansible.module_utils.basic')
self.assertTrue(mod.module_utils.basic.HAVE_SELINUX)
except ImportError:
# no selinux on test system, so skip
pass
self.clear_modules(['ansible.module_utils.compat.selinux', 'ansible.module_utils.basic'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.basic')
self.assertFalse(mod.module_utils.basic.HAVE_SELINUX)
# FIXME: doesn't work yet
# @patch.object(builtins, 'bytes')
# def test_module_utils_basic_bytes(self, mock_bytes):
# mock_bytes.side_effect = NameError()
# from ansible.module_utils import basic
@patch.object(builtins, '__import__')
def test_module_utils_basic_import_systemd_journal(self, mock_import):
def _mock_import(name, *args, **kwargs):
try:
fromlist = kwargs.get('fromlist', args[2])
except IndexError:
fromlist = []
if name == 'systemd' and 'journal' in fromlist:
raise ImportError
return realimport(name, *args, **kwargs)
self.clear_modules(['systemd', 'ansible.module_utils.basic'])
mod = builtins.__import__('ansible.module_utils.basic')
self.assertTrue(mod.module_utils.basic.has_journal)
self.clear_modules(['systemd', 'ansible.module_utils.basic'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.basic')
self.assertFalse(mod.module_utils.basic.has_journal)
| TestImports |
python | google__jax | jax/_src/pallas/core.py | {
"start": 10227,
"end": 10386
} | class ____:
"""Represents a one-sized block dimension that is squeezed out in the kernel."""
squeezed = Squeezed()
@dataclasses.dataclass(frozen=True)
| Squeezed |
python | numpy__numpy | tools/swig/test/testMatrix.py | {
"start": 12850,
"end": 14439
} | class ____(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( scharTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ucharTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( shortTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ushortTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( intTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( uintTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( ulongTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( longLongTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ulongLongTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( floatTestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase( doubleTestCase))
# Execute the test suite
print("Testing 2D Functions of Module Matrix")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
| doubleTestCase |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_scatter14.py | {
"start": 315,
"end": 1633
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_scatter14.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter", "subtype": "straight"})
chart.axis_ids = [69216512, 69214976]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"marker": {"type": "star", "size": 5},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
"marker": {"type": "plus", "size": 5},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 7576,
"end": 9238
} | class ____:
"""Descriptor for lists of scalars.
Args:
percent_unit: The dimension to which percentage scalars will be relative to.
refresh_children: Whether to refresh the node children on value change.
"""
def __init__(self, percent_unit: Unit, refresh_children: bool = False) -> None:
self.percent_unit = percent_unit
self.refresh_children = refresh_children
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.name = name
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> tuple[Scalar, ...] | None:
return obj.get_rule(self.name) # type: ignore[return-value]
def __set__(
self, obj: StylesBase, value: str | Iterable[str | float] | None
) -> None:
if value is None:
obj.clear_rule(self.name)
obj.refresh(layout=True, children=self.refresh_children)
return
parse_values: Iterable[str | float]
if isinstance(value, str):
parse_values = value.split()
else:
parse_values = value
scalars = []
for parse_value in parse_values:
if isinstance(parse_value, (int, float)):
scalars.append(Scalar.from_number(parse_value))
else:
scalars.append(
Scalar.parse(parse_value, self.percent_unit)
if isinstance(parse_value, str)
else parse_value
)
if obj.set_rule(self.name, tuple(scalars)):
obj.refresh(layout=True, children=self.refresh_children)
| ScalarListProperty |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 21481,
"end": 21572
} | class ____(BaseModel):
ok: bool
type: Literal["OKResponse"] = "OKResponse"
| OKResponse |
python | openai__openai-python | src/openai/types/responses/response_code_interpreter_tool_call.py | {
"start": 345,
"end": 519
} | class ____(BaseModel):
logs: str
"""The logs output from the code interpreter."""
type: Literal["logs"]
"""The type of the output. Always `logs`."""
| OutputLogs |
python | huggingface__transformers | src/transformers/models/phi/modular_phi.py | {
"start": 925,
"end": 2491
} | class ____(LlamaRotaryEmbedding):
@staticmethod
def compute_default_rope_parameters(
config: Optional[PhiConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
dim = int(head_dim * partial_rotary_factor)
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
| PhiRotaryEmbedding |
python | great-expectations__great_expectations | tests/core/test_expectation_configuration.py | {
"start": 6035,
"end": 10863
} | class ____:
@pytest.mark.unit
def test_hash_consistency_with_equality(self, config1, config2):
assert config1 == config2
assert hash(config1) == hash(config2)
@pytest.mark.unit
def test_hash_different_for_different_types(self):
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null", kwargs={"column": "test_column"}
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_be_between", kwargs={"column": "test_column"}
)
assert config1 != config2
assert hash(config1) != hash(config2)
@pytest.mark.unit
def test_hash_different_for_different_kwargs(self):
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null", kwargs={"column": "test_column_1"}
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null", kwargs={"column": "test_column_2"}
)
assert config1 != config2
assert hash(config1) != hash(config2)
@pytest.mark.unit
def test_hash_different_for_different_meta(self):
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
meta={"test": "value1"},
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
meta={"test": "value2"},
)
assert config1 != config2
assert hash(config1) != hash(config2)
@pytest.mark.unit
def test_hash_stable_across_runs(self, config1):
hash1 = hash(config1)
hash2 = hash(config1)
hash3 = hash(config1)
assert hash1 == hash2 == hash3
@pytest.mark.unit
def test_expectation_configuration_severity_functionality():
"""Test that severity is properly handled in ExpectationConfiguration."""
from great_expectations.expectations.metadata_types import FailureSeverity
# Test default severity
config = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
)
assert config.severity == FailureSeverity.CRITICAL
# Test setting severity via constructor
config = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity=FailureSeverity.WARNING,
)
assert config.severity == FailureSeverity.WARNING
# Test setting severity via property setter
config.severity = FailureSeverity.INFO
assert config.severity == FailureSeverity.INFO
# Test setting severity via string
config.severity = "warning"
assert config.severity == FailureSeverity.WARNING
# Test that severity is included in serialization
json_dict = config.to_json_dict()
assert "severity" in json_dict
assert json_dict["severity"] == "warning"
# Test that severity is preserved in to_domain_obj conversion
expectation = config.to_domain_obj()
assert expectation.severity == FailureSeverity.WARNING
# Test that severity is included in configuration property
expectation_config = expectation.configuration
assert expectation_config.severity == FailureSeverity.WARNING
# Test invalid severity values
from great_expectations.exceptions import InvalidExpectationConfigurationError
with pytest.raises(InvalidExpectationConfigurationError, match="Invalid severity"):
config.severity = "invalid_severity"
with pytest.raises(
InvalidExpectationConfigurationError, match="Severity must be string or enum"
):
config.severity = 123
@pytest.mark.unit
def test_expectation_configuration_severity_equality():
"""Test that severity is NOT considered in equality comparisons (current implementation)."""
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="critical",
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="warning",
)
config3 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="critical",
)
# Note: Current implementation doesn't include severity in equality comparison
assert config1 == config2 # Same type and kwargs, different severity
assert config1 == config3 # Same type and kwargs, same severity
assert hash(config1) == hash(config2) # Same hash (severity not included)
assert hash(config1) == hash(config3) # Same hash
| TestExpectationConfigurationHash |
python | apache__airflow | airflow-core/tests/unit/serialization/test_dag_serialization.py | {
"start": 164379,
"end": 176619
} | class ____:
"""Test MappedOperator serialization with client defaults and callback properties."""
def test_mapped_operator_client_defaults_application(self, operator_defaults):
"""Test that client_defaults are correctly applied to MappedOperator during deserialization."""
with operator_defaults({"retry_delay": 200.0}):
with DAG(dag_id="test_mapped_dag") as dag:
# Create a mapped operator
BashOperator.partial(
task_id="mapped_task",
retries=5, # Override default
).expand(bash_command=["echo 1", "echo 2", "echo 3"])
# Serialize the DAG
serialized_dag = SerializedDAG.to_dict(dag)
# Should have client_defaults section
assert "client_defaults" in serialized_dag
assert "tasks" in serialized_dag["client_defaults"]
# Deserialize and check that client_defaults are applied
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_task = deserialized_dag.get_task("mapped_task")
# Verify it's still a MappedOperator
from airflow.models.mappedoperator import MappedOperator as SchedulerMappedOperator
assert isinstance(deserialized_task, SchedulerMappedOperator)
# Check that client_defaults values are applied (e.g., retry_delay from client_defaults)
client_defaults = serialized_dag["client_defaults"]["tasks"]
if "retry_delay" in client_defaults:
# If retry_delay wasn't explicitly set, it should come from client_defaults
# Since we can't easily convert timedelta back, check the serialized format
assert hasattr(deserialized_task, "retry_delay")
# Explicit values should override client_defaults
assert deserialized_task.retries == 5 # Explicitly set value
@pytest.mark.parametrize(
("task_config", "dag_id", "task_id", "non_default_fields"),
[
# Test case 1: Size optimization with non-default values
pytest.param(
{"retries": 3}, # Only set non-default values
"test_mapped_size",
"mapped_size_test",
{"retries"},
id="non_default_fields",
),
# Test case 2: No duplication with default values
pytest.param(
{"retries": 0}, # This should match client_defaults and be optimized out
"test_no_duplication",
"mapped_task",
set(), # No fields should be non-default (all optimized out)
id="duplicate_fields",
),
# Test case 3: Mixed default/non-default values
pytest.param(
{"retries": 2, "max_active_tis_per_dag": 16}, # Mix of default and non-default
"test_mixed_optimization",
"mixed_task",
{"retries", "max_active_tis_per_dag"}, # Both should be preserved as they're non-default
id="test_mixed_optimization",
),
],
)
def test_mapped_operator_client_defaults_optimization(
self, task_config, dag_id, task_id, non_default_fields, operator_defaults
):
"""Test that MappedOperator serialization optimizes using client defaults."""
with operator_defaults({"retry_delay": 200.0}):
with DAG(dag_id=dag_id) as dag:
# Create mapped operator with specified configuration
BashOperator.partial(
task_id=task_id,
**task_config,
).expand(bash_command=["echo 1", "echo 2", "echo 3"])
serialized_dag = SerializedDAG.to_dict(dag)
mapped_task_serialized = serialized_dag["dag"]["tasks"][0]["__var"]
assert mapped_task_serialized is not None
assert mapped_task_serialized.get("_is_mapped") is True
# Check optimization behavior
client_defaults = serialized_dag["client_defaults"]["tasks"]
partial_kwargs = mapped_task_serialized["partial_kwargs"]
# Check that all fields are optimized correctly
for field, default_value in client_defaults.items():
if field in non_default_fields:
# Non-default fields should be present in partial_kwargs
assert field in partial_kwargs, (
f"Field '{field}' should be in partial_kwargs as it's non-default"
)
# And have different values than defaults
assert partial_kwargs[field] != default_value, (
f"Field '{field}' should have non-default value"
)
else:
# Default fields should either not be present or have different values if present
if field in partial_kwargs:
assert partial_kwargs[field] != default_value, (
f"Field '{field}' with default value should be optimized out"
)
def test_mapped_operator_expand_input_preservation(self):
"""Test that expand_input is correctly preserved during serialization."""
with DAG(dag_id="test_expand_input"):
mapped_task = BashOperator.partial(task_id="test_expand").expand(
bash_command=["echo 1", "echo 2", "echo 3"], env={"VAR1": "value1", "VAR2": "value2"}
)
# Serialize and deserialize
serialized = BaseSerialization.serialize(mapped_task)
deserialized = BaseSerialization.deserialize(serialized)
# Check expand_input structure
assert hasattr(deserialized, "expand_input")
expand_input = deserialized.expand_input
# Verify the expand_input contains the expected data
assert hasattr(expand_input, "value")
expand_value = expand_input.value
assert "bash_command" in expand_value
assert "env" in expand_value
assert expand_value["bash_command"] == ["echo 1", "echo 2", "echo 3"]
assert expand_value["env"] == {"VAR1": "value1", "VAR2": "value2"}
@pytest.mark.parametrize(
("partial_kwargs_data", "expected_results"),
[
# Test case 1: Encoded format with client defaults
pytest.param(
{
"retry_delay": {"__type": "timedelta", "__var": 600.0},
"execution_timeout": {"__type": "timedelta", "__var": 1800.0},
"owner": "test_user",
},
{
"retry_delay": timedelta(seconds=600),
"execution_timeout": timedelta(seconds=1800),
"owner": "test_user",
},
id="encoded_with_client_defaults",
),
# Test case 2: Non-encoded format (optimized)
pytest.param(
{
"retry_delay": 600.0,
"execution_timeout": 1800.0,
},
{
"retry_delay": timedelta(seconds=600),
"execution_timeout": timedelta(seconds=1800),
},
id="non_encoded_optimized",
),
# Test case 3: Mixed format (some encoded, some not)
pytest.param(
{
"retry_delay": {"__type": "timedelta", "__var": 600.0}, # Encoded
"execution_timeout": 1800.0, # Non-encoded
},
{
"retry_delay": timedelta(seconds=600),
"execution_timeout": timedelta(seconds=1800),
},
id="mixed_encoded_non_encoded",
),
],
)
def test_partial_kwargs_deserialization_formats(self, partial_kwargs_data, expected_results):
"""Test deserialization of partial_kwargs in various formats (encoded, non-encoded, mixed)."""
result = SerializedBaseOperator._deserialize_partial_kwargs(partial_kwargs_data)
# Verify all expected results
for key, expected_value in expected_results.items():
assert key in result, f"Missing key '{key}' in result"
assert result[key] == expected_value, f"key '{key}': expected {expected_value}, got {result[key]}"
def test_partial_kwargs_end_to_end_deserialization(self):
"""Test end-to-end partial_kwargs deserialization with real MappedOperator."""
with DAG(dag_id="test_e2e_partial_kwargs") as dag:
BashOperator.partial(
task_id="mapped_task",
retry_delay=timedelta(seconds=600), # Non-default value
owner="custom_owner", # Non-default value
# retries not specified, should potentially get from client_defaults
).expand(bash_command=["echo 1", "echo 2"])
# Serialize and deserialize the DAG
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_task = deserialized_dag.get_task("mapped_task")
# Verify the task has correct values after round-trip
assert deserialized_task.retry_delay == timedelta(seconds=600)
assert deserialized_task.owner == "custom_owner"
# Verify partial_kwargs were deserialized correctly
assert "retry_delay" in deserialized_task.partial_kwargs
assert "owner" in deserialized_task.partial_kwargs
assert deserialized_task.partial_kwargs["retry_delay"] == timedelta(seconds=600)
assert deserialized_task.partial_kwargs["owner"] == "custom_owner"
@pytest.mark.parametrize(
("callbacks", "expected_has_flags", "absent_keys"),
[
pytest.param(
{
"on_failure_callback": lambda ctx: None,
"on_success_callback": lambda ctx: None,
"on_retry_callback": lambda ctx: None,
},
["has_on_failure_callback", "has_on_success_callback", "has_on_retry_callback"],
["on_failure_callback", "on_success_callback", "on_retry_callback"],
id="multiple_callbacks",
),
pytest.param(
{"on_failure_callback": lambda ctx: None},
["has_on_failure_callback"],
["on_failure_callback", "has_on_success_callback", "on_success_callback"],
id="single_callback",
),
pytest.param(
{"on_failure_callback": lambda ctx: None, "on_execute_callback": None},
["has_on_failure_callback"],
["on_failure_callback", "has_on_execute_callback", "on_execute_callback"],
id="callback_with_none",
),
pytest.param(
{},
[],
[
"has_on_execute_callback",
"has_on_failure_callback",
"has_on_success_callback",
"has_on_retry_callback",
"has_on_skipped_callback",
],
id="no_callbacks",
),
],
)
def test_dag_default_args_callbacks_serialization(callbacks, expected_has_flags, absent_keys):
"""Test callbacks in DAG default_args are serialized as boolean flags."""
default_args = {"owner": "test_owner", "retries": 2, **callbacks}
with DAG(dag_id="test_default_args_callbacks", default_args=default_args) as dag:
BashOperator(task_id="task1", bash_command="echo 1", dag=dag)
serialized_dag_dict = SerializedDAG.serialize_dag(dag)
default_args_dict = serialized_dag_dict["default_args"][Encoding.VAR]
for flag in expected_has_flags:
assert default_args_dict.get(flag) is True
for key in absent_keys:
assert key not in default_args_dict
assert default_args_dict["owner"] == "test_owner"
assert default_args_dict["retries"] == 2
deserialized_dag = SerializedDAG.deserialize_dag(serialized_dag_dict)
assert deserialized_dag.dag_id == "test_default_args_callbacks"
| TestMappedOperatorSerializationAndClientDefaults |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 3851,
"end": 4007
} | class ____(classgetter()):
def react(self):
#? ['shout']
self.s
# -----------------
# multiple inheritance # 1071
# -----------------
| Dude |
python | sqlalchemy__sqlalchemy | test/sql/test_deprecations.py | {
"start": 3691,
"end": 4860
} | class ____(fixtures.TablesTest, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
run_setup_bind = None
run_create_tables = None
@classmethod
def define_tables(cls, metadata):
Table(
"people",
metadata,
Column("people_id", Integer, primary_key=True),
Column("age", Integer),
Column("name", String(30)),
)
Table(
"bookcases",
metadata,
Column("bookcase_id", Integer, primary_key=True),
Column(
"bookcase_owner_id", Integer, ForeignKey("people.people_id")
),
Column("bookcase_shelves", Integer),
Column("bookcase_width", Integer),
)
Table(
"books",
metadata,
Column("book_id", Integer, primary_key=True),
Column(
"bookcase_id", Integer, ForeignKey("bookcases.bookcase_id")
),
Column("book_owner_id", Integer, ForeignKey("people.people_id")),
Column("book_weight", Integer),
)
| LateralSubqueryCoercionsTest |
python | pytorch__pytorch | torch/distributed/checkpoint/_experimental/types.py | {
"start": 430,
"end": 759
} | class ____:
"""
Information about the current rank in a distributed training environment.
Attributes:
global_rank: The global rank ID of the current process.
global_world_size: The total number of processes in the distributed environment.
"""
global_rank: int
global_world_size: int
| RankInfo |
python | sqlalchemy__sqlalchemy | test/sql/test_selectable.py | {
"start": 132557,
"end": 133480
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic_clone(self):
t = table("t", column("c"))
s = select(t).with_for_update(read=True, of=t.c.c)
s2 = visitors.ReplacingCloningVisitor().traverse(s)
assert s2._for_update_arg is not s._for_update_arg
eq_(s2._for_update_arg.read, True)
eq_(s2._for_update_arg.of, [t.c.c])
self.assert_compile(
s2, "SELECT t.c FROM t FOR SHARE OF t", dialect="postgresql"
)
def test_adapt(self):
t = table("t", column("c"))
s = select(t).with_for_update(read=True, of=t.c.c)
a = t.alias()
s2 = sql_util.ClauseAdapter(a).traverse(s)
eq_(s2._for_update_arg.of, [a.c.c])
self.assert_compile(
s2,
"SELECT t_1.c FROM t AS t_1 FOR SHARE OF t_1",
dialect="postgresql",
)
| ForUpdateTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 10358,
"end": 10534
} | class ____(IncrementalShopifyGraphQlBulkStream):
parent_stream_class = Customers
bulk_query: CustomerAddresses = CustomerAddresses
cursor_field = "id"
| CustomerAddress |
python | falconry__falcon | falcon/util/mediatypes.py | {
"start": 4306,
"end": 11803
} | class ____:
main_type: str
subtype: str
quality: float
params: dict
__slots__ = ('main_type', 'subtype', 'quality', 'params')
_NOT_MATCHING = (-1, -1, -1, -1, 0.0)
_Q_VALUE_ERROR_MESSAGE = (
'If provided, the q parameter must be a real number in the range 0 through 1.'
)
@classmethod
def parse(cls, media_range: str) -> _MediaRange:
try:
main_type, subtype, params = _parse_media_type_header(media_range)
except errors.InvalidMediaType as ex:
raise errors.InvalidMediaRange(
'The media range value must contain type/subtype.'
) from ex
# NOTE(vytas): We don't need to special-case Q since the above
# parse_header always lowercases parameter names.
# PERF(vytas): Short-circuit if q is absent.
if 'q' not in params:
return cls(main_type, subtype, 1.0, params)
try:
q = float(params.pop('q'))
except (TypeError, ValueError) as ex:
# NOTE(vytas): RFC 9110, Section 12.4.2:
# weight = OWS ";" OWS "q=" qvalue
# qvalue = ( "0" [ "." 0*3DIGIT ] ) / ( "1" [ "." 0*3("0") ] )
raise errors.InvalidMediaRange(cls._Q_VALUE_ERROR_MESSAGE) from ex
if not (0.0 <= q <= 1.0) or not math.isfinite(q):
raise errors.InvalidMediaRange(cls._Q_VALUE_ERROR_MESSAGE)
# NOTE(vytas): RFC 9110, Section 12.4.2 states that a sender of qvalue
# MUST NOT generate more than three digits after the decimal point,
# but we are more permissive here, and opt not to spend any extra CPU
# cycles, if we have already managed to convert the value to float.
return cls(main_type, subtype, q, params)
def match_score(self, media_type: _MediaType) -> tuple[int, int, int, int, float]:
if self.main_type == '*' or media_type.main_type == '*':
main_matches = 0
elif self.main_type != media_type.main_type:
return self._NOT_MATCHING
else:
main_matches = 1
if self.subtype == '*' or media_type.subtype == '*':
sub_matches = 0
elif self.subtype != media_type.subtype:
return self._NOT_MATCHING
else:
sub_matches = 1
# PERF(vytas): We could also use bitwise operators directly between
# params.keys(), but set()/frozenset() seem to outperform dict.keys()
# slightly regardless of the number of elements, especially when we
# reuse the same sets for both intersection and symmetric_difference.
mr_pnames = frozenset(self.params)
mt_pnames = frozenset(media_type.params)
exact_match = 0 if mr_pnames ^ mt_pnames else 1
matching = mr_pnames & mt_pnames
for pname in matching:
if self.params[pname] != media_type.params[pname]:
return self._NOT_MATCHING
return (main_matches, sub_matches, exact_match, len(matching), self.quality)
# PERF(vytas): It is possible to cache a classmethod too, but the invocation is
# less efficient, especially in the case of a cache hit.
# NOTE(vytas): Also, if we decide to make these classes public, we need to keep
# these cached parsers private.
_parse_media_type = functools.lru_cache(_MediaType.parse)
_parse_media_range = functools.lru_cache(_MediaRange.parse)
@functools.lru_cache
def _parse_media_ranges(header: str) -> tuple[_MediaRange, ...]:
return tuple(_MediaRange.parse(media_range) for media_range in header.split(','))
@functools.lru_cache
def quality(media_type: str, header: str) -> float:
"""Get quality of the most specific matching media range.
Media-ranges are parsed from the provided `header` value according to
RFC 9110, Section 12.5.1 (the ``Accept`` header).
The provided `media_type` is matched against each of the parsed media
ranges, and the fitness of each match is assessed as follows
(in the decreasing priority list of criteria):
1. Do the main types (as in ``type/subtype``) match?
The types must either match exactly, or as wildcard (``*``).
The matches involving a wildcard are prioritized lower.
2. Do the subtypes (as in ``type/subtype``) match?
The subtypes must either match exactly, or as wildcard (``*``).
The matches involving a wildcard are prioritized lower.
3. Do the parameters match exactly?
If all the parameter names and values (if any) between the media range
and media type match exactly, such match is prioritized higher than
matches involving extraneous parameters on either side.
Note that if parameter names match, the corresponding values must also
be equal, or the provided media type is considered not to match the
media range in question at all.
4. The number of matching parameters.
5. Finally, if two or more best media range matches are equally fit
according to all of the above criteria (1) through (4), the highest
quality (i.e., the value of the ``q`` parameter) of these is returned.
Note:
With the exception of evaluating the exact parameter match (3), the
number of extraneous parameters (i.e. where the names are only present
in the media type, or only in the media range) currently does not
influence the described specificity sort order.
Args:
media_type: The Internet media type to match against the provided
HTTP ``Accept`` header value.
header: The value of a header that conforms to the format of the
HTTP ``Accept`` header.
Returns:
Quality of the most specific media range matching the provided
`media_type`. (If none matches, 0.0 is returned.)
.. versionadded:: 4.0
"""
parsed_media_type = _parse_media_type(media_type)
most_specific = max(
media_range.match_score(parsed_media_type)
for media_range in _parse_media_ranges(header)
)
return most_specific[-1]
def best_match(media_types: Iterable[str], header: str) -> str:
"""Choose media type with the highest :func:`quality` from a list of candidates.
Args:
media_types: An iterable over one or more Internet media types
to match against the provided header value.
header: The value of a header that conforms to the format of the
HTTP ``Accept`` header.
Returns:
Best match from the supported candidates, or an empty string if the
provided header value does not match any of the given types.
.. versionadded:: 4.0
"""
# PERF(vytas): Using the default parameter, i.e., max(..., default='', 0.0)
# would be much nicer than EAFP, but for some reason it is quite slow
# regardless of whether media_types is empty or not.
try:
matching, best_quality = max(
((media_type, quality(media_type, header)) for media_type in media_types),
key=lambda mt_quality: mt_quality[1],
)
if best_quality > 0.0:
return matching
except errors.InvalidMediaType:
# NOTE(vytas): Do not swallow instances of InvalidMediaType
# (it a subclass of ValueError).
raise
except ValueError:
# NOTE(vytas): Barring unknown bugs, we only expect unhandled
# ValueErrors from supplying an empty media_types value.
pass
return ''
| _MediaRange |
python | walkccc__LeetCode | solutions/430. Flatten a Multilevel Doubly Linked List/430-2.py | {
"start": 0,
"end": 438
} | class ____:
def flatten(self, head: 'Node') -> 'Node':
curr = head
while curr:
if curr.child:
cachedNext = curr.next
curr.next = curr.child
curr.child.prev = curr
curr.child = None
tail = curr.next
while tail.next:
tail = tail.next
tail.next = cachedNext
if cachedNext:
cachedNext.prev = tail
curr = curr.next
return head
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 183116,
"end": 184971
} | class ____(Response):
"""
Response of tasks.failed endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "failed"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(FailedResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| FailedResponse |
python | pyca__cryptography | tests/hazmat/primitives/test_hashes.py | {
"start": 1516,
"end": 1767
} | class ____:
test_sha1 = generate_base_hash_test(
hashes.SHA1(),
digest_size=20,
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA224()),
skip_message="Does not support SHA224",
)
| TestSHA1 |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/tools/_beta_builtin_memory_tool.py | {
"start": 4859,
"end": 9123
} | class ____(BetaAsyncBuiltinFunctionTool):
"""Abstract base class for memory tool implementations.
This class provides the interface for implementing a custom memory backend for Claude.
Subclass this to create your own memory storage solution (e.g., database, cloud storage, encrypted files, etc.).
Example usage:
```py
class MyMemoryTool(BetaAbstractMemoryTool):
def view(self, command: BetaMemoryTool20250818ViewCommand) -> BetaFunctionToolResultType:
...
return "view result"
def create(self, command: BetaMemoryTool20250818CreateCommand) -> BetaFunctionToolResultType:
...
return "created successfully"
# ... implement other abstract methods
client = Anthropic()
memory_tool = MyMemoryTool()
message = client.beta.messages.run_tools(
model="claude-3-5-sonnet-20241022",
messages=[{"role": "user", "content": "Remember that I like coffee"}],
tools=[memory_tool],
).until_done()
```
"""
def __init__(self, *, cache_control: BetaCacheControlEphemeralParam | None = None) -> None:
super().__init__()
self._cache_control = cache_control
@override
def to_dict(self) -> BetaMemoryTool20250818Param:
param: BetaMemoryTool20250818Param = {"type": "memory_20250818", "name": "memory"}
if self._cache_control is not None:
param["cache_control"] = self._cache_control
return param
@override
async def call(self, input: object) -> BetaFunctionToolResultType:
command = cast(
BetaMemoryTool20250818Command,
construct_type_unchecked(value=input, type_=cast(Any, BetaMemoryTool20250818Command)),
)
return await self.execute(command)
async def execute(self, command: BetaMemoryTool20250818Command) -> BetaFunctionToolResultType:
"""Execute a memory command and return the result.
This method dispatches to the appropriate handler method based on the
command type (view, create, str_replace, insert, delete, rename).
You typically don't need to override this method.
"""
if command.command == "view":
return await self.view(command)
elif command.command == "create":
return await self.create(command)
elif command.command == "str_replace":
return await self.str_replace(command)
elif command.command == "insert":
return await self.insert(command)
elif command.command == "delete":
return await self.delete(command)
elif command.command == "rename":
return await self.rename(command)
elif TYPE_CHECKING: # type: ignore[unreachable]
assert_never(command)
else:
raise NotImplementedError(f"Unknown command: {command.command}")
@abstractmethod
async def view(self, command: BetaMemoryTool20250818ViewCommand) -> BetaFunctionToolResultType:
"""View the contents of a memory path."""
pass
@abstractmethod
async def create(self, command: BetaMemoryTool20250818CreateCommand) -> BetaFunctionToolResultType:
"""Create a new memory file with the specified content."""
pass
@abstractmethod
async def str_replace(self, command: BetaMemoryTool20250818StrReplaceCommand) -> BetaFunctionToolResultType:
"""Replace text in a memory file."""
pass
@abstractmethod
async def insert(self, command: BetaMemoryTool20250818InsertCommand) -> BetaFunctionToolResultType:
"""Insert text at a specific line number in a memory file."""
pass
@abstractmethod
async def delete(self, command: BetaMemoryTool20250818DeleteCommand) -> BetaFunctionToolResultType:
"""Delete a memory file or directory."""
pass
@abstractmethod
async def rename(self, command: BetaMemoryTool20250818RenameCommand) -> BetaFunctionToolResultType:
"""Rename or move a memory file or directory."""
pass
async def clear_all_memory(self) -> BetaFunctionToolResultType:
"""Clear all memory data."""
raise NotImplementedError("clear_all_memory not implemented")
| BetaAsyncAbstractMemoryTool |
python | mitsuhiko__rye | rye-devtools/src/rye_devtools/find_downloads.py | {
"start": 946,
"end": 8855
} | class ____(Finder):
implementation = PythonImplementation.CPYTHON
RELEASE_URL = (
"https://api.github.com/repos/indygreg/python-build-standalone/releases"
)
FLAVOR_PREFERENCES = [
"shared-pgo",
"shared-noopt",
"shared-noopt",
"pgo+lto",
"pgo",
"lto",
]
HIDDEN_FLAVORS = [
"debug",
"noopt",
"install_only",
]
SPECIAL_TRIPLES = {
"macos": "x86_64-apple-darwin",
"linux64": "x86_64-unknown-linux-gnu",
"windows-amd64": "x86_64-pc-windows-msvc",
"windows-x86-shared-pgo": "i686-pc-windows-msvc-shared-pgo",
"windows-amd64-shared-pgo": "x86_64-pc-windows-msvc-shared-pgo",
"windows-x86": "i686-pc-windows-msvc",
"linux64-musl": "x86_64-unknown-linux-musl",
}
# matches these: https://doc.rust-lang.org/std/env/consts/constant.ARCH.html
ARCH_MAPPING = {
"x86_64": "x86_64",
"x86": "x86",
"i686": "x86",
"aarch64": "aarch64",
}
# matches these: https://doc.rust-lang.org/std/env/consts/constant.OS.html
PLATFORM_MAPPING = {
"darwin": "macos",
"windows": "windows",
"linux": "linux",
}
ENV_MAPPING = {
"gnu": "gnu",
# We must ignore musl for now
# "musl": "musl",
}
FILENAME_RE = re.compile(
r"""(?x)
^
cpython-(?P<ver>\d+\.\d+\.\d+?)
(?:\+\d+)?
-(?P<triple>.*?)
(?:-[\dT]+)?\.tar\.(?:gz|zst)
$
"""
)
def __init__(self, client: httpx.AsyncClient):
self.client = client
async def find(self) -> list[PythonDownload]:
downloads = await self.fetch_indygreg_downloads()
await self.fetch_indygreg_checksums(downloads, n=20)
return downloads
async def fetch_indygreg_downloads(self, pages: int = 100) -> list[PythonDownload]:
"""Fetch all the indygreg downloads from the release API."""
results: dict[Version, dict[tuple[str, str], list[PythonDownload]]] = {}
for page in range(1, pages):
log(f"Fetching indygreg release page {page}")
resp = await fetch(self.client, "%s?page=%d" % (self.RELEASE_URL, page))
rows = resp.json()
if not rows:
break
for row in rows:
for asset in row["assets"]:
url = asset["browser_download_url"]
download = self.parse_download_url(url)
if download is not None:
(
results.setdefault(download.version, {})
# For now, we only group by arch and platform, because Rust's PythonVersion doesn't have a notion
# of environment. Flavor will never be used to sort download choices and must not be included in grouping.
.setdefault(
(download.triple.arch, download.triple.platform), []
)
.append(download)
)
downloads = []
for version, platform_downloads in results.items():
for flavors in platform_downloads.values():
best = self.pick_best_download(flavors)
if best is not None:
downloads.append(best)
return downloads
@classmethod
def parse_download_url(cls, url: str) -> PythonDownload | None:
"""Parse an indygreg download URL into a PythonDownload object."""
# The URL looks like this:
# https://github.com/indygreg/python-build-standalone/releases/download/20240107/cpython-3.12.1%2B20240107-aarch64-unknown-linux-gnu-lto-full.tar.zst
filename = unquote(url.rsplit("/", maxsplit=1)[-1])
if filename.endswith(".sha256"):
return
match = cls.FILENAME_RE.match(filename)
if match is None:
return
version_str, triple_str = match.groups()
version = Version.from_str(version_str)
triple = cls.parse_triple(triple_str)
if triple is None:
return
return PythonDownload(
version=version,
triple=triple,
implementation=PythonImplementation.CPYTHON,
filename=filename,
url=url,
)
@classmethod
def parse_triple(cls, triple: str) -> PlatformTriple | None:
"""Parse a triple into a PlatformTriple object."""
def match_flavor(triple: str) -> str | None:
for flavor in cls.FLAVOR_PREFERENCES + cls.HIDDEN_FLAVORS:
if flavor in triple:
return flavor
return None
def match_mapping(
pieces: list[str], mapping: dict[str, str]
) -> tuple[str | None, list[str]]:
for i in reversed(range(0, len(pieces))):
if pieces[i] in mapping:
return mapping[pieces[i]], pieces[:i]
return None, pieces
# Map, old, special triplets to proper triples for parsing, or
# return the triple if it's not a special one
triple = cls.SPECIAL_TRIPLES.get(triple, triple)
# freethreaded builds are experimental, ignore them for now
if "freethreaded" in triple:
return
pieces = triple.split("-")
flavor = match_flavor(triple)
env, pieces = match_mapping(pieces, cls.ENV_MAPPING)
platform, pieces = match_mapping(pieces, cls.PLATFORM_MAPPING)
arch, pieces = match_mapping(pieces, cls.ARCH_MAPPING)
if arch is None or platform is None:
return
if env is None and platform == "linux":
return
return PlatformTriple(arch, platform, env, flavor)
@classmethod
def pick_best_download(
cls, downloads: list[PythonDownload]
) -> PythonDownload | None:
"""Pick the best download from the list of downloads."""
def preference(download: PythonDownload) -> int:
try:
return cls.FLAVOR_PREFERENCES.index(download.triple.flavor)
except ValueError:
return len(cls.FLAVOR_PREFERENCES) + 1
downloads.sort(key=preference)
return downloads[0] if downloads else None
async def fetch_indygreg_checksums(
self, downloads: list[PythonDownload], n: int = 10
) -> None:
"""Fetch the checksums for the given downloads."""
checksums_url = set()
for download in downloads:
release_url = download.url.rsplit("/", maxsplit=1)[0]
checksum_url = release_url + "/SHA256SUMS"
checksums_url.add(checksum_url)
async def fetch_checksums(url: str):
try:
resp = await fetch(self.client, url)
except HTTPStatusError as e:
if e.response.status_code != 404:
raise
return None
return resp
completed = 0
tasks = []
for batch in batched(checksums_url, n):
log(f"Fetching indygreg checksums: {completed}/{len(checksums_url)}")
async with asyncio.TaskGroup() as tg:
for url in batch:
task = tg.create_task(fetch_checksums(url))
tasks.append(task)
completed += n
checksums = {}
for task in tasks:
resp = task.result()
if resp is None:
continue
lines = resp.text.splitlines()
for line in lines:
checksum, filename = line.split(" ", maxsplit=1)
filename = filename.strip()
checksums[filename] = checksum
for download in downloads:
download.sha256 = checksums.get(download.filename)
| CPythonFinder |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/instigation.py | {
"start": 30607,
"end": 31170
} | class ____(graphene.Union):
class Meta:
types = (GrapheneInstigationStates, GraphenePythonError)
name = "InstigationStatesOrError"
types = [
GrapheneDryRunInstigationTick,
GrapheneDryRunInstigationTicks,
GrapheneInstigationTypeSpecificData,
GrapheneInstigationState,
GrapheneInstigationStateNotFoundError,
GrapheneInstigationStateOrError,
GrapheneInstigationStates,
GrapheneInstigationStatesOrError,
GrapheneInstigationTick,
GrapheneScheduleData,
GrapheneSensorData,
]
| GrapheneInstigationStatesOrError |
python | ApeWorX__ape | src/ape_ethereum/ecosystem.py | {
"start": 6769,
"end": 11295
} | class ____(PluginConfig):
"""
L2 plugins should use this as their config base-class.
"""
DEFAULT_TRANSACTION_TYPE: ClassVar[int] = TransactionType.DYNAMIC.value
DEFAULT_LOCAL_GAS_LIMIT: ClassVar[GasLimit] = "max"
NETWORKS: ClassVar[dict[str, tuple[int, int]]] = NETWORKS
default_network: str = LOCAL_NETWORK_NAME
_forked_configs: dict[str, ForkedNetworkConfig] = {}
_custom_networks: dict[str, NetworkConfig] = {}
# NOTE: This gets appended to Ape's root User-Agent string.
request_headers: dict = {}
model_config = SettingsConfigDict(extra="allow", env_prefix="APE_ETHEREUM_")
@model_validator(mode="before")
@classmethod
def load_network_configs(cls, values):
cfg_forks: dict[str, ForkedNetworkConfig] = {}
custom_networks = {}
for name, obj in values.items():
if name.startswith("_"):
continue
net_name = name.replace("-", "_")
key = net_name.replace("_fork", "")
if net_name.endswith("_fork"):
key = net_name.replace("_fork", "")
default_fork_model = create_local_network_config(
use_fork=True,
default_transaction_type=cls.DEFAULT_TRANSACTION_TYPE,
gas_limit=cls.DEFAULT_LOCAL_GAS_LIMIT,
).model_dump(by_alias=True)
data = merge_configs(default_fork_model, obj)
cfg_forks[key] = ForkedNetworkConfig.model_validate(data)
elif (
key != LOCAL_NETWORK_NAME
and key not in cls.NETWORKS
and isinstance(obj, dict)
and key not in ("request_headers",)
):
# Custom network.
default_network_model = create_network_config(
default_transaction_type=cls.DEFAULT_TRANSACTION_TYPE
).model_dump(by_alias=True)
data = merge_configs(default_network_model, obj)
custom_networks[name] = NetworkConfig.model_validate(data)
values["_forked_configs"] = {**cfg_forks, **values.get("_forked_configs", {})}
return {**values, **custom_networks}
@computed_field # type: ignore[misc]
@cached_property
def local(self) -> NetworkConfig:
return create_local_network_config(
default_provider="test",
default_transaction_type=self.DEFAULT_TRANSACTION_TYPE,
gas_limit=self.DEFAULT_LOCAL_GAS_LIMIT,
)
@only_raise_attribute_error
def __getattr__(self, key: str) -> Any:
_assert_not_ipython_check(key)
net_key = key.replace("-", "_")
if net_key.endswith("_fork"):
return self._get_forked_config(net_key)
try:
return super().__getattr__(key)
except AttributeError:
return NetworkConfig(default_transaction_type=self.DEFAULT_TRANSACTION_TYPE)
def __contains__(self, key: str) -> bool:
net_key = key.replace("-", "_")
if net_key.endswith("_fork"):
return self._get_forked_config(net_key) is not None
return super().__contains__(key)
def get(self, key: str, default: Optional[Any] = None) -> Any:
net_key = key.replace("-", "_")
if net_key.endswith("_fork"):
if cfg := self._get_forked_config(net_key):
return cfg
result: Any
if result := super().get(key, default=default):
return result
# Handle weird base-class differences.
try:
return self.__getattr__(key)
except AttributeError:
return default
def _get_forked_config(self, name: str) -> Optional[ForkedNetworkConfig]:
live_key: str = name.replace("_fork", "")
if self._forked_configs.get(live_key):
return self._forked_configs[live_key]
live_cfg: Any
if live_cfg := self.get(live_key):
if isinstance(live_cfg, NetworkConfig):
fork_cfg = create_local_network_config(
use_fork=True,
default_transaction_type=self.DEFAULT_TRANSACTION_TYPE,
gas_limit=self.DEFAULT_LOCAL_GAS_LIMIT,
)
self._forked_configs[live_key] = fork_cfg
return fork_cfg
return None
def _get_custom_network(self, name: str) -> NetworkConfig:
return self._custom_networks.get(name, NetworkConfig())
| BaseEthereumConfig |
python | pytorch__pytorch | test/distributed/pipelining/model_registry.py | {
"start": 5428,
"end": 5946
} | class ____(torch.nn.Module):
def __init__(self, d_hid: int, n_layers: int = 2):
super().__init__()
self.layers = torch.nn.ModuleList([MLPModule(d_hid) for _ in range(n_layers)])
# For testing purpose only, this should be defined by user
self.split_spec = {
f"layers.{i}": SplitPoint.BEGINNING for i in range(1, n_layers)
}
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
# Multi-MLP with kwargs model
| MultiMLP |
python | lepture__authlib | authlib/integrations/flask_client/apps.py | {
"start": 1590,
"end": 2417
} | class ____(FlaskAppMixin, OAuth1Mixin, BaseApp):
client_cls = OAuth1Session
def authorize_access_token(self, **kwargs):
"""Fetch access token in one step.
:return: A token dict.
"""
params = request.args.to_dict(flat=True)
state = params.get("oauth_token")
if not state:
raise OAuthError(description='Missing "oauth_token" parameter')
data = self.framework.get_state_data(session, state)
if not data:
raise OAuthError(description='Missing "request_token" in temporary data')
params["request_token"] = data["request_token"]
params.update(kwargs)
self.framework.clear_state_data(session, state)
token = self.fetch_access_token(**params)
self.token = token
return token
| FlaskOAuth1App |
python | pytorch__pytorch | test/jit/test_device_analysis.py | {
"start": 395,
"end": 11473
} | class ____(JitTestCase):
@classmethod
def setUpClass(cls):
cls.cpu = torch.device("cpu")
cls.cuda = torch.device("cuda")
cls.vulkan = torch.device("vulkan")
cls.mkldnn = torch.device(
"mkldnn"
) # MKLDNN can't mix with other device types at all
cls.device_types = [cls.cpu, cls.cuda, cls.vulkan]
@staticmethod
def node_output_device(graph):
graph_out = list(graph.outputs())
assert len(graph_out) == 1
return graph_out[0].type().device()
def prop_device_on_graph(self, graph, example_devices, in_shapes=None):
graph_inputs = list(graph.inputs())
torch._C._jit_pass_erase_shape_information(graph)
self.assertEqual(len(graph_inputs), len(example_devices))
for graph_i, device_i in zip(graph_inputs, example_devices):
if device_i is not None:
graph_i.setType(graph_i.type().with_device(device_i))
if in_shapes:
for graph_i, shapes_i in zip(graph_inputs, in_shapes):
if shapes_i is not None:
graph_i.setType(graph_i.type().with_sizes(shapes_i))
torch._C._jit_pass_propagate_shapes_on_graph(graph)
torch._C._jit_pass_propagate_device(graph)
def assert_device_equal(
self, fn, in_devices, expected_device, in_shapes=None, subtest_str=""
):
with self.subTest(
f"In device: {in_devices}, expected: {expected_device}, \n {subtest_str}"
):
graph = torch.jit.script(fn).graph
self.prop_device_on_graph(graph, in_devices, in_shapes)
actual_device = self.node_output_device(graph)
if expected_device is None or actual_device is None:
self.assertEqual(actual_device, expected_device)
else:
self.assertEqual(
actual_device.type, expected_device.type, "Failed Verification"
)
def test_device_apply(self):
# Test if the device is properly applied to the input
def add_self(x):
return x + x
graph = torch.jit.script(add_self).graph
graph_input = next(graph.inputs())
graph_input.setType(graph_input.type().with_device(self.cpu))
# self.prop_device_on_graph(graph, [self.cpu])
self.assertEqual(graph_input.type().device(), self.cpu)
@unittest.skipIf(models is None, "Requires torchvision")
def test_mobilenet(self):
in_cpu = torch.randn(1, 3, 224, 224, device=self.cpu)
in_example = in_cpu
expected_device = self.cpu
m = torch.jit.script(models.mobilenet_v3_small())
m.eval()
graph = torch.jit.freeze(m).graph
# torch._C._jit_pass_erase_shape_information(graph)
apply_input_props_using_example(graph, in_example)
torch._C._jit_pass_propagate_shapes_on_graph(graph)
torch._C._jit_pass_propagate_device(graph)
actual_device = self.node_output_device(graph)
if expected_device is None or actual_device is None:
self.assertEqual(actual_device, expected_device)
else:
self.assertEqual(
actual_device.type, expected_device.type, "Failed Verification"
)
def test_simple(self):
def add_self(x):
return x + x
def relu_(x):
return torch.nn.functional.relu_(x)
functions = [add_self, relu_]
for in_device, fn in product(self.device_types, functions):
self.assert_device_equal(fn, [in_device], in_device)
def test_set_dtype(self):
def set_device(x):
return x.to("cpu")
for in_device in self.device_types:
self.assert_device_equal(set_device, [in_device], self.cpu)
def test_device_arg(self):
# Test that no device gets propagated when arg is passed in
def set_device(x, device_name: torch.device):
return x.to(device=device_name)
for in_device in self.device_types:
self.assert_device_equal(set_device, [in_device, None], None)
def test_tensor_as_fns(self):
def view_as_fn(x, y):
return x.view_as(y)
def expand_as_fn(x, y):
return x.expand_as(y)
def reshape_as_fn(x, y):
return x.reshape_as(y)
for test_fn in [view_as_fn, expand_as_fn, reshape_as_fn]:
self.assert_device_equal(test_fn, [self.cpu, self.cpu], self.cpu)
self.assert_device_equal(test_fn, [self.cuda, None], self.cuda)
self.assert_device_equal(test_fn, [None, self.mkldnn], None)
def type_as_fn(x, y):
return x.type_as(y)
self.assert_device_equal(type_as_fn, [self.cpu, self.cpu], self.cpu)
self.assert_device_equal(type_as_fn, [self.cuda, None], None)
self.assert_device_equal(type_as_fn, [None, self.mkldnn], self.mkldnn)
def zerodim_test_core(self, device_pairs):
# Test the support of zerodim tensors with non-zerodim tensors
def mul(x, y):
return x * y
def add(x, y):
return x + y
fns = [mul, add]
input_shapes = [
((1, 2, 2), (2, 2)), # Different dim, non-zerodim
((1, 2, 2), ()), # one zerodim
((), ()), # both zerodim
]
for fn, shapes, devices in product(fns, input_shapes, device_pairs):
subtest_str = f"{fn.__name__} \n shapes: {shapes}, \n devices: {devices}"
in0 = torch.rand(shapes[0], device=devices[0])
in1 = torch.rand(shapes[1], device=devices[1])
try:
out = fn(in0, in1)
except Exception as e:
# Don't expect eager failures for CPU zerodim tensors
for i in range(len(devices)):
if shapes[i] == () and devices[i] == self.cpu:
raise e
# only expect eager failures on different devices
if devices[0] == devices[1]:
raise e
# Expect result device to be None for the failure cases.
self.assert_device_equal(fn, devices, None, shapes, subtest_str)
continue
self.assert_device_equal(fn, devices, out.device, shapes, subtest_str)
# Test that without shapes, we either get the same device or None for the device
# Aka that the code is convservative for tensor shapes.
graph = torch.jit.script(fn).graph
self.prop_device_on_graph(graph, devices)
actual_device = self.node_output_device(graph)
self.assertTrue(
(actual_device is None) or (actual_device.type == out.device.type)
)
def test_zerodim_cpu(self):
# Allow for minimal testing locally
self.zerodim_test_core([(self.cpu, self.cpu)])
def test_zerodim_no_device(self):
# If device is missing, you should never be able to infer device type.
def mul(x, y):
return x * y
def add(x, y):
return x + y
fns = [mul, add]
device_pairs = [
(self.cpu, None),
(None, self.cpu),
(None, None),
]
input_shapes = [
((1, 2, 2), (2, 2)), # Different dim, non-zerodim
((1, 2, 2), ()), # one zerodim
((), ()), # both zerodim
]
for fn, shapes, devices in product(fns, input_shapes, device_pairs):
self.assert_device_equal(fn, devices, None, shapes)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_zerodim_gpu(self):
device_pairs = [
(self.cpu, self.cuda),
(self.cuda, self.cpu),
(self.cuda, self.cuda),
]
self.zerodim_test_core(device_pairs)
def test_custom_device_op(self):
# Test both of the custom functions and check that the devicetype is
# correctly applied
def set_cuda(x):
return x.cuda()
def set_cpu(x):
return x.cpu()
def set_mkldnn(x):
return x.to_mkldnn()
device_pairs = (
(set_cuda, self.cuda),
(set_cpu, self.cpu),
(set_mkldnn, self.mkldnn),
)
for fn, out_device in device_pairs:
for in_device in self.device_types:
self.assert_device_equal(fn, [in_device], out_device)
def test_device_if_propagation(self):
def test_fn(x, y, z: bool):
if z:
return x + 3
else:
return y * 2
self.assert_device_equal(test_fn, [self.cpu, self.cpu, None], self.cpu)
self.assert_device_equal(test_fn, [self.mkldnn, self.mkldnn, None], self.mkldnn)
self.assert_device_equal(test_fn, [self.cpu, self.cuda, None], None)
def test_loop_simple(self):
def test_fn(x, y, z: int):
for _ in range(z):
y = x
return y
self.assert_device_equal(test_fn, [self.cpu, self.cpu, None], self.cpu)
self.assert_device_equal(test_fn, [self.cpu, self.cuda, None], None)
self.assert_device_equal(test_fn, [self.cpu, None, None], None)
def test_loop_device_change(self):
def test_fn(x, z: int):
for _ in range(z):
x = x.cuda()
return x
self.assert_device_equal(test_fn, [self.cpu, None], None)
self.assert_device_equal(test_fn, [self.cuda, None], self.cuda)
self.assert_device_equal(test_fn, [None, None], None)
def test_while_change(self):
def test_fn(x, z: int):
while z > 0:
x = x.cuda()
z = 0
return x
self.assert_device_equal(test_fn, [self.cpu, None], None)
self.assert_device_equal(test_fn, [self.cuda, None], self.cuda)
self.assert_device_equal(test_fn, [None, None], None)
def test_nested_loops(self):
def test_fn(x, z: int):
for i in range(z):
x = x.cpu()
for _ in range(i):
x = x + 1
return x
self.assert_device_equal(test_fn, [self.cpu, None], self.cpu)
self.assert_device_equal(test_fn, [self.cuda, None], None)
self.assert_device_equal(test_fn, [None, None], None)
def test_if_loop_mix(self):
def test_fn(x, y, z: bool, a: bool):
c = x
while a:
if z:
c = x + 3
else:
c = y * 2
a = False
return c
self.assert_device_equal(test_fn, [self.cpu, self.cpu, None, None], self.cpu)
self.assert_device_equal(
test_fn, [self.mkldnn, self.mkldnn, None, None], self.mkldnn
)
self.assert_device_equal(test_fn, [self.cpu, self.cuda, None, None], None)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestDeviceAnalysis |
python | python-pillow__Pillow | src/PIL/ExifTags.py | {
"start": 8197,
"end": 9126
} | class ____(IntEnum):
GPSVersionID = 0x00
GPSLatitudeRef = 0x01
GPSLatitude = 0x02
GPSLongitudeRef = 0x03
GPSLongitude = 0x04
GPSAltitudeRef = 0x05
GPSAltitude = 0x06
GPSTimeStamp = 0x07
GPSSatellites = 0x08
GPSStatus = 0x09
GPSMeasureMode = 0x0A
GPSDOP = 0x0B
GPSSpeedRef = 0x0C
GPSSpeed = 0x0D
GPSTrackRef = 0x0E
GPSTrack = 0x0F
GPSImgDirectionRef = 0x10
GPSImgDirection = 0x11
GPSMapDatum = 0x12
GPSDestLatitudeRef = 0x13
GPSDestLatitude = 0x14
GPSDestLongitudeRef = 0x15
GPSDestLongitude = 0x16
GPSDestBearingRef = 0x17
GPSDestBearing = 0x18
GPSDestDistanceRef = 0x19
GPSDestDistance = 0x1A
GPSProcessingMethod = 0x1B
GPSAreaInformation = 0x1C
GPSDateStamp = 0x1D
GPSDifferential = 0x1E
GPSHPositioningError = 0x1F
"""Maps EXIF GPS tags to tag names."""
GPSTAGS = {i.value: i.name for i in GPS}
| GPS |
python | jschneier__django-storages | tests/test_s3.py | {
"start": 37685,
"end": 39308
} | class ____(TestCase):
def setUp(self) -> None:
self.storage = s3.S3Storage()
self.storage._connections.connection = mock.MagicMock()
def test_loading_ssec(self):
params = {"SSECustomerKey": "xyz", "CacheControl": "never"}
self.storage.get_object_parameters = lambda name: params
filtered = {"SSECustomerKey": "xyz"}
f = s3.S3File("test", "r", self.storage)
f.obj.load.assert_called_once_with(**filtered)
f.file
f.obj.download_fileobj.assert_called_once_with(
mock.ANY, ExtraArgs=filtered, Config=self.storage.transfer_config
)
def test_closed(self):
with s3.S3File("test", "wb", self.storage) as f:
with self.subTest("after init"):
self.assertFalse(f.closed)
with self.subTest("after file access"):
# Ensure _get_file has been called
f.file
self.assertFalse(f.closed)
with self.subTest("after close"):
f.close()
self.assertTrue(f.closed)
with self.subTest("reopening"):
f.file
self.assertFalse(f.closed)
def test_reopening(self):
f = s3.S3File("test", "wb", self.storage)
with f.open() as fp:
fp.write(b"xyz")
with f.open() as fp:
fp.write(b"xyz")
# Properties are reset
self.assertEqual(f._write_counter, 0)
self.assertEqual(f._raw_bytes_written, 0)
self.assertFalse(f._is_dirty)
self.assertIsNone(f._multipart)
@mock_aws
| S3FileTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.