language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
getsentry__sentry
|
src/sentry/utils/function_cache.py
|
{
"start": 4279,
"end": 5636
}
|
class ____(Generic[*Ts, R]):
"""
A callable class that wraps a function with caching capabilities and provides a batch method
for processing multiple sets of arguments efficiently.
"""
def __init__(self, func: Callable[[*Ts], R], cache_ttl: timedelta):
self.func = func
self.cache_ttl = cache_ttl
update_wrapper(self, func)
def __call__(self, *args: *Ts) -> R:
cache_key = cache_key_for_cached_func(self.func, *args)
cached_val = cache.get(cache_key, None)
if cached_val is None:
cached_val = self.func(*args)
cache.set(cache_key, cached_val, timeout=self.cache_ttl.total_seconds())
return cached_val
def batch(self, args_list: Sequence[tuple[*Ts]]) -> list[R]:
cache_keys = [cache_key_for_cached_func(self.func, *args) for args in args_list]
values = cache.get_many(cache_keys)
missing_keys = {ck: args for ck, args in zip(cache_keys, args_list) if ck not in values}
to_cache = {}
for cache_key, args in missing_keys.items():
result = self.func(*args)
values[cache_key] = result
to_cache[cache_key] = result
if to_cache:
cache.set_many(to_cache, timeout=self.cache_ttl.total_seconds())
return [values[ck] for ck in cache_keys]
|
CachedFunction
|
python
|
PrefectHQ__prefect
|
src/prefect/blocks/notifications.py
|
{
"start": 22663,
"end": 26259
}
|
class ____(AbstractAppriseNotificationBlock):
"""
Enables sending notifications via a provided Discord webhook.
See [Apprise notify_Discord docs](https://github.com/caronc/apprise/wiki/Notify_Discord) # noqa
Examples:
Load a saved Discord webhook and send a message:
```python
from prefect.blocks.notifications import DiscordWebhook
discord_webhook_block = DiscordWebhook.load("BLOCK_NAME")
discord_webhook_block.notify("Hello from Prefect!")
```
"""
_description = "Enables sending notifications via a provided Discord webhook."
_block_type_name = "Discord Webhook"
_block_type_slug = "discord-webhook"
_logo_url = HttpUrl(
"https://cdn.sanity.io/images/3ugk85nk/production/9e94976c80ef925b66d24e5d14f0d47baa6b8f88-250x250.png"
)
_documentation_url = HttpUrl(
"https://docs.prefect.io/latest/automate/events/automations-triggers#sending-notifications-with-automations"
)
webhook_id: SecretStr = Field(
default=...,
description=(
"The first part of 2 tokens provided to you after creating a"
" incoming-webhook."
),
)
webhook_token: SecretStr = Field(
default=...,
description=(
"The second part of 2 tokens provided to you after creating a"
" incoming-webhook."
),
)
botname: Optional[str] = Field(
title="Bot name",
default=None,
description=(
"Identify the name of the bot that should issue the message. If one isn't"
" specified then the default is to just use your account (associated with"
" the incoming-webhook)."
),
)
tts: bool = Field(
default=False,
description="Whether to enable Text-To-Speech.",
)
include_image: bool = Field(
default=False,
description=(
"Whether to include an image in-line with the message describing the"
" notification type."
),
)
avatar: bool = Field(
default=False,
description="Whether to override the default discord avatar icon.",
)
avatar_url: Optional[str] = Field(
title="Avatar URL",
default=None,
description=(
"Over-ride the default discord avatar icon URL. By default this is not set"
" and Apprise chooses the URL dynamically based on the type of message"
" (info, success, warning, or error)."
),
)
def block_initialization(self) -> None:
try:
# Try importing for apprise>=1.18.0
from apprise.plugins.discord import NotifyDiscord
except ImportError:
# Fallback for versions apprise<1.18.0
from apprise.plugins.NotifyDiscord import ( # pyright: ignore[reportMissingImports] this is a fallback
NotifyDiscord, # pyright: ignore[reportUnknownVariableType] incomplete type hints in apprise
)
url = SecretStr(
NotifyDiscord(
webhook_id=self.webhook_id.get_secret_value(),
webhook_token=self.webhook_token.get_secret_value(),
botname=self.botname,
tts=self.tts,
include_image=self.include_image,
avatar=self.avatar,
avatar_url=self.avatar_url,
).url() # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType] incomplete type hints in apprise
)
self._start_apprise_client(url)
|
DiscordWebhook
|
python
|
google__jax
|
tests/sparse_bcoo_bcsr_test.py
|
{
"start": 72026,
"end": 79038
}
|
class ____(sptu.SparseTestCase):
@jtu.sample_product(
[
dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
for layout in sptu.iter_bcsr_layouts(shape)
],
dtype=all_dtypes,
)
def test_bcsr_dense_round_trip(self, shape, dtype, n_batch, n_dense):
n_sparse = len(shape) - n_batch - n_dense
rng = sptu.rand_sparse(self.rng())
M = rng(shape, dtype)
nse = sparse.util._count_stored_elements(M, n_batch=n_batch, n_dense=n_dense)
def round_trip(M):
return sparse.BCSR.fromdense(M, nse=nse, n_batch=n_batch, n_dense=n_dense).todense()
args_maker = lambda: [M]
ident = lambda x: x
self._CheckAgainstNumpy(ident, round_trip, args_maker)
self._CompileAndCheck(round_trip, args_maker)
self._CheckBatchingSparse(ident, round_trip, args_maker, bdims=self._random_bdims(n_batch))
if jnp.issubdtype(dtype, jnp.floating):
# For n_sparse != 0, we can't use an identity because output zeros must not
# be dependent on input zeros. This mimics the code in count_stored_elements().
def expected(M):
if n_sparse == 0: return M
mask = (M != 0).any(range(M.ndim - n_dense, M.ndim), keepdims=True)
return jnp.where(mask, M, 0)
self._CheckGradsSparse(expected, round_trip, args_maker)
@jtu.sample_product(
[
dict(shape=shape, n_batch=n_batch)
for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
for n_batch in range(len(shape) - 1)
],
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcsr_bcoo_round_trip(self, shape, n_batch, dtype):
n_sparse = 2
n_dense = len(shape) - n_sparse - n_batch
rng = self.rng()
sprng = sptu.rand_bcsr(rng, n_batch=n_batch, n_dense=n_dense)
M_bcsr = sprng(shape, dtype)
self.assertIsInstance(M_bcsr, sparse.BCSR)
M_dense = M_bcsr.todense()
M_bcoo = M_bcsr.to_bcoo()
self.assertIsInstance(M_bcoo, sparse.BCOO)
self.assertAllClose(M_dense, M_bcoo.todense())
M_bcsr2 = sparse.BCSR.from_bcoo(M_bcoo)
self.assertAllClose(M_dense, M_bcsr2.todense())
self.assertArraysEqual(M_bcsr.indptr, M_bcsr2.indptr)
# TODO(jakevdp): This will only be true in general when M_bcsr.indices is sorted.
# self.assertSparseArraysEquivalent(M_bcsr, M_bcsr2)
@jtu.sample_product(
[
dict(shape=shape, n_batch=n_batch)
for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
for n_batch in range(len(shape) - 1)
],
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcsr_extract(self, shape, dtype, n_batch):
n_dense = len(shape) - n_batch - 2
rng = sptu.rand_sparse(self.rng())
M = rng(shape, dtype)
nse = sparse.util._count_stored_elements(M, n_batch=n_batch,
n_dense=n_dense)
data, indices, indptr = sparse_bcsr._bcsr_fromdense(
M, nse=nse, n_batch=n_batch, n_dense=n_dense)
data2 = sparse.bcsr_extract(indices, indptr, M)
self.assertArraysEqual(data, data2)
args_maker_bcsr_extract = lambda: [indices, indptr, M]
self._CompileAndCheck(sparse.bcsr_extract, args_maker_bcsr_extract)
@jtu.sample_product(
props=_generate_batched_dot_general_properties(
shapes=((2, 3), (2, 3, 4), (2, 3, 4, 4)), sparse_format="bcsr"
),
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
@jax.default_matmul_precision("float32")
def test_bcsr_dot_general(
self, dtype: np.dtype, props: sptu.BatchedDotGeneralProperties
):
rng = jtu.rand_default(self.rng())
sprng = sptu.rand_bcsr(self.rng(), n_batch=props.n_batch, n_dense=props.n_dense)
args_maker = lambda: [sprng(props.lhs_shape, dtype),
rng(props.rhs_shape, dtype)]
dense_fun = partial(lax.dot_general,
dimension_numbers=props.dimension_numbers)
sparse_fun = partial(sparse.bcsr_dot_general,
dimension_numbers=props.dimension_numbers)
tol = {np.float64: 1E-12, np.complex128: 1E-12,
np.float32: 1E-5, np.complex64: 1E-5}
self._CheckAgainstDense(dense_fun, sparse_fun, args_maker, tol=tol)
if jnp.issubdtype(dtype, jnp.floating) and props.n_dense == 0:
# Dense dimensions not yet fully supported in reverse mode.
modes = ['fwd'] if props.n_dense != 0 else ['fwd', 'rev']
self._CheckGradsSparse(dense_fun, sparse_fun, args_maker, modes=modes, atol=tol, rtol=tol)
self._CheckBatchingSparse(dense_fun, sparse_fun, args_maker, atol=tol, rtol=tol,
bdims=self._random_bdims(props.n_batch, len(props.rhs_shape)))
@jtu.sample_product(
[
dict(shape=shape, n_batch=layout.n_batch, n_dense=layout.n_dense)
for shape in [(3, 5), (3, 5, 4)]
for layout in sptu.iter_bcsr_layouts(shape)
],
dtype=all_dtypes,
)
def test_bcsr_broadcast_in_dim(self, shape, dtype, n_batch, n_dense):
rng = sptu.rand_sparse(self.rng())
x = jnp.array(rng(shape, dtype))
xsp = sparse.BCSR.fromdense(x, n_batch=n_batch, n_dense=n_dense)
self.assertEqual(xsp[None].n_batch, xsp.n_batch + 1)
self.assertArraysEqual(xsp[None].todense(), x[None])
if n_batch == 1:
self.assertEqual(xsp[:, None].n_batch, xsp.n_batch + 1)
self.assertArraysEqual(xsp[:, None].todense(), x[:, None])
@jtu.sample_product(
[
dict(
shape=shape,
n_batch=layout.n_batch,
n_dense=layout.n_dense,
dimension=dimension,
)
for shape in [(3, 5), (3, 5, 4)]
for layout in sptu.iter_sparse_layouts(shape)
for dimension in range(
len(shape) - layout.n_dense
) # Concatenation of dense dimensions not implemented.
],
dtype=all_dtypes,
)
def test_bcsr_concatenate(self, shape, dtype, n_batch, n_dense, dimension):
sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch, n_dense=n_dense)
args_maker = lambda: [[sprng(shape, dtype) for i in range(3)]]
dense_func = partial(lax.concatenate, dimension=dimension)
sparse_func = partial(sparse.bcoo_concatenate, dimension=dimension)
self._CheckAgainstDense(dense_func, sparse_func, args_maker)
if jnp.issubdtype(dtype, jnp.floating):
self._CheckGradsSparse(dense_func, sparse_func, args_maker)
def test_bcoo_spdot_abstract_eval_bug(self):
# Regression test for https://github.com/jax-ml/jax/issues/21921
lhs = sparse.BCOO(
(jnp.float32([[1]]), lax.broadcasted_iota(jnp.int32, (10, 1, 1), 0)),
shape=(10, 10))
rhs = sparse.BCOO(
(jnp.float32([1]), jnp.int32([[3]])),
shape=(10,))
args_maker = lambda: [lhs, rhs]
def func(lhs, rhs):
return (lhs @ rhs).todense()
self._CompileAndCheck(func, args_maker)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
BCSRTest
|
python
|
getsentry__sentry
|
src/sentry/monitors/consumers/incident_occurrences_consumer.py
|
{
"start": 5875,
"end": 6296
}
|
class ____(ProcessingStrategyFactory[KafkaPayload]):
def __init__(self) -> None:
pass
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
return RunTask(
function=process_incident_occurrence,
next_step=CommitOffsets(commit),
)
|
MonitorIncidentOccurenceStrategyFactory
|
python
|
ray-project__ray
|
python/ray/llm/_internal/batch/stages/vllm_engine_stage.py
|
{
"start": 21604,
"end": 25968
}
|
class ____(StatefulStage):
"""
A stage that runs vLLM engine.
"""
fn: Type[StatefulStageUDF] = vLLMEngineStageUDF
@root_validator(pre=True)
def post_init(cls, values):
"""Post-initialize the stage. Specifically,
this function determines the num_gpus and Ray remote args
for the .map_batches() call in this stage.
Args:
values: The raw stage values.
Returns:
The updated values.
"""
map_batches_kwargs = values["map_batches_kwargs"]
accelerator_type = map_batches_kwargs.get("accelerator_type", "")
fn_constructor_kwargs = values["fn_constructor_kwargs"]
engine_kwargs = fn_constructor_kwargs.get("engine_kwargs", {})
ray_remote_args = {}
if accelerator_type:
ray_remote_args["accelerator_type"] = accelerator_type
# Setup num_workers required per vLLM engine.
tp_size = engine_kwargs.get("tensor_parallel_size", 1)
pp_size = engine_kwargs.get("pipeline_parallel_size", 1)
num_bundles_per_replica = tp_size * pp_size
# Use the MP backend by default.
engine_kwargs.setdefault("distributed_executor_backend", "mp")
executor_backend = engine_kwargs.get("distributed_executor_backend")
# When Ray is used in the vLLM engine, we set num_devices to 0 so that
# Ray Data won't reserve GPUs in advance. Instead, we specify scheduling
# strategy in .map_batches() arguments and let vLLM Ray executor to
# create placement groups for each TP/PP worker.
placement_group_config = fn_constructor_kwargs.pop(
"placement_group_config", None
)
if executor_backend == "ray":
# Note that we have to use partial() to pass a function
# instead of an object.
map_batches_kwargs["ray_remote_args_fn"] = partial(
_ray_scheduling_strategy_fn,
num_bundles_per_replica,
accelerator_type,
placement_group_config,
)
ray_remote_args["num_gpus"] = 0
else:
if not placement_group_config:
# Default to GPUs per bundle if placement group is not specified.
ray_remote_args["num_gpus"] = num_bundles_per_replica
else:
bundles = placement_group_config["bundles"]
resource_counter = Counter()
for bundle in bundles:
resource_counter.update(bundle)
total_cpus = resource_counter.pop("CPU", 0)
total_gpus = resource_counter.pop("GPU", 0)
# Ray Data expects CPU/GPU to be specified via num_cpus/num_gpus,
# not inside the resources dict.
if total_cpus:
ray_remote_args["num_cpus"] = total_cpus
if total_gpus:
ray_remote_args["num_gpus"] = total_gpus
# Keep only non-CPU/GPU custom resources, if any.
if resource_counter:
ray_remote_args["resources"] = dict(resource_counter)
map_batches_kwargs.update(ray_remote_args)
return values
def get_required_input_keys(self) -> Dict[str, str]:
"""The required input keys of the stage and their descriptions."""
ret = {"prompt": "The text prompt (str)."}
task_type = self.fn_constructor_kwargs.get("task_type", vLLMTaskType.GENERATE)
if task_type == vLLMTaskType.GENERATE:
ret["sampling_params"] = (
"The sampling parameters. See "
"https://docs.vllm.ai/en/latest/api/inference_params.html#sampling-parameters "
"for details."
)
return ret
def get_optional_input_keys(self) -> Dict[str, str]:
"""The optional input keys of the stage and their descriptions."""
return {
"tokenized_prompt": "The tokenized prompt. If provided, the prompt will not be tokenized by the vLLM engine.",
"image": "The image(s) for multimodal input. Accepts a single image or list of images.",
"model": "The model to use for this request. If the model is different from the "
"model set in the stage, then this is a LoRA request.",
}
|
vLLMEngineStage
|
python
|
huggingface__transformers
|
examples/modular-transformers/modeling_test_detr.py
|
{
"start": 38673,
"end": 45341
}
|
class ____(TestDetrPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`TestDetrEncoderLayer`].
The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers.
Args:
config: TestDetrConfig
"""
def __init__(self, config: TestDetrConfig):
super().__init__(config)
self.gradient_checkpointing = False
self.dropout = config.dropout
self.layers = nn.ModuleList([TestDetrEncoderLayer(config) for _ in range(config.encoder_layers)])
# Initialize weights and apply final processing
self.post_init()
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
"""
Get reference points for each feature map. Used in decoder.
Args:
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Valid ratios of each feature map.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
"""
reference_points_list = []
for level, (height, width) in enumerate(spatial_shapes):
ref_y, ref_x = meshgrid(
torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device),
torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device),
indexing="ij",
)
# TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(
self,
inputs_embeds=None,
attention_mask=None,
position_embeddings=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
valid_ratios=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
spatial_shapes_tuple = tuple(spatial_shapes_list)
reference_points = self.get_reference_points(spatial_shapes_tuple, valid_ratios, device=inputs_embeds.device)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_states,
attentions=all_attentions,
)
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
|
TestDetrEncoder
|
python
|
getsentry__sentry
|
src/sentry/grouping/fingerprinting/rules.py
|
{
"start": 414,
"end": 530
}
|
class ____(NamedTuple):
fingerprint: list[str]
attributes: FingerprintRuleAttributes
|
FingerprintWithAttributes
|
python
|
huggingface__transformers
|
tests/models/imagegpt/test_modeling_imagegpt.py
|
{
"start": 1455,
"end": 8109
}
|
class ____:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
def prepare_config_and_inputs(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config(
gradient_checkpointing=gradient_checkpointing,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
reorder_and_upcast_attn=reorder_and_upcast_attn,
)
return (
config,
input_ids,
input_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
return ImageGPTConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
n_inner=self.intermediate_size,
activation_function=self.hidden_act,
resid_pdrop=self.hidden_dropout_prob,
attn_pdrop=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
use_cache=True,
gradient_checkpointing=gradient_checkpointing,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
reorder_and_upcast_attn=reorder_and_upcast_attn,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 513
config.max_position_embeddings = 1024
return config
def create_and_check_imagegpt_model(self, config, input_ids, input_mask, token_type_ids, *args):
model = ImageGPTModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, token_type_ids, *args):
model = ImageGPTForCausalImageModeling(config)
model.to(torch_device)
model.eval()
labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1)
result = model(input_ids, token_type_ids=token_type_ids, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
# ImageGPTForCausalImageModeling doesn't have tied input- and output embeddings
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size - 1))
def create_and_check_imagegpt_for_image_classification(
self, config, input_ids, input_mask, token_type_ids, mc_token_ids, sequence_labels, *args
):
config.num_labels = self.num_labels
model = ImageGPTForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
return config, inputs_dict
@require_torch
|
ImageGPTModelTester
|
python
|
Lightning-AI__lightning
|
tests/parity_pytorch/models.py
|
{
"start": 1028,
"end": 2362
}
|
class ____(LightningModule):
def __init__(self, backbone="resnet101", hidden_dim=1024, learning_rate=1e-3, weights="DEFAULT"):
super().__init__()
self.save_hyperparameters()
self.learning_rate = learning_rate
self.num_classes = 10
self.backbone = get_torchvision_model(backbone, weights=weights)
self.classifier = torch.nn.Sequential(
torch.nn.Linear(1000, hidden_dim), torch.nn.Linear(hidden_dim, self.num_classes)
)
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self._loss = [] # needed for checking if the loss is the same as vanilla torch
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.backbone(x)
y_hat = self.classifier(y_hat)
loss = F.cross_entropy(y_hat, y)
self._loss.append(loss.item())
return {"loss": loss}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.learning_rate)
def train_dataloader(self):
return DataLoader(
CIFAR10(root=_PATH_DATASETS, train=True, download=True, transform=self.transform),
batch_size=32,
num_workers=1,
)
|
ParityModuleCIFAR
|
python
|
huggingface__transformers
|
tests/models/exaone4/test_modeling_exaone4.py
|
{
"start": 1518,
"end": 8576
}
|
class ____(unittest.TestCase):
TEST_MODEL_ID = "LGAI-EXAONE/EXAONE-4.0-32B"
def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
# TODO (joao): automatic compilation, i.e. compilation when `cache_implementation="static"` is used, leaves
# some memory allocated in the cache, which means some object is not being released properly. This causes some
# unoptimal memory usage, e.g. after certain teruff format examples tests src utilssts a 7B model in FP16 no longer fits in a 24GB GPU.
# Investigate the root cause.
cleanup(torch_device, gc_collect=True)
@slow
def test_model_logits(self):
input_ids = [405, 7584, 79579, 76636, 2907, 94640, 373]
model = Exaone4ForCausalLM.from_pretrained(
self.TEST_MODEL_ID,
device_map="auto",
dtype=torch.bfloat16,
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
with torch.no_grad():
out = model(input_ids).logits.float().cpu()
EXPECTED_MEAN = torch.tensor([[22.1993, 8.5845, 10.0401, 12.4262, 9.3112, 29.7933, 8.2628]])
EXPECTED_SLICE = torch.tensor(
[20.6250, 19.6250, 14.5000, 21.1250, 24.5000, 22.1250, 24.0000, 24.8750, 25.0000, 25.3750]
)
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
torch.testing.assert_close(out[0, 0, :10], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)
@slow
def test_model_generation_eager(self):
EXPECTED_TEXT = "Tell me about the Miracle on the Han river.\n\nOkay, the Miracle on the Han River refers to the rapid industrialization and economic growth of South"
prompt = "Tell me about the Miracle on the Han river."
tokenizer = AutoTokenizer.from_pretrained(self.TEST_MODEL_ID)
model = Exaone4ForCausalLM.from_pretrained(
self.TEST_MODEL_ID, device_map="auto", dtype=torch.bfloat16, attn_implementation="eager"
)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, text)
@slow
def test_model_generation_sdpa(self):
EXPECTED_TEXT = "Tell me about the Miracle on the Han river.\n\nOkay, the Miracle on the Han River refers to the rapid industrialization and economic growth of South"
prompt = "Tell me about the Miracle on the Han river."
tokenizer = AutoTokenizer.from_pretrained(self.TEST_MODEL_ID)
model = Exaone4ForCausalLM.from_pretrained(
self.TEST_MODEL_ID, device_map="auto", dtype=torch.bfloat16, attn_implementation="sdpa"
)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, text)
@pytest.mark.flash_attn_test
@slow
@require_torch_accelerator
@require_flash_attn
def test_model_generation_long_flash(self):
EXPECTED_OUTPUT_TOKEN_IDS = [433, 9055]
input_ids = [433, 9055] * 2048
model = Exaone4ForCausalLM.from_pretrained(
self.TEST_MODEL_ID, device_map="auto", dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
@slow
@require_torch_accelerator
def test_model_generation_beyond_sliding_window(self):
EXPECTED_TEXT_COMPLETION = " This is a nice place. I really enjoy the scenery, and the atmosphere is so relaxing. I'm grateful for the opportunity to experience this place. It"
tokenizer = AutoTokenizer.from_pretrained(self.TEST_MODEL_ID)
prompt = "This is a nice place. " * 700 + "I really enjoy the scenery,"
model = Exaone4ForCausalLM.from_pretrained(
self.TEST_MODEL_ID, device_map="auto", dtype=torch.bfloat16, attn_implementation="sdpa"
)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0, -32:], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@pytest.mark.torch_export_test
@slow
def test_export_static_cache(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
convert_and_export_with_cache,
)
tokenizer = AutoTokenizer.from_pretrained(self.TEST_MODEL_ID, padding_side="right")
EXPECTED_TEXT_COMPLETION = ["The Deep Learning is \n['Deep Learning',"]
max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
"input_ids"
].shape[-1]
# Load model
device = "cpu"
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = Exaone4ForCausalLM.from_pretrained(
self.TEST_MODEL_ID,
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_generation_length,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_generation_length,
},
),
)
prompt = ["The Deep Learning is "]
prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
# Static Cache + export
exported_program = convert_and_export_with_cache(model)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
|
Exaone4IntegrationTest
|
python
|
pytorch__pytorch
|
torch/ao/quantization/quantizer/quantizer.py
|
{
"start": 601,
"end": 847
}
|
class ____(ABC): # noqa: B024
"""Base class for different types of quantization specs that allows users to
specify how to quantize a Tensor (input/output of a Node) in the model
"""
@dataclass(eq=True, frozen=True)
|
QuantizationSpecBase
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_fibonacci_number.py
|
{
"start": 997,
"end": 2008
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_fibonacci_number"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_fibonacci_number(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBeFibonacciNumber
|
python
|
huggingface__transformers
|
src/transformers/models/jamba/modeling_jamba.py
|
{
"start": 48569,
"end": 48777
}
|
class ____(GenericForSequenceClassification, JambaPreTrainedModel):
pass
__all__ = ["JambaForCausalLM", "JambaForSequenceClassification", "JambaModel", "JambaPreTrainedModel"]
|
JambaForSequenceClassification
|
python
|
django__django
|
django/core/checks/messages.py
|
{
"start": 1890,
"end": 2013
}
|
class ____(CheckMessage):
def __init__(self, *args, **kwargs):
super().__init__(WARNING, *args, **kwargs)
|
Warning
|
python
|
jupyterlab__jupyterlab
|
jupyterlab/labextensions.py
|
{
"start": 12387,
"end": 12861
}
|
class ____(BaseExtensionApp):
description = "Unlink packages by name or path"
def run_task(self):
self.extra_args = self.extra_args or [os.getcwd()]
options = AppOptions(
app_dir=self.app_dir,
logger=self.log,
labextensions_path=self.labextensions_path,
core_config=self.core_config,
)
return any(unlink_package(arg, app_options=options) for arg in self.extra_args)
|
UnlinkLabExtensionApp
|
python
|
gevent__gevent
|
src/gevent/tests/test__socket_ssl.py
|
{
"start": 356,
"end": 865
}
|
class ____(greentest.TestCase):
__timeout__ = 30
def test_amazon_response(self):
conn = httplib.HTTPSConnection('sdb.amazonaws.com')
conn.request('GET', '/')
conn.getresponse()
def test_str_and_repr(self):
conn = socket.socket()
conn.connect(('sdb.amazonaws.com', 443))
ssl_conn = socket.ssl(conn) # pylint:disable=no-member
assert str(ssl_conn)
assert repr(ssl_conn)
if __name__ == "__main__":
greentest.main()
|
AmazonHTTPSTests
|
python
|
great-expectations__great_expectations
|
docs/docusaurus/docs/core/customize_expectations/_examples/define_a_custom_expectation_class.py
|
{
"start": 1856,
"end": 2803
}
|
class ____(gx.expectations.ExpectColumnValuesToBeBetween):
# </snippet>
column: str = "passenger_count"
min_value: int = 1
max_value: int = 6
# </snippet>
description: str = "There should be between **1** and **6** passengers."
# </snippet>
# Create an instance of the custom Expectation
# <snippet name="docs/docusaurus/docs/core/customize_expectations/_examples/define_a_custom_expectation_class.py - instantiate a Custom Expectation">
expectation = ExpectValidPassengerCount() # Uses the predefined default values
# </snippet>
# Optional. Test the Expectation with some sample data
data_source_name = "my_data_source"
asset_name = "my_data_asset"
batch_definition_name = "my_batch_definition"
batch = (
context.data_sources.get(data_source_name)
.get_asset(asset_name)
.get_batch_definition(batch_definition_name)
.get_batch()
)
print(batch.validate(expectation))
# </snippet>
|
ExpectValidPassengerCount
|
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/data_table_row_labels.py
|
{
"start": 553,
"end": 1098
}
|
class ____(App):
def compose(self) -> ComposeResult:
yield DataTable()
def on_mount(self) -> None:
table = self.query_one(DataTable)
table.fixed_rows = 1
table.fixed_columns = 1
table.focus()
rows = iter(ROWS)
column_labels = next(rows)
for column in column_labels:
table.add_column(column, key=column)
for index, row in enumerate(rows):
table.add_row(*row, label=str(index))
app = TableApp()
if __name__ == "__main__":
app.run()
|
TableApp
|
python
|
conda__conda
|
tests/plugins/test_transaction_hooks.py
|
{
"start": 492,
"end": 561
}
|
class ____(DummyTransactionAction):
pass
|
DummyPostTransactionAction
|
python
|
modin-project__modin
|
asv_bench/benchmarks/benchmarks.py
|
{
"start": 12718,
"end": 14146
}
|
class ____:
param_names = ["shape", "axis"]
params = [
get_benchmark_shapes("TimeArithmetic"),
[0, 1],
]
def setup(self, shape, axis):
self.df = generate_dataframe("int", *shape, RAND_LOW, RAND_HIGH)
def time_sum(self, shape, axis):
execute(self.df.sum(axis=axis))
def time_count(self, shape, axis):
execute(self.df.count(axis=axis))
def time_median(self, shape, axis):
execute(self.df.median(axis=axis))
def time_nunique(self, shape, axis):
execute(self.df.nunique(axis=axis))
def time_apply(self, shape, axis):
execute(self.df.apply(lambda df: df.sum(), axis=axis))
def time_mean(self, shape, axis):
execute(self.df.mean(axis=axis))
def time_mode(self, shape, axis):
execute(self.df.mode(axis=axis))
def time_add(self, shape, axis):
execute(self.df.add(2, axis=axis))
def time_mul(self, shape, axis):
execute(self.df.mul(2, axis=axis))
def time_mod(self, shape, axis):
execute(self.df.mod(2, axis=axis))
def time_abs(self, shape, axis):
execute(self.df.abs())
def time_aggregate(self, shape, axis):
execute(self.df.aggregate(lambda df: df.sum(), axis=axis))
def time_is_in(self, shape, axis):
execute(self.df.isin([0, 2]))
def time_transpose(self, shape, axis):
execute(self.df.transpose())
|
TimeArithmetic
|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/_wsgi_common.py
|
{
"start": 7073,
"end": 7558
}
|
class ____:
"""
Wrapper to make it possible to use list[HttpStatusCodeRange] as a Container[int].
Used for backwards compatibility with the old `failed_request_status_codes` option.
"""
def __init__(self, code_ranges):
# type: (list[HttpStatusCodeRange]) -> None
self._code_ranges = code_ranges
def __contains__(self, item):
# type: (object) -> bool
return _in_http_status_code_range(item, self._code_ranges)
|
HttpCodeRangeContainer
|
python
|
django__django
|
django/contrib/postgres/operations.py
|
{
"start": 373,
"end": 2681
}
|
class ____(Operation):
reversible = True
category = OperationCategory.ADDITION
def __init__(self, name, hints=None):
self.name = name
self.hints = hints or {}
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.vendor != "postgresql" or not router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
return
if not self.extension_exists(schema_editor, self.name):
schema_editor.execute(
"CREATE EXTENSION IF NOT EXISTS %s"
% schema_editor.quote_name(self.name)
)
# Clear cached, stale oids.
get_hstore_oids.cache_clear()
get_citext_oids.cache_clear()
# Registering new type handlers cannot be done before the extension is
# installed, otherwise a subsequent data migration would use the same
# connection.
register_type_handlers(schema_editor.connection)
if hasattr(schema_editor.connection, "register_geometry_adapters"):
schema_editor.connection.register_geometry_adapters(
schema_editor.connection.connection, True
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if not router.allow_migrate(
schema_editor.connection.alias, app_label, **self.hints
):
return
if self.extension_exists(schema_editor, self.name):
schema_editor.execute(
"DROP EXTENSION IF EXISTS %s" % schema_editor.quote_name(self.name)
)
# Clear cached, stale oids.
get_hstore_oids.cache_clear()
get_citext_oids.cache_clear()
def extension_exists(self, schema_editor, extension):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
"SELECT 1 FROM pg_extension WHERE extname = %s",
[extension],
)
return bool(cursor.fetchone())
def describe(self):
return "Creates extension %s" % self.name
@property
def migration_name_fragment(self):
return "create_extension_%s" % self.name
|
CreateExtension
|
python
|
doocs__leetcode
|
solution/0400-0499/0467.Unique Substrings in Wraparound String/Solution.py
|
{
"start": 0,
"end": 337
}
|
class ____:
def findSubstringInWraproundString(self, s: str) -> int:
f = defaultdict(int)
k = 0
for i, c in enumerate(s):
if i and (ord(c) - ord(s[i - 1])) % 26 == 1:
k += 1
else:
k = 1
f[c] = max(f[c], k)
return sum(f.values())
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/finding-pairs-with-a-certain-sum.py
|
{
"start": 112,
"end": 834
}
|
class ____(object):
def __init__(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
"""
self.__nums2 = nums2
self.__count1 = collections.Counter(nums1)
self.__count2 = collections.Counter(nums2)
def add(self, index, val):
"""
:type index: int
:type val: int
:rtype: None
"""
self.__count2[self.__nums2[index]] -= 1
self.__nums2[index] += val
self.__count2[self.__nums2[index]] += 1
def count(self, tot):
"""
:type tot: int
:rtype: int
"""
return sum(cnt * self.__count2[tot-x] for x, cnt in self.__count1.iteritems())
|
FindSumPairs
|
python
|
doocs__leetcode
|
solution/3200-3299/3247.Number of Subsequences with Odd Sum/Solution.py
|
{
"start": 0,
"end": 347
}
|
class ____:
def subsequenceCount(self, nums: List[int]) -> int:
mod = 10**9 + 7
f = [0] * 2
for x in nums:
if x % 2:
f[0], f[1] = (f[0] + f[1]) % mod, (f[0] + f[1] + 1) % mod
else:
f[0], f[1] = (f[0] + f[0] + 1) % mod, (f[1] + f[1]) % mod
return f[1]
|
Solution
|
python
|
donnemartin__interactive-coding-challenges
|
online_judges/merge_ranges/test_merge_ranges.py
|
{
"start": 18,
"end": 841
}
|
class ____(unittest.TestCase):
def test_merge_ranges(self):
solution = Solution()
self.assertRaises(TypeError, solution.merge_ranges, None)
self.assertEqual(solution.merge_ranges([]), [])
array = [(2, 3), (7, 9)]
expected = [(2, 3), (7, 9)]
self.assertEqual(solution.merge_ranges(array), expected)
array = [(3, 5), (2, 3), (7, 9), (8, 10)]
expected = [(2, 5), (7, 10)]
self.assertEqual(solution.merge_ranges(array), expected)
array = [(2, 3), (3, 5), (7, 9), (8, 10), (1, 11)]
expected = [(1, 11)]
self.assertEqual(solution.merge_ranges(array), expected)
print('Success: test_merge_ranges')
def main():
test = TestMergeRanges()
test.test_merge_ranges()
if __name__ == '__main__':
main()
|
TestMergeRanges
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/modeling_emu3.py
|
{
"start": 46758,
"end": 49998
}
|
class ____(Emu3PreTrainedModel):
_can_record_outputs = {
"hidden_states": Emu3DecoderLayer,
"attentions": Emu3Attention,
}
def __init__(self, config: Emu3Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Emu3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Emu3RotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
|
Emu3TextModel
|
python
|
ray-project__ray
|
python/ray/experimental/collective/collective.py
|
{
"start": 615,
"end": 8228
}
|
class ____:
"""Singleton class to store the mapping between actors and communicators
that the actors are a part of.
"""
def __init__(self):
# Handles to communicators that we created. Key is a user-provided
# name or UUID.
self._remote_communicators: Dict[str, CommunicatorHandle] = {}
@staticmethod
def get() -> "RemoteCommunicatorManager":
global _remote_communicator_manager
with _remote_communicator_manager_lock:
if _remote_communicator_manager is None:
_remote_communicator_manager = RemoteCommunicatorManager()
return _remote_communicator_manager
def add_remote_communicator(self, comm_handle: CommunicatorHandle):
self._remote_communicators[comm_handle.name] = comm_handle
def remove_remote_communicator(self, name: str):
return self._remote_communicators.pop(name, None)
def get_collective_groups(
self,
actors: Optional[List[ray.actor.ActorHandle]] = None,
backend: Optional[str] = None,
):
"""
Get the collective groups that the given actors are a subset of. Filter by
backend if provided.
"""
actors = actors or []
actors = set(actors)
collectives = []
# Find all collective groups that the given actors are a subset
# of, with the matching backend if provided.
for collective in self._remote_communicators.values():
if actors.issubset(set(collective.actors)):
if backend is None or collective.backend == backend:
collectives.append(collective)
return collectives
def _do_init_collective_group(
self,
world_size: int,
rank: int,
backend: str = Backend.NCCL,
name: str = "default",
):
"""Helper method that runs as a task on a remote actor to create a
collective group.
"""
ray.util.collective.init_collective_group(
world_size, rank, backend, group_name=name
)
def _do_destroy_collective_group(self, name):
"""Helper method that runs as a task on a remote actor to destroy a
collective group.
"""
ray.util.collective.destroy_collective_group(name)
@PublicAPI(stability="alpha")
def get_collective_groups(
actors: List[ray.actor.ActorHandle], backend: Optional[str] = None
) -> List[CommunicatorHandle]:
"""
Get the collective groups that the given actors are a subset of. Filter by
backend if provided.
Args:
actors: List of actors. Return handles to all collective groups that
these actors are a subset of.
backend: An optional backend to filter by. See
ray.util.collective.types.Backend for valid backends.
Returns:
A list of communicator handles that the actors are a subset of.
"""
manager = RemoteCommunicatorManager.get()
return manager.get_collective_groups(actors, backend)
@PublicAPI(stability="alpha")
def create_collective_group(
actors: List[ray.actor.ActorHandle],
backend: str,
name: Optional[str] = None,
) -> CommunicatorHandle:
"""Create a collective group on the given list of actors. If this function
returns successfully, then the collective group has been initialized on all
actors, using the given order of actors as the ranks.
Currently, an actor can only participate in one collective group per
backend at a time. To reuse an actor, destroy its collective group and
create a new one.
Args:
actors: The actors to participate in the collective group.
backend: The backend to use. See ray.util.collective.types.Backend for
valid backends.
name: A name to use for the collective group. If None is provided, a
random name will be generated.
Returns:
Handle to the communicator.
"""
manager = RemoteCommunicatorManager.get()
if name is None:
name = str(uuid.uuid4())
# Validate the backend.
backend = Backend(backend)
world_size = len(actors)
for actor in actors:
if manager.get_collective_groups([actor], backend):
raise RuntimeError(
f"Actor {actor} already in group for backend {backend}. Actors can currently only participate in at most one group per backend."
)
actor_ids = [actor._ray_actor_id for actor in actors]
if len(set(actor_ids)) != len(actor_ids):
raise ValueError(f"All actors must be unique, got: {actors}")
metadata_key = None
if backend == Backend.GLOO:
# Perform extra setup for torch.distributed.
# torch.distributed requires a master address and port. Find a suitable
# port on one of the actors.
master_addr, master_port = ray.get(
actors[0].__ray_call__.remote(lambda self: get_address_and_port())
)
# Store the metadata on a named actor that all of the other
# actors can access.
metadata_key = get_master_address_metadata_key(name)
internal_kv._internal_kv_put(metadata_key, f"{master_addr}:{master_port}")
try:
init_tasks = [
actor.__ray_call__.remote(
_do_init_collective_group, world_size, rank, backend, name
)
for rank, actor in enumerate(actors)
]
ray.get(init_tasks)
finally:
# Clean up the metadata once collective group is initialized
# (or failed to initialize).
if metadata_key is not None:
internal_kv._internal_kv_del(metadata_key)
# Group was successfully created.
# Register GLOO groups under TORCH_GLOO since GLOO uses torch.distributed.
registration_backend = Backend.TORCH_GLOO if backend == Backend.GLOO else backend
comm = CommunicatorHandle(actors, name, registration_backend)
manager.add_remote_communicator(comm)
return comm
@PublicAPI(stability="alpha")
def destroy_collective_group(group_or_name: Union[CommunicatorHandle, str]):
"""
Destroy a collective group. If this functions returns successfully, then
the actors that were in the collective can be reused to create a new
collective group.
Args:
group_or_name: Either a communicator handle or the name of the group to
destroy.
"""
if isinstance(group_or_name, CommunicatorHandle):
name = group_or_name.name
elif isinstance(group_or_name, str):
name = group_or_name
else:
raise ValueError("Expected CommunicatorHandle or str (group name).")
manager = RemoteCommunicatorManager.get()
group = manager.remove_remote_communicator(name)
if group is not None:
destroy_tasks = [
actor.__ray_call__.options(concurrency_group="_ray_system").remote(
_do_destroy_collective_group, name
)
for actor in group.actors
]
try:
ray.get(destroy_tasks)
except ray.exceptions.ActorDiedError:
pass
else:
raise ValueError(f"No group with name {name} found.")
@PublicAPI(stability="alpha")
def destroy_all_collective_groups():
"""
Destroy all collective groups. This will destroy all collective groups that
were previously created by this process. After this function returns, the
actors participating in those collective groups can be reused to create a
new collective group.
"""
manager = RemoteCommunicatorManager.get()
for collective in manager.get_collective_groups():
destroy_collective_group(collective.name)
|
RemoteCommunicatorManager
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/api/test_flow_runs.py
|
{
"start": 86822,
"end": 87418
}
|
class ____:
async def test_history_interval_must_be_one_second_or_larger(self, client):
response = await client.post(
"/flow_runs/history",
json=dict(
history_start=str(now("UTC")),
history_end=str(now("UTC") + datetime.timedelta(days=1)),
history_interval_seconds=0.9,
),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert b"History interval must not be less than 1 second" in response.content
|
TestFlowRunHistory
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_mutating_webhook_configuration.py
|
{
"start": 383,
"end": 7187
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'webhooks': 'list[V1MutatingWebhook]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'webhooks': 'webhooks'
}
def __init__(self, api_version=None, kind=None, metadata=None, webhooks=None, local_vars_configuration=None): # noqa: E501
"""V1MutatingWebhookConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._webhooks = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if webhooks is not None:
self.webhooks = webhooks
@property
def api_version(self):
"""Gets the api_version of this V1MutatingWebhookConfiguration. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1MutatingWebhookConfiguration. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1MutatingWebhookConfiguration.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1MutatingWebhookConfiguration. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1MutatingWebhookConfiguration. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1MutatingWebhookConfiguration. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1MutatingWebhookConfiguration.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1MutatingWebhookConfiguration. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1MutatingWebhookConfiguration. # noqa: E501
:return: The metadata of this V1MutatingWebhookConfiguration. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1MutatingWebhookConfiguration.
:param metadata: The metadata of this V1MutatingWebhookConfiguration. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def webhooks(self):
"""Gets the webhooks of this V1MutatingWebhookConfiguration. # noqa: E501
Webhooks is a list of webhooks and the affected resources and operations. # noqa: E501
:return: The webhooks of this V1MutatingWebhookConfiguration. # noqa: E501
:rtype: list[V1MutatingWebhook]
"""
return self._webhooks
@webhooks.setter
def webhooks(self, webhooks):
"""Sets the webhooks of this V1MutatingWebhookConfiguration.
Webhooks is a list of webhooks and the affected resources and operations. # noqa: E501
:param webhooks: The webhooks of this V1MutatingWebhookConfiguration. # noqa: E501
:type: list[V1MutatingWebhook]
"""
self._webhooks = webhooks
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1MutatingWebhookConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1MutatingWebhookConfiguration):
return True
return self.to_dict() != other.to_dict()
|
V1MutatingWebhookConfiguration
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 2792,
"end": 3082
}
|
class ____(BaseModel):
debug: bool = Field(..., description="")
service_debug_feature: bool = Field(..., description="")
recovery_mode: bool = Field(..., description="")
gpu: bool = Field(..., description="")
rocksdb: bool = Field(..., description="")
|
AppFeaturesTelemetry
|
python
|
huggingface__transformers
|
tests/models/mllama/test_modeling_mllama.py
|
{
"start": 9591,
"end": 17952
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `MllamaForConditionalGeneration`.
"""
all_model_classes = (
(
MllamaModel,
MllamaForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"image-text-to-text": MllamaForConditionalGeneration} if is_torch_available() else ()
_is_composite = True
def setUp(self):
self.model_tester = MllamaVisionText2TextModelTester(self)
self.config_tester = ConfigTester(
self, config_class=MllamaConfig, has_text_modality=False, common_properties=["image_token_index"]
)
def test_config(self):
self.config_tester.run_common_tests()
def test_resize_embeddings_results_in_successful_loss(self):
# resizing embeddings should result in successful loss computation
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
model = MllamaForConditionalGeneration(config).to(torch_device)
model_vocab_size = config.get_text_config().vocab_size
inputs = self._prepare_for_class(inputs, MllamaForConditionalGeneration, return_labels=True)
# Resize embeddings and call forward
model.resize_token_embeddings(model_vocab_size + 10)
output = model(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
labels=inputs["labels"],
return_dict=True,
)
self.assertTrue("loss" in output)
def _check_attentions_for_generate(
self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
):
# Mllama has cross attention layers and those have a different shape than normal attention layers
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
)
self.assertEqual(len(attentions), (output_length - prompt_length))
cross_attention_layers = self.model_tester.text_config["cross_attention_layers"]
use_cache = decoder_past_key_values is not None
for generated_length, iter_attentions in enumerate(attentions):
# regardless of using cache, the first forward pass will have the full prompt as input
if use_cache and generated_length > 0:
model_input_length = 1
else:
model_input_length = prompt_length + generated_length
query_length = prompt_length + generated_length
expected_shape = (
batch_size,
config.num_attention_heads,
model_input_length,
query_length,
)
expected_shape_cross = (
batch_size,
config.num_attention_heads,
model_input_length,
self.model_tester.image_length,
)
expected_shapes = [
expected_shape if layer_idx not in cross_attention_layers else expected_shape_cross
for layer_idx in range(len(iter_attentions))
]
self.assertListEqual([layer_attention.shape for layer_attention in iter_attentions], expected_shapes)
@require_optimum_quanto
@pytest.mark.generate
@unittest.skip("Mllama is actually an encoder decoder cache and thus can't supports quant cache")
def test_generate_with_quant_cache(self):
pass
@unittest.skip("For some unknown reasons the tests fails in CrossAttention layer when doing torch.sdpa(). ")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
@unittest.skip(reason="AssertionError: Items in the second set but not the first: might be a setting issue")
def test_model_parallelism(self):
pass
@unittest.skip(reason="Mllama can't assisted decoding due to cache format and `Cache.crop()`")
def test_assisted_decoding_with_num_logits_to_keep(self):
pass
@unittest.skip(reason="Mllama uses self.weights directly causing device mismatch when offloading`")
def test_cpu_offload(self):
pass
@unittest.skip(reason="Mllama uses self.weights directly causing device mismatch when offloading`")
def test_disk_offload_bin(self):
pass
@unittest.skip(reason="Mllama uses self.weights directly causing device mismatch when offloading`")
def test_disk_offload_safetensors(self):
pass
@unittest.skip("Mllama applies key/query norm which doesn't work with packing")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Mllama applies key/query norm which doesn't work with packing")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
pass
@unittest.skip("Mllama applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Mllama applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
# overridden because mllama has special cache for self and cross attentions
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
self.assertIsInstance(past_key_values, Cache)
num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
for layer_idx in range(len(past_key_values)):
if layer_idx in self.model_tester.text_config["cross_attention_layers"]:
expected_shape = (batch_size, num_heads, self.model_tester.image_length, head_dim)
else:
expected_shape = (batch_size, num_heads, seq_length, head_dim)
# check shape key, value
self.assertEqual(past_key_values.layers[layer_idx].keys.shape, expected_shape)
self.assertEqual(past_key_values.layers[layer_idx].values.shape, expected_shape)
def test_generate_text_only_with_cache(self):
"""
Tests that our cached generation with text-only inputs works. When mllama was introduced, this feature
required cache modifications (because layers are skipped in practice). This test should prevent regressions.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_generative_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
model.generate(input_ids, use_cache=True)
@pytest.mark.generate
def test_left_padding_compatibility(self):
# Overwrite -- mllama needs to prepare `cross_attention_mask`, and it must be padded accordingly
_, inputs_dict = self.prepare_config_and_inputs_for_generate()
input_ids = inputs_dict["input_ids"]
cross_attention_mask = inputs_dict["cross_attention_mask"]
pad_cross_attn_size = (input_ids.shape[0], 32, *cross_attention_mask.shape[2:])
extra_cross_attn_mask = torch.zeros(pad_cross_attn_size, dtype=cross_attention_mask.dtype, device=torch_device)
padded_cross_attention_mask = torch.cat([extra_cross_attn_mask, cross_attention_mask], dim=1)
# `cross_attention_mask` is randomly generated in `prepare_config_and_inputs_for_generate`, and it must match
# its padded version for the test to be valid -- we need to pass both
unpadded_custom_inputs = {"cross_attention_mask": cross_attention_mask}
padded_custom_inputs = {"cross_attention_mask": padded_cross_attention_mask}
super().test_left_padding_compatibility(
unpadded_custom_inputs=unpadded_custom_inputs, padded_custom_inputs=padded_custom_inputs
)
@require_torch
|
MllamaForConditionalGenerationModelTest
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol53.py
|
{
"start": 2568,
"end": 2669
}
|
class ____(Protocol):
def m[T: Proto_ContraGeneric](self: T, x: T) -> None: ...
|
Proto_ContraGeneric
|
python
|
crytic__slither
|
slither/detectors/compiler_bugs/array_by_reference.py
|
{
"start": 777,
"end": 7152
}
|
class ____(AbstractDetector):
"""
Detects passing of arrays located in memory to functions which expect to modify arrays via storage reference.
"""
ARGUMENT = "array-by-reference"
HELP = "Modifying storage array by value"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#modifying-storage-array-by-value"
WIKI_TITLE = "Modifying storage array by value"
WIKI_DESCRIPTION = (
"Detect arrays passed to a function that expects reference to a storage array"
)
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Memory {
uint[1] public x; // storage
function f() public {
f1(x); // update x
f2(x); // do not update x
}
function f1(uint[1] storage arr) internal { // by reference
arr[0] = 1;
}
function f2(uint[1] arr) internal { // by value
arr[0] = 2;
}
}
```
Bob calls `f()`. Bob assumes that at the end of the call `x[0]` is 2, but it is 1.
As a result, Bob's usage of the contract is incorrect."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Ensure the correct usage of `memory` and `storage` in the function parameters. Make all the locations explicit."
@staticmethod
def get_funcs_modifying_array_params(contracts: List[Contract]) -> Set[FunctionContract]:
"""
Obtains a set of functions which take arrays not located in storage as parameters, and writes to them.
:param contracts: The collection of contracts to check functions in.
:return: A set of functions which take an array not located in storage as a parameter and writes to it.
"""
# Initialize our resulting set of functions which modify non-reference array parameters
results = set()
# Loop through all functions in all contracts.
for contract in contracts:
for function in contract.functions_declared:
# Skip any constructor functions.
if function.is_constructor:
continue
# Determine if this function takes an array as a parameter and the location isn't storage.
# If it has been written to, we know this sets an non-storage-ref array.
for param in function.parameters:
if isinstance(param.type, ArrayType) and param.location != "storage":
if param in function.variables_written:
results.add(function)
break
return results
@staticmethod
def detect_calls_passing_ref_to_function(
contracts: List[Contract], array_modifying_funcs: Set[FunctionContract]
) -> List[Tuple[Node, Variable, Union[Function, Variable]]]:
"""
Obtains all calls passing storage arrays by value to a function which cannot write to them successfully.
:param contracts: The collection of contracts to check for problematic calls in.
:param array_modifying_funcs: The collection of functions which take non-storage arrays as input and writes to
them.
:return: A list of tuples (calling_node, affected_argument, invoked_function) which denote all problematic
nodes invoking a function with some storage array argument where the invoked function seemingly attempts to
write to the array unsuccessfully.
"""
# Define our resulting array.
results: List[Tuple[Node, Variable, Union[Function, Variable]]] = []
# Verify we have functions in our list to check for.
if not array_modifying_funcs:
return results
# Loop for each node in each function/modifier in each contract
# pylint: disable=too-many-nested-blocks
for contract in contracts:
for function in contract.functions_and_modifiers_declared:
for ir in [ir for _, ir in function.high_level_calls] + function.internal_calls:
# Verify this references a function in our array modifying functions collection.
if ir.function not in array_modifying_funcs:
continue
# Verify one of these parameters is an array in storage.
for (param, arg) in zip(ir.function.parameters, ir.arguments):
# Verify this argument is a variable that is an array type.
if not isinstance(arg, (StateVariable, LocalVariable)):
continue
if not isinstance(arg.type, ArrayType):
continue
# If it is a state variable OR a local variable referencing storage, we add it to the list.
if (
isinstance(arg, StateVariable)
or (isinstance(arg, LocalVariable) and arg.location == "storage")
) and (isinstance(param.type, ArrayType) and param.location != "storage"):
results.append((ir.node, arg, ir.function))
return results
def _detect(self) -> List[Output]:
"""
Detects passing of arrays located in memory to functions which expect to modify arrays via storage reference.
:return: The JSON results of the detector, which contains the calling_node, affected_argument_variable and
invoked_function for each result found.
"""
results = []
array_modifying_funcs = self.get_funcs_modifying_array_params(self.contracts)
problematic_calls = self.detect_calls_passing_ref_to_function(
self.contracts, array_modifying_funcs
)
if problematic_calls:
for calling_node, affected_argument, invoked_function in problematic_calls:
info: DETECTOR_INFO = [
calling_node.function,
" passes array ",
affected_argument,
" by reference to ",
invoked_function,
" which only takes arrays by value\n",
]
res = self.generate_result(info)
results.append(res)
return results
|
ArrayByReference
|
python
|
django__django
|
tests/model_regress/models.py
|
{
"start": 869,
"end": 932
}
|
class ____(models.Model):
when = models.DateTimeField()
|
Event
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/convert_phase.py
|
{
"start": 938,
"end": 1460
}
|
class ____(enum.Enum):
"""Enum class defining name of the converter components."""
# Validate the given input and prepare and optimize TensorFlow Model.
PREPARE_TF_MODEL = "PREPARE_TF_MODEL"
# Convert to TFLite model format.
CONVERT_TF_TO_TFLITE_MODEL = "CONVERT_TF_TO_TFLITE_MODEL"
# RUN quantization and sparsification.
OPTIMIZE_TFLITE_MODEL = "OPTIMIZE_TFLITE_MODEL"
SubComponentItem = collections.namedtuple("SubComponentItem",
["name", "component"])
|
Component
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py
|
{
"start": 111358,
"end": 113595
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("model_service.model_service.UploadModelResponse.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("model_service.ModelServiceHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = UploadModelOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model=TEST_MODEL_OBJ,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.upload_model.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model=TEST_MODEL_OBJ,
parent_model=None,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
@mock.patch(VERTEX_AI_PATH.format("model_service.model_service.UploadModelResponse.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("model_service.ModelServiceHook"))
def test_execute_with_parent_model(self, mock_hook, to_dict_mock):
op = UploadModelOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model=TEST_MODEL_OBJ,
parent_model=TEST_PARENT_MODEL,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.upload_model.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model=TEST_MODEL_OBJ,
parent_model=TEST_PARENT_MODEL,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
|
TestVertexAIUploadModelOperator
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_page_breaks04.py
|
{
"start": 315,
"end": 1190
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("page_breaks04.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with page breaks."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_v_pagebreaks([1])
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
etianen__django-reversion
|
tests/test_app/models.py
|
{
"start": 2457,
"end": 2689
}
|
class ____(models.Model):
name = models.CharField(
max_length=191,
default="v1",
)
objects = TestModelWithNaturalKeyManager()
def natural_key(self):
return (self.name,)
|
TestModelWithNaturalKey
|
python
|
miyuchina__mistletoe
|
mistletoe/contrib/mathjax.py
|
{
"start": 179,
"end": 1266
}
|
class ____(HtmlRenderer, LaTeXRenderer):
def __init__(self, **kwargs):
"""
Args:
**kwargs: additional parameters to be passed to the ancestors'
constructors.
"""
super().__init__(**kwargs)
mathjax_src = '<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML"></script>\n'
def render_math(self, token):
"""
Convert single dollar sign enclosed math expressions to the ``\\(...\\)`` syntax, to support
the default MathJax settings which ignore single dollar signs as described at
https://docs.mathjax.org/en/latest/basic/mathematics.html#tex-and-latex-input.
"""
if token.content.startswith('$$'):
return self.render_raw_text(token)
return '\\({}\\)'.format(self.render_raw_text(token).strip('$'))
def render_document(self, token):
"""
Append CDN link for MathJax to the end of <body>.
"""
return super().render_document(token) + self.mathjax_src
|
MathJaxRenderer
|
python
|
pandas-dev__pandas
|
pandas/tests/tools/test_to_datetime.py
|
{
"start": 102676,
"end": 103815
}
|
class ____:
@pytest.mark.parametrize(
"test_list",
[
[
"2011-12-30 00:00:00.000000",
"2011-12-30 00:00:00.000000",
"2011-12-30 00:00:00.000000",
],
[np.nan, np.nan, "2011-12-30 00:00:00.000000"],
["", "2011-12-30 00:00:00.000000"],
["NaT", "2011-12-30 00:00:00.000000"],
["2011-12-30 00:00:00.000000", "random_string"],
["now", "2011-12-30 00:00:00.000000"],
["today", "2011-12-30 00:00:00.000000"],
],
)
def test_guess_datetime_format_for_array(self, test_list):
expected_format = "%Y-%m-%d %H:%M:%S.%f"
test_array = np.array(test_list, dtype=object)
assert tools._guess_datetime_format_for_array(test_array) == expected_format
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array_all_nans(self):
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype="O")
)
assert format_for_string_of_nans is None
|
TestGuessDatetimeFormat
|
python
|
kamyu104__LeetCode-Solutions
|
Python/next-greater-element-iv.py
|
{
"start": 42,
"end": 586
}
|
class ____(object):
def secondGreaterElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
result, stk1, stk2 = [-1]*len(nums), [], []
for i, x in enumerate(nums):
while stk2 and nums[stk2[-1]] < x:
result[stk2.pop()] = x
tmp = []
while stk1 and nums[stk1[-1]] < x:
tmp.append(stk1.pop())
stk1.append(i)
for x in reversed(tmp):
stk2.append(x)
return result
|
Solution
|
python
|
crytic__slither
|
slither/vyper_parsing/ast/types.py
|
{
"start": 3218,
"end": 3298
}
|
class ____(ASTNode):
name: str
body: List[ASTNode]
@dataclass
|
InterfaceDef
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_organization_metrics.py
|
{
"start": 1179,
"end": 2961
}
|
class ____(APITestCase):
(method, endpoint) = ("get", "sentry-api-0-organization-metrics-data")
def setUp(self) -> None:
self.create_project(name="Bar", slug="bar", teams=[self.team], fire_project_created=True)
def send_request(
self, organization: Organization, token: ApiToken, method: str, endpoint: str, *args: str
) -> HttpResponse:
url = reverse(endpoint, args=(organization.slug,) + args)
return getattr(self.client, method)(
url, HTTP_AUTHORIZATION=f"Bearer {token.token}", format="json"
)
def test_access_with_wrong_permission_scopes(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["alerts:read"])
response = self.send_request(self.organization, token, self.method, self.endpoint)
assert response.status_code == 403
def test_access_of_another_organization(self) -> None:
other_user = self.create_user("admin_2@localhost", is_superuser=True, is_staff=True)
self.create_organization(name="foo", slug="foo", owner=other_user)
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=other_user, scope_list=["org:read"])
response = self.send_request(self.organization, token, self.method, self.endpoint)
assert response.status_code == 403
def test_access_with_permissions(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["org:read"])
response = self.send_request(self.organization, token, self.method, self.endpoint)
assert response.status_code in (200, 400, 404)
|
OrganizationMetricsPermissionTest
|
python
|
python-pillow__Pillow
|
src/PIL/MpegImagePlugin.py
|
{
"start": 402,
"end": 1361
}
|
class ____:
def __init__(self, fp: SupportsRead[bytes]) -> None:
self.fp = fp
self.bits = 0
self.bitbuffer = 0
def next(self) -> int:
return i8(self.fp.read(1))
def peek(self, bits: int) -> int:
while self.bits < bits:
self.bitbuffer = (self.bitbuffer << 8) + self.next()
self.bits += 8
return self.bitbuffer >> (self.bits - bits) & (1 << bits) - 1
def skip(self, bits: int) -> None:
while self.bits < bits:
self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1))
self.bits += 8
self.bits = self.bits - bits
def read(self, bits: int) -> int:
v = self.peek(bits)
self.bits = self.bits - bits
return v
def _accept(prefix: bytes) -> bool:
return prefix.startswith(b"\x00\x00\x01\xb3")
##
# Image plugin for MPEG streams. This plugin can identify a stream,
# but it cannot read it.
|
BitStream
|
python
|
pypa__warehouse
|
tests/unit/accounts/test_views.py
|
{
"start": 141737,
"end": 143268
}
|
class ____:
def test_view_terms_of_service_no_user(self):
user_service = pretend.stub(
record_tos_engagement=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request = pretend.stub(
user=None,
find_service=lambda *a, **kw: user_service,
registry=pretend.stub(settings={"terms.revision": "the-revision"}),
)
result = views.view_terms_of_service(pyramid_request)
assert isinstance(result, HTTPSeeOther)
assert (
result.headers["Location"]
== "https://policies.python.org/pypi.org/Terms-of-Service/"
)
assert user_service.record_tos_engagement.calls == []
def test_view_terms_of_service(self):
user_service = pretend.stub(
record_tos_engagement=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request = pretend.stub(
user=pretend.stub(id="user-id"),
find_service=lambda *a, **kw: user_service,
registry=pretend.stub(settings={"terms.revision": "the-revision"}),
)
result = views.view_terms_of_service(pyramid_request)
assert isinstance(result, HTTPSeeOther)
assert (
result.headers["Location"]
== "https://policies.python.org/pypi.org/Terms-of-Service/"
)
assert user_service.record_tos_engagement.calls == [
pretend.call("user-id", "the-revision", TermsOfServiceEngagement.Viewed)
]
|
TestViewTermsOfService
|
python
|
doocs__leetcode
|
solution/1700-1799/1750.Minimum Length of String After Deleting Similar Ends/Solution.py
|
{
"start": 0,
"end": 338
}
|
class ____:
def minimumLength(self, s: str) -> int:
i, j = 0, len(s) - 1
while i < j and s[i] == s[j]:
while i + 1 < j and s[i] == s[i + 1]:
i += 1
while i < j - 1 and s[j - 1] == s[j]:
j -= 1
i, j = i + 1, j - 1
return max(0, j - i + 1)
|
Solution
|
python
|
pydata__xarray
|
xarray/backends/zarr.py
|
{
"start": 20812,
"end": 61244
}
|
class ____(AbstractWritableDataStore):
"""Store for reading and writing data via zarr"""
__slots__ = (
"_align_chunks",
"_append_dim",
"_cache_members",
"_close_store_on_close",
"_consolidate_on_close",
"_group",
"_members",
"_mode",
"_read_only",
"_safe_chunks",
"_synchronizer",
"_use_zarr_fill_value_as_mask",
"_write_empty",
"_write_region",
"zarr_group",
)
@classmethod
def open_store(
cls,
store,
mode: ZarrWriteModes = "r",
synchronizer=None,
group=None,
consolidated=False,
consolidate_on_close=False,
chunk_store=None,
storage_options=None,
append_dim=None,
write_region=None,
safe_chunks=True,
align_chunks=False,
zarr_version=None,
zarr_format=None,
use_zarr_fill_value_as_mask=None,
write_empty: bool | None = None,
cache_members: bool = True,
):
(
zarr_group,
consolidate_on_close,
close_store_on_close,
use_zarr_fill_value_as_mask,
) = _get_open_params(
store=store,
mode=mode,
synchronizer=synchronizer,
group=group,
consolidated=consolidated,
consolidate_on_close=consolidate_on_close,
chunk_store=chunk_store,
storage_options=storage_options,
zarr_version=zarr_version,
use_zarr_fill_value_as_mask=use_zarr_fill_value_as_mask,
zarr_format=zarr_format,
)
from zarr import Group
group_members: dict[str, Group] = {}
group_paths = list(_iter_zarr_groups(zarr_group, parent=group))
for path in group_paths:
if path == group:
group_members[path] = zarr_group
else:
rel_path = path.removeprefix(f"{group}/")
group_members[path] = zarr_group[rel_path.removeprefix("/")]
out = {
group: cls(
group_store,
mode,
consolidate_on_close,
append_dim,
write_region,
safe_chunks,
write_empty,
close_store_on_close,
use_zarr_fill_value_as_mask,
align_chunks=align_chunks,
cache_members=cache_members,
)
for group, group_store in group_members.items()
}
return out
@classmethod
def open_group(
cls,
store,
mode: ZarrWriteModes = "r",
synchronizer=None,
group=None,
consolidated=False,
consolidate_on_close=False,
chunk_store=None,
storage_options=None,
append_dim=None,
write_region=None,
safe_chunks=True,
align_chunks=False,
zarr_version=None,
zarr_format=None,
use_zarr_fill_value_as_mask=None,
write_empty: bool | None = None,
cache_members: bool = True,
):
(
zarr_group,
consolidate_on_close,
close_store_on_close,
use_zarr_fill_value_as_mask,
) = _get_open_params(
store=store,
mode=mode,
synchronizer=synchronizer,
group=group,
consolidated=consolidated,
consolidate_on_close=consolidate_on_close,
chunk_store=chunk_store,
storage_options=storage_options,
zarr_version=zarr_version,
use_zarr_fill_value_as_mask=use_zarr_fill_value_as_mask,
zarr_format=zarr_format,
)
return cls(
zarr_group,
mode,
consolidate_on_close,
append_dim,
write_region,
safe_chunks,
write_empty,
close_store_on_close,
use_zarr_fill_value_as_mask,
align_chunks=align_chunks,
cache_members=cache_members,
)
def __init__(
self,
zarr_group,
mode=None,
consolidate_on_close=False,
append_dim=None,
write_region=None,
safe_chunks=True,
write_empty: bool | None = None,
close_store_on_close: bool = False,
use_zarr_fill_value_as_mask=None,
align_chunks: bool = False,
cache_members: bool = True,
):
if align_chunks:
# Disabled the safe_chunks validations if the alignment is going to be applied
safe_chunks = False
self.zarr_group = zarr_group
self._read_only = self.zarr_group.read_only
self._synchronizer = self.zarr_group.synchronizer
self._group = self.zarr_group.path
self._mode = mode
self._consolidate_on_close = consolidate_on_close
self._append_dim = append_dim
self._write_region = write_region
self._align_chunks = align_chunks
self._safe_chunks = safe_chunks
self._write_empty = write_empty
self._close_store_on_close = close_store_on_close
self._use_zarr_fill_value_as_mask = use_zarr_fill_value_as_mask
self._cache_members: bool = cache_members
self._members: dict[str, ZarrArray | ZarrGroup] = {}
if self._cache_members:
# initialize the cache
# this cache is created here and never updated.
# If the `ZarrStore` instance creates a new zarr array, or if an external process
# removes an existing zarr array, then the cache will be invalid.
# We use this cache only to record any pre-existing arrays when the group was opened
# create a new ZarrStore instance if you want to
# capture the current state of the zarr group, or create a ZarrStore with
# `cache_members` set to `False` to disable this cache and instead fetch members
# on demand.
self._members = self._fetch_members()
def get_child_store(self, group: str) -> Self:
zarr_group = self.zarr_group.require_group(group)
return type(self)(
zarr_group=zarr_group,
mode=self._mode,
consolidate_on_close=self._consolidate_on_close,
append_dim=self._append_dim,
write_region=self._write_region,
safe_chunks=self._safe_chunks,
write_empty=self._write_empty,
close_store_on_close=self._close_store_on_close,
use_zarr_fill_value_as_mask=self._use_zarr_fill_value_as_mask,
align_chunks=self._align_chunks,
cache_members=self._cache_members,
)
@property
def members(self) -> dict[str, ZarrArray | ZarrGroup]:
"""
Model the arrays and groups contained in self.zarr_group as a dict. If `self._cache_members`
is true, the dict is cached. Otherwise, it is retrieved from storage.
"""
if not self._cache_members:
return self._fetch_members()
else:
return self._members
def _fetch_members(self) -> dict[str, ZarrArray | ZarrGroup]:
"""
Get the arrays and groups defined in the zarr group modelled by this Store
"""
import zarr
if zarr.__version__ >= "3":
return dict(self.zarr_group.members())
else:
return dict(self.zarr_group.items())
def array_keys(self) -> tuple[str, ...]:
from zarr import Array as ZarrArray
return tuple(
key for (key, node) in self.members.items() if isinstance(node, ZarrArray)
)
def arrays(self) -> tuple[tuple[str, ZarrArray], ...]:
from zarr import Array as ZarrArray
return tuple(
(key, node)
for (key, node) in self.members.items()
if isinstance(node, ZarrArray)
)
@property
def ds(self):
# TODO: consider deprecating this in favor of zarr_group
return self.zarr_group
def open_store_variable(self, name):
zarr_array = self.members[name]
data = indexing.LazilyIndexedArray(ZarrArrayWrapper(zarr_array))
try_nczarr = self._mode == "r"
dimensions, attributes = _get_zarr_dims_and_attrs(
zarr_array, DIMENSION_KEY, try_nczarr
)
attributes = dict(attributes)
encoding = {
"chunks": zarr_array.chunks,
"preferred_chunks": dict(zip(dimensions, zarr_array.chunks, strict=True)),
}
if _zarr_v3():
encoding.update(
{
"compressors": zarr_array.compressors,
"filters": zarr_array.filters,
"shards": zarr_array.shards,
}
)
if self.zarr_group.metadata.zarr_format == 3:
encoding.update({"serializer": zarr_array.serializer})
else:
encoding.update(
{
"compressor": zarr_array.compressor,
"filters": zarr_array.filters,
}
)
if self._use_zarr_fill_value_as_mask:
# Setting this attribute triggers CF decoding for missing values
# by interpreting Zarr's fill_value to mean the same as netCDF's _FillValue
if zarr_array.fill_value is not None:
attributes["_FillValue"] = zarr_array.fill_value
elif "_FillValue" in attributes:
attributes["_FillValue"] = FillValueCoder.decode(
attributes["_FillValue"], zarr_array.dtype
)
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
return FrozenDict((k, self.open_store_variable(k)) for k in self.array_keys())
def get_attrs(self):
return {
k: v
for k, v in self.zarr_group.attrs.asdict().items()
if not k.lower().startswith("_nc")
}
def get_dimensions(self):
try_nczarr = self._mode == "r"
dimensions = {}
for _k, v in self.arrays():
dim_names, _ = _get_zarr_dims_and_attrs(v, DIMENSION_KEY, try_nczarr)
for d, s in zip(dim_names, v.shape, strict=True):
if d in dimensions and dimensions[d] != s:
raise ValueError(
f"found conflicting lengths for dimension {d} "
f"({s} != {dimensions[d]})"
)
dimensions[d] = s
return dimensions
def set_dimensions(self, variables, unlimited_dims=None):
if unlimited_dims is not None:
raise NotImplementedError(
"Zarr backend doesn't know how to handle unlimited dimensions"
)
def set_attributes(self, attributes):
_put_attrs(self.zarr_group, attributes)
def encode_variable(self, variable, name=None):
variable = encode_zarr_variable(variable, name=name)
return variable
def encode_attribute(self, a):
return encode_zarr_attr_value(a)
def store(
self,
variables,
attributes,
check_encoding_set=frozenset(),
writer=None,
unlimited_dims=None,
):
"""
Top level method for putting data on this store, this method:
- encodes variables/attributes
- sets dimensions
- sets variables
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
writer : ArrayWriter
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
dimension on which the zarray will be appended
only needed in append mode
"""
if TYPE_CHECKING:
import zarr
else:
zarr = attempt_import("zarr")
if self._mode == "w":
# always overwrite, so we don't care about existing names,
# and consistency of encoding
new_variable_names = set(variables)
existing_keys = {}
existing_variable_names = {}
else:
existing_keys = self.array_keys()
existing_variable_names = {
vn for vn in variables if _encode_variable_name(vn) in existing_keys
}
new_variable_names = set(variables) - existing_variable_names
if self._mode == "r+" and (
new_names := [k for k in variables if k not in existing_keys]
):
raise ValueError(
f"dataset contains non-pre-existing variables {new_names!r}, "
"which is not allowed in ``xarray.Dataset.to_zarr()`` with "
"``mode='r+'``. To allow writing new variables, set ``mode='a'``."
)
if self._append_dim is not None and self._append_dim not in existing_keys:
# For dimensions without coordinate values, we must parse
# the _ARRAY_DIMENSIONS attribute on *all* arrays to check if it
# is a valid existing dimension name.
# TODO: This `get_dimensions` method also does shape checking
# which isn't strictly necessary for our check.
existing_dims = self.get_dimensions()
if self._append_dim not in existing_dims:
raise ValueError(
f"append_dim={self._append_dim!r} does not match any existing "
f"dataset dimensions {existing_dims}"
)
variables_encoded, attributes = self.encode(
{vn: variables[vn] for vn in new_variable_names}, attributes
)
if existing_variable_names:
# We make sure that values to be appended are encoded *exactly*
# as the current values in the store.
# To do so, we decode variables directly to access the proper encoding,
# without going via xarray.Dataset to avoid needing to load
# index variables into memory.
existing_vars, _, _ = conventions.decode_cf_variables(
variables={
k: self.open_store_variable(name=k) for k in existing_variable_names
},
# attributes = {} since we don't care about parsing the global
# "coordinates" attribute
attributes={},
)
# Modified variables must use the same encoding as the store.
vars_with_encoding = {}
for vn in existing_variable_names:
_validate_datatypes_for_zarr_append(
vn, existing_vars[vn], variables[vn]
)
vars_with_encoding[vn] = variables[vn].copy(deep=False)
vars_with_encoding[vn].encoding = existing_vars[vn].encoding
vars_with_encoding, _ = self.encode(vars_with_encoding, {})
variables_encoded.update(vars_with_encoding)
for var_name in existing_variable_names:
variables_encoded[var_name] = _validate_and_transpose_existing_dims(
var_name,
variables_encoded[var_name],
existing_vars[var_name],
self._write_region,
self._append_dim,
)
if self._mode not in ["r", "r+"]:
self.set_attributes(attributes)
self.set_dimensions(variables_encoded, unlimited_dims=unlimited_dims)
# if we are appending to an append_dim, only write either
# - new variables not already present, OR
# - variables with the append_dim in their dimensions
# We do NOT overwrite other variables.
if self._mode == "a-" and self._append_dim is not None:
variables_to_set = {
k: v
for k, v in variables_encoded.items()
if (k not in existing_variable_names) or (self._append_dim in v.dims)
}
else:
variables_to_set = variables_encoded
self.set_variables(
variables_to_set, check_encoding_set, writer, unlimited_dims=unlimited_dims
)
if self._consolidate_on_close:
kwargs = {}
if _zarr_v3():
kwargs["zarr_format"] = self.zarr_group.metadata.zarr_format
zarr.consolidate_metadata(self.zarr_group.store, **kwargs)
def _open_existing_array(self, *, name) -> ZarrArray:
    """Open the already-existing zarr array ``name`` from this store's group.

    When ``self._write_empty`` is set, the array is re-opened directly from
    the underlying (chunk) store so the ``write_empty_chunks`` setting can be
    applied; otherwise the array is simply looked up on the group.
    """
    import zarr
    from zarr import Array as ZarrArray

    # TODO: if mode="a", consider overriding the existing variable
    # metadata. This would need some case work properly with region
    # and append_dim.
    if self._write_empty is not None:
        # Write to zarr_group.chunk_store instead of zarr_group.store
        # See https://github.com/pydata/xarray/pull/8326#discussion_r1365311316 for a longer explanation
        # The open_consolidated() enforces a mode of r or r+
        # (and to_zarr with region provided enforces a read mode of r+),
        # and this function makes sure the resulting Group has a store of type ConsolidatedMetadataStore
        # and a 'normal Store subtype for chunk_store.
        # The exact type depends on if a local path was used, or a URL of some sort,
        # but the point is that it's not a read-only ConsolidatedMetadataStore.
        # It is safe to write chunk data to the chunk_store because no metadata would be changed by
        # to_zarr with the region parameter:
        # - Because the write mode is enforced to be r+, no new variables can be added to the store
        #   (this is also checked and enforced in xarray.backends.api.py::to_zarr()).
        # - Existing variables already have their attrs included in the consolidated metadata file.
        # - The size of dimensions can not be expanded, that would require a call using `append_dim`
        #    which is mutually exclusive with `region`
        empty: dict[str, bool] | dict[str, dict[str, bool]]
        if _zarr_v3():
            # zarr v3 accepts write_empty_chunks only via the ``config`` mapping
            empty = dict(config={"write_empty_chunks": self._write_empty})
        else:
            empty = dict(write_empty_chunks=self._write_empty)
        zarr_array = zarr.open(
            store=(
                self.zarr_group.store if _zarr_v3() else self.zarr_group.chunk_store
            ),
            # TODO: see if zarr should normalize these strings.
            path="/".join([self.zarr_group.name.rstrip("/"), name]).lstrip("/"),
            **empty,
        )
    else:
        zarr_array = self.zarr_group[name]
    return cast(ZarrArray, zarr_array)
def _create_new_array(
    self, *, name, shape, dtype, fill_value, encoding, attrs
) -> ZarrArray:
    """Create a new array ``name`` in the zarr group and attach ``attrs``.

    Raises ValueError when ``encoding["write_empty_chunks"]`` conflicts with
    the store-level ``self._write_empty`` setting.
    """
    # Variable-length string dtypes are created as plain ``str`` in zarr.
    if coding.strings.check_vlen_dtype(dtype) is str:
        dtype = str

    if self._write_empty is not None:
        if (
            "write_empty_chunks" in encoding
            and encoding["write_empty_chunks"] != self._write_empty
        ):
            raise ValueError(
                'Differing "write_empty_chunks" values in encoding and parameters'
                f'Got {encoding["write_empty_chunks"] = } and {self._write_empty = }'
            )
        else:
            encoding["write_empty_chunks"] = self._write_empty

    if _zarr_v3():
        # zarr v3 deprecated origin and write_empty_chunks
        # instead preferring to pass them via the config argument
        encoding["config"] = {}
        for c in ("write_empty_chunks", "order"):
            if c in encoding:
                encoding["config"][c] = encoding.pop(c)

    zarr_array = self.zarr_group.create(
        name,
        shape=shape,
        dtype=dtype,
        fill_value=fill_value,
        **encoding,
    )
    zarr_array = _put_attrs(zarr_array, attrs)
    return zarr_array
def set_variables(
    self,
    variables: dict[str, Variable],
    check_encoding_set,
    writer,
    unlimited_dims=None,
):
    """
    This provides a centralized method to set the variables on the data
    store.

    Parameters
    ----------
    variables : dict-like
        Dictionary of key/value (variable name / xr.Variable) pairs
    check_encoding_set : list-like
        List of variables that should be checked for invalid encoding
        values
    writer
        Deferred-write helper; each variable's data is queued on it rather
        than written immediately.
    unlimited_dims : list-like
        List of dimension names that should be treated as unlimited
        dimensions.
    """
    existing_keys = self.array_keys()
    is_zarr_v3_format = _zarr_v3() and self.zarr_group.metadata.zarr_format == 3

    for vn, v in variables.items():
        name = _encode_variable_name(vn)
        attrs = v.attrs.copy()
        dims = v.dims
        dtype = v.dtype
        shape = v.shape

        if self._use_zarr_fill_value_as_mask:
            # The zarr fill_value doubles as the missing-value mask, so it
            # is taken from the _FillValue attribute.
            fill_value = attrs.pop("_FillValue", None)
        else:
            fill_value = v.encoding.pop("fill_value", None)
            if fill_value is None and v.dtype.kind == "f":
                # For floating point data, Xarray defaults to a fill_value
                # of NaN (unlike Zarr, which uses zero):
                # https://github.com/pydata/xarray/issues/10646
                fill_value = np.nan
            if "_FillValue" in attrs:
                # replace with encoded fill value
                fv = attrs.pop("_FillValue")
                if fv is not None:
                    attrs["_FillValue"] = FillValueCoder.encode(fv, dtype)

            # _FillValue is never a valid encoding for Zarr
            # TODO: refactor this logic so we don't need to check this here
            if "_FillValue" in v.encoding:
                if v.encoding.get("_FillValue") is not None:
                    raise ValueError("Zarr does not support _FillValue in encoding.")
                else:
                    del v.encoding["_FillValue"]

        zarr_shape = None
        write_region = self._write_region if self._write_region is not None else {}
        write_region = {dim: write_region.get(dim, slice(None)) for dim in dims}

        if self._mode != "w" and name in existing_keys:
            # existing variable
            zarr_array = self._open_existing_array(name=name)
            if self._append_dim is not None and self._append_dim in dims:
                # resize existing variable
                append_axis = dims.index(self._append_dim)
                assert write_region[self._append_dim] == slice(None)
                # After resizing, write only into the newly appended region.
                write_region[self._append_dim] = slice(
                    zarr_array.shape[append_axis], None
                )

                new_shape = (
                    zarr_array.shape[:append_axis]
                    + (zarr_array.shape[append_axis] + v.shape[append_axis],)
                    + zarr_array.shape[append_axis + 1 :]
                )
                zarr_array.resize(new_shape)

            zarr_shape = zarr_array.shape

        region = tuple(write_region[dim] for dim in dims)

        # We need to do this for both new and existing variables to ensure we're not
        # writing to a partial chunk, even though we don't use the `encoding` value
        # when writing to an existing variable. See
        # https://github.com/pydata/xarray/issues/8371 for details.
        # Note: Ideally there should be two functions, one for validating the chunks and
        # another one for extracting the encoding.
        encoding = extract_zarr_variable_encoding(
            v,
            raise_on_invalid=vn in check_encoding_set,
            name=vn,
            zarr_format=3 if is_zarr_v3_format else 2,
        )

        if self._align_chunks and isinstance(encoding["chunks"], tuple):
            v = grid_rechunk(
                v=v,
                enc_chunks=encoding["chunks"],
                region=region,
            )

        if self._safe_chunks and isinstance(encoding["chunks"], tuple):
            # the hard case
            # DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk
            # this avoids the need to get involved in zarr synchronization / locking
            # From zarr docs:
            #  "If each worker in a parallel computation is writing to a
            #   separate region of the array, and if region boundaries are perfectly aligned
            #   with chunk boundaries, then no synchronization is required."
            # TODO: incorporate synchronizer to allow writes from multiple dask
            # threads
            shape = zarr_shape or v.shape
            validate_grid_chunks_alignment(
                nd_v_chunks=v.chunks,
                enc_chunks=encoding["chunks"],
                region=region,
                allow_partial_chunks=self._mode != "r+",
                name=name,
                backend_shape=shape,
            )

        if self._mode == "w" or name not in existing_keys:
            # new variable
            encoded_attrs = {k: self.encode_attribute(v) for k, v in attrs.items()}
            # the magic for storing the hidden dimension data
            if is_zarr_v3_format:
                encoding["dimension_names"] = dims
            else:
                encoded_attrs[DIMENSION_KEY] = dims

            encoding["overwrite"] = self._mode == "w"

            zarr_array = self._create_new_array(
                name=name,
                dtype=dtype,
                shape=shape,
                fill_value=fill_value,
                encoding=encoding,
                attrs=encoded_attrs,
            )

        writer.add(v.data, zarr_array, region)
def sync(self) -> None:
    """Flush pending writes — intentionally a no-op for this store."""
    return None
def close(self) -> None:
    """Close the underlying zarr store if this instance owns it."""
    if not self._close_store_on_close:
        return
    self.zarr_group.store.close()
def _auto_detect_regions(self, ds, region):
    """Resolve any ``"auto"`` entries in ``region`` to concrete slices.

    For each dimension marked ``"auto"``, the coordinate values of ``ds``
    are located within the store's existing coordinate to derive the slice.
    Raises KeyError when values are missing from the store and ValueError
    when the matched indices are not contiguous.
    """
    for dim, val in region.items():
        if val != "auto":
            continue

        if dim not in ds._variables:
            # unindexed dimension: the whole extent is the region
            region[dim] = slice(0, ds.sizes[dim])
            continue

        variable = conventions.decode_cf_variable(
            dim, self.open_store_variable(dim).compute()
        )
        assert variable.dims == (dim,)
        index = pd.Index(variable.data)
        idxs = index.get_indexer(ds[dim].data)
        if (idxs == -1).any():
            raise KeyError(
                f"Not all values of coordinate '{dim}' in the new array were"
                " found in the original store. Writing to a zarr region slice"
                " requires that no dimensions or metadata are changed by the write."
            )

        if (np.diff(idxs) != 1).any():
            raise ValueError(
                f"The auto-detected region of coordinate '{dim}' for writing new data"
                " to the original store had non-contiguous indices. Writing to a zarr"
                " region slice requires that the new data constitute a contiguous subset"
                " of the original store."
            )
        region[dim] = slice(idxs[0], idxs[-1] + 1)
    return region
def _validate_and_autodetect_region(self, ds: Dataset) -> Dataset:
    """Validate ``self._write_region`` against ``ds`` and resolve ``"auto"``.

    Returns ``ds`` with index variables dropped (indexes cannot be modified
    by a region write). No-op when no region was requested.
    """
    if self._write_region is None:
        return ds

    region = self._write_region

    if region == "auto":
        # Shorthand: auto-detect the region for every dimension.
        region = dict.fromkeys(ds.dims, "auto")

    if not isinstance(region, dict):
        raise TypeError(f"``region`` must be a dict, got {type(region)}")
    if any(v == "auto" for v in region.values()):
        if self._mode not in ["r+", "a"]:
            raise ValueError(
                f"``mode`` must be 'r+' or 'a' when using ``region='auto'``, got {self._mode!r}"
            )
        region = self._auto_detect_regions(ds, region)

    # validate before attempting to auto-detect since the auto-detection
    # should always return a valid slice.
    for k, v in region.items():
        if k not in ds.dims:
            raise ValueError(
                f"all keys in ``region`` are not in Dataset dimensions, got "
                f"{list(region)} and {list(ds.dims)}"
            )
        if not isinstance(v, slice):
            raise TypeError(
                "all values in ``region`` must be slice objects, got "
                f"region={region}"
            )
        if v.step not in {1, None}:
            raise ValueError(
                "step on all slices in ``region`` must be 1 or None, got "
                f"region={region}"
            )

    non_matching_vars = [
        k for k, v in ds.variables.items() if not set(region).intersection(v.dims)
    ]
    if region and non_matching_vars:
        raise ValueError(
            f"when setting `region` explicitly in to_zarr(), all "
            f"variables in the dataset to write must have at least "
            f"one dimension in common with the region's dimensions "
            f"{list(region.keys())}, but that is not "
            f"the case for some variables here. To drop these variables "
            f"from this dataset before exporting to zarr, write: "
            f".drop_vars({non_matching_vars!r})"
        )

    if self._append_dim is not None and self._append_dim in region:
        raise ValueError(
            f"cannot list the same dimension in both ``append_dim`` and "
            f"``region`` with to_zarr(), got {self._append_dim} in both"
        )

    self._write_region = region

    # can't modify indexes with region writes
    return ds.drop_vars(ds.indexes)
def _validate_encoding(self, encoding) -> None:
if encoding and self._mode in ["a", "a-", "r+"]:
existing_var_names = self.array_keys()
for var_name in existing_var_names:
if var_name in encoding:
raise ValueError(
f"variable {var_name!r} already exists, but encoding was provided"
)
def open_zarr(
    store,
    group=None,
    synchronizer=None,
    chunks="auto",
    decode_cf=True,
    mask_and_scale=True,
    decode_times=True,
    concat_characters=True,
    decode_coords=True,
    drop_variables=None,
    consolidated=None,
    overwrite_encoded_chunks=False,
    chunk_store=None,
    storage_options=None,
    decode_timedelta=None,
    use_cftime=None,
    zarr_version=None,
    zarr_format=None,
    use_zarr_fill_value_as_mask=None,
    chunked_array_type: str | None = None,
    from_array_kwargs: dict[str, Any] | None = None,
    create_default_indexes=True,
    **kwargs,
):
    """Load and decode a dataset from a Zarr store.

    The `store` object should be a valid store for a Zarr group. `store`
    variables must contain dimension metadata encoded in the
    `_ARRAY_DIMENSIONS` attribute or must have NCZarr format.

    Parameters
    ----------
    store : MutableMapping or str
        A MutableMapping where a Zarr Group has been stored or a path to a
        directory in file system where a Zarr DirectoryStore has been stored.
    synchronizer : object, optional
        Array synchronizer provided to zarr
    group : str, optional
        Group path. (a.k.a. `path` in zarr terminology.)
    chunks : int, dict, 'auto' or None, default: 'auto'
        If provided, used to load the data into dask arrays.

        - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
          engine preferred chunks.
        - ``chunks=None`` skips using dask. This uses xarray's internally private
          :ref:`lazy indexing classes <internal design.lazy indexing>`,
          but data is eagerly loaded into memory as numpy arrays when accessed.
          This can be more efficient for smaller arrays, though results may vary.
        - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
        - ``chunks={}`` loads the data with dask using engine preferred chunks if
          exposed by the backend, otherwise with a single chunk for all arrays.

        See dask chunking for more details.
    overwrite_encoded_chunks : bool, optional
        Whether to drop the zarr chunks encoded for each variable when a
        dataset is loaded with specified chunk sizes (default: False)
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).  If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA.
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
    decode_coords : bool, optional
        If True, decode the 'coordinates' attribute to identify coordinates in
        the resulting dataset.
    drop_variables : str or iterable, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    consolidated : bool, optional
        Whether to open the store using zarr's consolidated metadata
        capability. Only works for stores that have already been consolidated.
        By default (`consolidate=None`), attempts to read consolidated metadata,
        falling back to read non-consolidated metadata if that fails.

        When the experimental ``zarr_version=3``, ``consolidated`` must be
        either be ``None`` or ``False``.
    chunk_store : MutableMapping, optional
        A separate Zarr store only for chunk data.
    storage_options : dict, optional
        Any additional parameters for the storage backend (ignored for local
        paths).
    decode_timedelta : bool, optional
        If True, decode variables and coordinates with time units in
        {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'}
        into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value of decode_time.
    use_cftime : bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error.
    zarr_version : int or None, optional

        .. deprecated:: 2024.9.1
           Use ``zarr_format`` instead.

    zarr_format : int or None, optional
        The desired zarr format to target (currently 2 or 3). The default
        of None will attempt to determine the zarr version from ``store`` when
        possible, otherwise defaulting to the default version used by
        the zarr-python library installed.
    use_zarr_fill_value_as_mask : bool, optional
        If True, use the zarr Array ``fill_value`` to mask the data, the same as done
        for NetCDF data with ``_FillValue`` or ``missing_value`` attributes. If False,
        the ``fill_value`` is ignored and the data are not masked. If None, this defaults
        to True for ``zarr_version=2`` and False for ``zarr_version=3``.
    chunked_array_type: str, optional
        Which chunked array type to coerce this datasets' arrays to.
        Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system.
        Experimental API that should not be relied upon.
    from_array_kwargs: dict, optional
        Additional keyword arguments passed on to the ``ChunkManagerEntrypoint.from_array`` method used to create
        chunked arrays, via whichever chunk manager is specified through the ``chunked_array_type`` kwarg.
        Defaults to ``{'manager': 'dask'}``, meaning additional kwargs will be passed eventually to
        :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
    create_default_indexes : bool, default: True
        If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
        which loads the coordinate data into memory. Set it to False if you want to avoid loading
        data into memory.

        Note that backends can still choose to create other indexes. If you want to control that,
        please refer to the backend's documentation.

    Returns
    -------
    dataset : Dataset
        The newly created dataset.

    See Also
    --------
    open_dataset
    open_mfdataset

    References
    ----------
    https://zarr.readthedocs.io/
    """
    from xarray.backends.api import open_dataset

    if from_array_kwargs is None:
        from_array_kwargs = {}

    if chunks == "auto":
        try:
            guess_chunkmanager(
                chunked_array_type
            )  # attempt to import that parallel backend

            chunks = {}
        except (ValueError, ImportError):
            # No chunk manager available: fall back to eager (non-dask) loading.
            chunks = None

    if kwargs:
        # Unlike open_dataset, open_zarr accepts no extra backend kwargs.
        raise TypeError(
            "open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys())
        )

    # Zarr-specific options are forwarded to the "zarr" engine via backend_kwargs.
    backend_kwargs = {
        "synchronizer": synchronizer,
        "consolidated": consolidated,
        "overwrite_encoded_chunks": overwrite_encoded_chunks,
        "chunk_store": chunk_store,
        "storage_options": storage_options,
        "zarr_version": zarr_version,
        "zarr_format": zarr_format,
    }

    ds = open_dataset(
        filename_or_obj=store,
        group=group,
        decode_cf=decode_cf,
        mask_and_scale=mask_and_scale,
        decode_times=decode_times,
        concat_characters=concat_characters,
        decode_coords=decode_coords,
        engine="zarr",
        chunks=chunks,
        drop_variables=drop_variables,
        create_default_indexes=create_default_indexes,
        chunked_array_type=chunked_array_type,
        from_array_kwargs=from_array_kwargs,
        backend_kwargs=backend_kwargs,
        decode_timedelta=decode_timedelta,
        use_cftime=use_cftime,
        zarr_version=zarr_version,
        use_zarr_fill_value_as_mask=use_zarr_fill_value_as_mask,
    )
    return ds
|
ZarrStore
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 28855,
"end": 29224
}
|
class ____(BaseModel):
    """
    Variable serializer for bodies.
    """

    # Reject unknown fields so request bodies are validated strictly.
    model_config = ConfigDict(
        extra="forbid",
    )
    # Variable key; limited to 250 characters by the API schema.
    key: Annotated[str, Field(max_length=250, title="Key")]
    # Arbitrary JSON-serializable value stored for the variable.
    value: JsonValue
    description: Annotated[str | None, Field(title="Description")] = None
    team_id: Annotated[UUID | None, Field(title="Team Id")] = None
VariableBody
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/callbacks.py
|
{
"start": 34963,
"end": 40633
}
|
class ____(Callback):
  """Callback that prints metrics to stdout.

  Args:
      count_mode: One of `"steps"` or `"samples"`.
          Whether the progress bar should
          count samples seen or steps (batches) seen.
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is.
          All others will be averaged over time (e.g. loss, etc).
          If not provided, defaults to the `Model`'s metrics.

  Raises:
      ValueError: In case of invalid `count_mode`.
  """

  def __init__(self, count_mode='samples', stateful_metrics=None):
    super(ProgbarLogger, self).__init__()
    self._supports_tf_logs = True
    if count_mode == 'samples':
      self.use_steps = False
    elif count_mode == 'steps':
      self.use_steps = True
    else:
      raise ValueError('Unknown `count_mode`: ' + str(count_mode))
    # Defaults to all Model's metrics except for loss.
    self.stateful_metrics = set(stateful_metrics) if stateful_metrics else set()

    self.seen = 0
    self.progbar = None
    self.target = None
    self.verbose = 1
    self.epochs = 1

    self._train_step, self._test_step, self._predict_step = None, None, None
    self._call_batch_hooks = True

    # Set when on_train_begin fires, so validation inside `fit` stays silent.
    self._called_in_fit = False

  def set_params(self, params):
    self.verbose = params['verbose']
    self.epochs = params['epochs']
    if self.use_steps and 'steps' in params:
      self.target = params['steps']
    elif not self.use_steps and 'samples' in params:
      self.target = params['samples']
    else:
      self.target = None  # Will be inferred at the end of the first epoch.

    self._call_batch_hooks = self.verbose == 1
    if self.target is None:
      try:
        self._train_step = self.model._train_counter  # pylint: disable=protected-access
        self._test_step = self.model._test_counter  # pylint: disable=protected-access
        self._predict_step = self.model._predict_counter  # pylint: disable=protected-access
      except AttributeError:
        self._call_batch_hooks = True

  def on_train_begin(self, logs=None):
    # When this logger is called inside `fit`, validation is silent.
    self._called_in_fit = True

  def on_test_begin(self, logs=None):
    if not self._called_in_fit:
      self._reset_progbar()
      self._maybe_init_progbar()

  def on_predict_begin(self, logs=None):
    self._reset_progbar()
    self._maybe_init_progbar()

  def on_epoch_begin(self, epoch, logs=None):
    self._reset_progbar()
    self._maybe_init_progbar()
    if self.verbose and self.epochs > 1:
      print('Epoch %d/%d' % (epoch + 1, self.epochs))

  def on_train_batch_end(self, batch, logs=None):
    self._batch_update_progbar(batch, logs)

  def on_test_batch_end(self, batch, logs=None):
    if not self._called_in_fit:
      self._batch_update_progbar(batch, logs)

  def on_predict_batch_end(self, batch, logs=None):
    # Don't pass prediction results.
    self._batch_update_progbar(batch, None)

  def on_epoch_end(self, epoch, logs=None):
    self._finalize_progbar(logs, self._train_step)

  def on_test_end(self, logs=None):
    if not self._called_in_fit:
      self._finalize_progbar(logs, self._test_step)

  def on_predict_end(self, logs=None):
    self._finalize_progbar(logs, self._predict_step)

  def _reset_progbar(self):
    self.seen = 0
    self.progbar = None

  def _maybe_init_progbar(self):
    """Instantiate a `Progbar` if not yet, and update the stateful metrics."""
    # TODO(rchao): Legacy TF1 code path may use list for
    # `self.stateful_metrics`. Remove "cast to set" when TF1 support is dropped.
    self.stateful_metrics = set(self.stateful_metrics)

    if self.model:
      # Update the existing stateful metrics as `self.model.metrics` may contain
      # updated metrics after `MetricsContainer` is built in the first train
      # step.
      self.stateful_metrics = self.stateful_metrics.union(
          set(m.name for m in self.model.metrics))

    if self.progbar is None:
      self.progbar = Progbar(
          target=self.target,
          verbose=self.verbose,
          stateful_metrics=self.stateful_metrics,
          unit_name='step' if self.use_steps else 'sample')

    self.progbar._update_stateful_metrics(self.stateful_metrics)  # pylint: disable=protected-access

  def _implements_train_batch_hooks(self):
    return self._call_batch_hooks

  def _implements_test_batch_hooks(self):
    return self._call_batch_hooks

  def _implements_predict_batch_hooks(self):
    return self._call_batch_hooks

  def _batch_update_progbar(self, batch, logs=None):
    """Updates the progbar."""
    logs = logs or {}
    self._maybe_init_progbar()
    if self.use_steps:
      self.seen = batch + 1  # One-indexed.
    else:
      # v1 path only.
      logs = copy.copy(logs)
      batch_size = logs.pop('size', 0)
      num_steps = logs.pop('num_steps', 1)
      logs.pop('batch', None)
      add_seen = num_steps * batch_size
      self.seen += add_seen

    if self.verbose == 1:
      # Only block async when verbose = 1.
      logs = tf_utils.sync_to_numpy_or_python_type(logs)
      self.progbar.update(self.seen, list(logs.items()), finalize=False)

  def _finalize_progbar(self, logs, counter):
    logs = tf_utils.sync_to_numpy_or_python_type(logs or {})
    if self.target is None:
      if counter is not None:
        counter = counter.numpy()
        if not self.use_steps:
          counter *= logs.get('size', 1)
      self.target = counter or self.seen
      self.progbar.target = self.target
    self.progbar.update(self.target, list(logs.items()), finalize=True)
|
ProgbarLogger
|
python
|
getsentry__sentry
|
src/sentry/api/fields/serializedfile.py
|
{
"start": 425,
"end": 1539
}
|
class ____(serializers.Field):
    """DRF field that (de)serializes a file as a ``[filename, base64-content]`` pair.

    ``to_representation`` emits a two-element list of the file name and the
    base64-encoded content; ``to_internal_value`` reverses that, returning a
    ``FileUpload`` whose ``content`` is an in-memory ``BytesIO``.
    """

    def __init__(
        self,
        max_size=settings.SENTRY_MAX_SERIALIZED_FILE_SIZE,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Maximum allowed size (in bytes) of the decoded file content.
        self.max_size = max_size

    def to_representation(self, value):
        if not value:
            return ""
        if not isinstance(value, FileUpload):
            raise ValueError
        return [value.name, b64encode(value.content.getvalue()).decode("utf-8")]

    def to_internal_value(self, data):
        if not data:
            return None
        # data should be an array of [filename, b64 data]
        try:
            filename, filecontent = data
        except (ValueError, TypeError):
            raise serializers.ValidationError("Invalid file format.")
        try:
            decodedcontent = b64decode(filecontent)
        except Exception:
            raise serializers.ValidationError("Unable to read file content.")
        # Bug fix: enforce the limit against the decoded payload size. The
        # previous check compared ``len(data)`` — always 2, the length of the
        # [filename, content] pair — so the size limit was never applied.
        if self.max_size and len(decodedcontent) > self.max_size:
            raise FileTooLarge()
        return FileUpload(name=filename, content=BytesIO(decodedcontent))
|
SerializedFileField
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-envs/mlagents_envs/side_channel/raw_bytes_channel.py
|
{
"start": 123,
"end": 1301
}
|
class ____(SideChannel):
    """
    This is an example of what the SideChannel for raw bytes exchange would
    look like. Is meant to be used for general research purpose.
    """

    def __init__(self, channel_id: uuid.UUID):
        self._received_messages: List[bytes] = []
        super().__init__(channel_id)

    def on_message_received(self, msg: IncomingMessage) -> None:
        """
        Is called by the environment to the side channel. Can be called
        multiple times per step if multiple messages are meant for that
        SideChannel.
        """
        payload = msg.get_raw_bytes()
        self._received_messages.append(payload)

    def get_and_clear_received_messages(self) -> List[bytes]:
        """
        returns a list of bytearray received from the environment.
        """
        # Hand back the accumulated messages and start a fresh buffer.
        pending, self._received_messages = list(self._received_messages), []
        return pending

    def send_raw_data(self, data: bytearray) -> None:
        """
        Queues a message to be sent by the environment at the next call to
        step.
        """
        outgoing = OutgoingMessage()
        outgoing.set_raw_bytes(data)
        super().queue_message_to_send(outgoing)
|
RawBytesChannel
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/dsl/expressions/aggregation.py
|
{
"start": 682,
"end": 10124
}
|
class ____(Expr):
    # Expression node representing a single aggregation (min, max, sum, ...)
    # over its child expression, evaluated either frame-wide or in a groupby.
    __slots__ = ("context", "name", "op", "options", "request")
    _non_child = ("dtype", "name", "options", "context")

    def __init__(
        self,
        dtype: DataType,
        name: str,
        options: Any,
        context: ExecutionContext,
        *children: Expr,
    ) -> None:
        self.dtype = dtype
        self.name = name
        self.options = options
        self.is_pointwise = False
        self.children = children
        self.context = context
        if name not in Agg._SUPPORTED:
            raise NotImplementedError(
                f"Unsupported aggregation {name=}"
            )  # pragma: no cover; all valid aggs are supported
        # TODO: nan handling in groupby case
        # Map the polars aggregation name to a pylibcudf aggregation request.
        if name == "min":
            req = plc.aggregation.min()
        elif name == "max":
            req = plc.aggregation.max()
        elif name == "median":
            req = plc.aggregation.median()
        elif name == "n_unique":
            # TODO: datatype of result
            req = plc.aggregation.nunique(null_handling=plc.types.NullPolicy.INCLUDE)
        elif name == "first" or name == "last":
            # first/last are handled via nth_element in ``agg_request``.
            req = None
        elif name == "mean":
            req = plc.aggregation.mean()
        elif name == "sum":
            req = plc.aggregation.sum()
        elif name == "std":
            # TODO: handle nans
            req = plc.aggregation.std(ddof=options)
        elif name == "var":
            # TODO: handle nans
            req = plc.aggregation.variance(ddof=options)
        elif name == "count":
            req = plc.aggregation.count(
                null_handling=plc.types.NullPolicy.EXCLUDE
                if not options
                else plc.types.NullPolicy.INCLUDE
            )
        elif name == "quantile":
            child, quantile = self.children
            if not isinstance(quantile, Literal):
                raise NotImplementedError("Only support literal quantile values")
            if options == "equiprobable":
                raise NotImplementedError("Quantile with equiprobable interpolation")
            if plc.traits.is_duration(child.dtype.plc_type):
                raise NotImplementedError("Quantile with duration data type")
            req = plc.aggregation.quantile(
                quantiles=[quantile.value], interp=Agg.interp_mapping[options]
            )
        else:
            raise NotImplementedError(
                f"Unreachable, {name=} is incorrectly listed in _SUPPORTED"
            )  # pragma: no cover
        if (
            context == ExecutionContext.FRAME
            and req is not None
            and not plc.aggregation.is_valid_aggregation(dtype.plc_type, req)
        ):
            # TODO: Check which cases polars raises vs returns all-NULL column.
            # For the all-NULL column cases, we could build it using Column.all_null_like
            # at evaluation time.
            raise NotImplementedError(f"Invalid aggregation {req} with dtype {dtype}")
        self.request = req
        # Bind the per-aggregation evaluation callable (used in frame context).
        op = getattr(self, f"_{name}", None)
        if op is None:
            assert req is not None  # Ensure req is not None for _reduce
            op = partial(self._reduce, request=req)
        elif name in {"min", "max"}:
            op = partial(op, propagate_nans=options)
        elif name == "count":
            op = partial(op, include_nulls=options)
        elif name in {"sum", "first", "last"}:
            pass
        else:
            raise NotImplementedError(
                f"Unreachable, supported agg {name=} has no implementation"
            )  # pragma: no cover
        self.op = op

    _SUPPORTED: ClassVar[frozenset[str]] = frozenset(
        [
            "min",
            "max",
            "median",
            "n_unique",
            "first",
            "last",
            "mean",
            "sum",
            "count",
            "std",
            "var",
            "quantile",
        ]
    )

    # polars interpolation names -> pylibcudf interpolation enum members.
    interp_mapping: ClassVar[dict[str, plc.types.Interpolation]] = {
        "nearest": plc.types.Interpolation.NEAREST,
        "higher": plc.types.Interpolation.HIGHER,
        "lower": plc.types.Interpolation.LOWER,
        "midpoint": plc.types.Interpolation.MIDPOINT,
        "linear": plc.types.Interpolation.LINEAR,
    }

    @property
    def agg_request(self) -> plc.aggregation.Aggregation:  # noqa: D102
        if self.name == "first":
            return plc.aggregation.nth_element(
                0, null_handling=plc.types.NullPolicy.INCLUDE
            )
        elif self.name == "last":
            return plc.aggregation.nth_element(
                -1, null_handling=plc.types.NullPolicy.INCLUDE
            )
        else:
            assert self.request is not None, "Init should have raised"
            return self.request

    def _reduce(
        self, column: Column, *, request: plc.aggregation.Aggregation, stream: Stream
    ) -> Column:
        # Reduce ``column`` to a single-row column holding the aggregate.
        if (
            # For sum, this condition can only pass
            # after expression decomposition in the streaming
            # engine
            self.name in {"sum", "mean", "median"}
            and plc.traits.is_fixed_point(column.dtype.plc_type)
            and self.dtype.plc_type.id() in {plc.TypeId.FLOAT32, plc.TypeId.FLOAT64}
        ):
            column = column.astype(self.dtype, stream=stream)
        return Column(
            plc.Column.from_scalar(
                plc.reduce.reduce(
                    column.obj, request, self.dtype.plc_type, stream=stream
                ),
                1,
                stream=stream,
            ),
            name=column.name,
            dtype=self.dtype,
        )

    def _count(self, column: Column, *, include_nulls: bool, stream: Stream) -> Column:
        null_count = column.null_count if not include_nulls else 0
        return Column(
            plc.Column.from_scalar(
                plc.Scalar.from_py(
                    column.size - null_count, self.dtype.plc_type, stream=stream
                ),
                1,
                stream=stream,
            ),
            name=column.name,
            dtype=self.dtype,
        )

    def _sum(self, column: Column, stream: Stream) -> Column:
        # Empty or all-null input sums to 0 (matching polars semantics).
        if column.size == 0 or column.null_count == column.size:
            dtype = self.dtype.plc_type
            return Column(
                plc.Column.from_scalar(
                    plc.Scalar.from_py(
                        Decimal(0).scaleb(dtype.scale())
                        if plc.traits.is_fixed_point(dtype)
                        else 0,
                        dtype,
                        stream=stream,
                    ),
                    1,
                    stream=stream,
                ),
                name=column.name,
                dtype=self.dtype,
            )
        return self._reduce(column, request=plc.aggregation.sum(), stream=stream)

    def _min(self, column: Column, *, propagate_nans: bool, stream: Stream) -> Column:
        nan_count = column.nan_count(stream=stream)
        # With nan propagation, any NaN makes the result NaN.
        if propagate_nans and nan_count > 0:
            return Column(
                plc.Column.from_scalar(
                    plc.Scalar.from_py(
                        float("nan"), self.dtype.plc_type, stream=stream
                    ),
                    1,
                    stream=stream,
                ),
                name=column.name,
                dtype=self.dtype,
            )
        if nan_count > 0:
            column = column.mask_nans(stream=stream)
        return self._reduce(column, request=plc.aggregation.min(), stream=stream)

    def _max(self, column: Column, *, propagate_nans: bool, stream: Stream) -> Column:
        nan_count = column.nan_count(stream=stream)
        # With nan propagation, any NaN makes the result NaN.
        if propagate_nans and nan_count > 0:
            return Column(
                plc.Column.from_scalar(
                    plc.Scalar.from_py(
                        float("nan"), self.dtype.plc_type, stream=stream
                    ),
                    1,
                    stream=stream,
                ),
                name=column.name,
                dtype=self.dtype,
            )
        if nan_count > 0:
            column = column.mask_nans(stream=stream)
        return self._reduce(column, request=plc.aggregation.max(), stream=stream)

    def _first(self, column: Column, stream: Stream) -> Column:
        return Column(
            plc.copying.slice(column.obj, [0, 1], stream=stream)[0],
            name=column.name,
            dtype=self.dtype,
        )

    def _last(self, column: Column, stream: Stream) -> Column:
        n = column.size
        return Column(
            plc.copying.slice(column.obj, [n - 1, n], stream=stream)[0],
            name=column.name,
            dtype=self.dtype,
        )

    def do_evaluate(
        self, df: DataFrame, *, context: ExecutionContext = ExecutionContext.FRAME
    ) -> Column:
        """Evaluate this expression given a dataframe for context."""
        if context is not ExecutionContext.FRAME:
            raise NotImplementedError(
                f"Agg in context {context}"
            )  # pragma: no cover; unreachable
        # Aggregations like quantiles may have additional children that were
        # preprocessed into pylibcudf requests.
        child = self.children[0]
        return self.op(child.evaluate(df, context=context), stream=df.stream)
|
Agg
|
python
|
gevent__gevent
|
src/gevent/tests/test__refcount.py
|
{
"start": 3457,
"end": 3984
}
|
class ____(object):
server_data = None
def __init__(self, server_port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_port = server_port
def close(self):
self.socket.close()
self.socket = None
def make_request(self):
try:
self.socket.connect((params.DEFAULT_CONNECT, self.server_port))
self.socket.send(b'hello')
self.server_data = self.socket.recv(100)
finally:
self.close()
|
Client
|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/powerbi.py
|
{
"start": 1322,
"end": 2811
}
|
class ____(BaseTrigger):
"""
Base class for all PowerBI related triggers.
:param conn_id: The connection Id to connect to PowerBI.
:param timeout: The HTTP timeout being used by the `KiotaRequestAdapter` (default is None).
When no timeout is specified or set to None then there is no HTTP timeout on each request.
:param proxies: A dict defining the HTTP proxies to be used (default is None).
:param api_version: The API version of the Microsoft Graph API to be used (default is v1).
You can pass an enum named APIVersion which has 2 possible members v1 and beta,
or you can pass a string as `v1.0` or `beta`.
"""
def __init__(
self,
conn_id: str,
timeout: float = 60 * 60 * 24 * 7,
proxies: dict | None = None,
api_version: APIVersion | str | None = None,
):
super().__init__()
self.conn_id = conn_id
self.timeout = timeout
self.proxies = proxies
self.api_version = api_version
def get_conn(self) -> RequestAdapter:
"""
Initiate a new RequestAdapter connection.
.. warning::
This method is deprecated.
"""
return self.hook.get_conn()
@cached_property
def hook(self) -> PowerBIHook:
return PowerBIHook(
conn_id=self.conn_id,
timeout=self.timeout,
proxies=self.proxies,
api_version=self.api_version,
)
|
BasePowerBITrigger
|
python
|
huggingface__transformers
|
src/transformers/models/vit_mae/modeling_vit_mae.py
|
{
"start": 17755,
"end": 18256
}
|
class ____(nn.Module):
def __init__(self, config: ViTMAEConfig):
super().__init__()
self.attention = ViTMAESelfAttention(config)
self.output = ViTMAESelfOutput(config)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states)
output = self.output(self_attn_output, hidden_states)
return output
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate ViT->ViTMAE
|
ViTMAEAttention
|
python
|
wandb__wandb
|
wandb/vendor/watchdog_0_9_0/wandb_watchdog/tricks/__init__.py
|
{
"start": 858,
"end": 1358
}
|
class ____(PatternMatchingEventHandler):
"""Your tricks should subclass this class."""
@classmethod
def generate_yaml(cls):
context = dict(module_name=cls.__module__,
klass_name=cls.__name__)
template_yaml = """- %(module_name)s.%(klass_name)s:
args:
- argument1
- argument2
kwargs:
patterns:
- "*.py"
- "*.js"
ignore_patterns:
- "version.py"
ignore_directories: false
"""
return template_yaml % context
|
Trick
|
python
|
pytorch__pytorch
|
test/distributed/tensor/test_xla_integration.py
|
{
"start": 1206,
"end": 6636
}
|
class ____(TestCase):
class SimpleLinear(nn.Module):
def __init__(self) -> None:
super(DTensorXLAIntegrationTest.SimpleLinear, self).__init__()
self.fc1 = nn.Linear(128, 64)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(64, 1)
def forward(self, x):
y = self.relu(self.fc1(x))
z = self.fc2(y)
return z
@with_xla
def test_xla_distribute_tensor_1d_shard(self):
import torch_xla.runtime as xr # type:ignore[import]
device_count = xr.global_runtime_device_count()
if device_count > 1:
device_mesh = DeviceMesh("xla", list(range(device_count)))
shard_spec = [Shard(0)]
for requires_grad in [True, False]:
tensor_to_shard = torch.randn(
3 * device_count, 3, requires_grad=requires_grad
)
dist_tensor = distribute_tensor(
tensor_to_shard, device_mesh, shard_spec
)
# TODO(yeounoh) switch to DTensor API when XLAShardedTensor inherits DTensor
assert type(dist_tensor).__name__ == "XLAShardedTensor"
global_tensor = dist_tensor.global_tensor # type:ignore[attr-defined]
self.assertEqual(
global_tensor.size(), torch.Size([3 * device_count, 3])
)
local_tensor = dist_tensor.local_shards[0].data
self.assertEqual(local_tensor.size(), torch.Size([3, 3]))
if requires_grad:
self.assertTrue(dist_tensor.global_tensor.requires_grad)
self.assertTrue(dist_tensor.is_leaf)
@with_xla
def test_xla_distribute_tensor_1d_replicate(self):
import torch_xla.runtime as xr # type:ignore[import]
device_count = xr.global_runtime_device_count()
device_mesh = DeviceMesh("xla", list(range(device_count)))
shard_spec = [Replicate()]
for requires_grad in [True, False]:
tensor_to_shard = torch.randn(
3 * device_count, 3, requires_grad=requires_grad
)
dist_tensor = distribute_tensor(tensor_to_shard, device_mesh, shard_spec)
# TODO(yeounoh) switch to DTensor API when XLAShardedTensor inherits DTensor
assert type(dist_tensor).__name__ == "XLAShardedTensor"
global_tensor = dist_tensor.global_tensor # type:ignore[attr-defined]
self.assertEqual(global_tensor.size(), torch.Size([3 * device_count, 3]))
local_tensor = dist_tensor.local_shards[0].data
self.assertEqual(local_tensor.size(), torch.Size([3 * device_count, 3]))
if requires_grad:
self.assertTrue(dist_tensor.global_tensor.requires_grad)
self.assertTrue(dist_tensor.is_leaf)
@with_xla
def test_xla_distribute_tensor_2d(self):
import torch_xla.runtime as xr # type:ignore[import]
device_count = xr.global_runtime_device_count()
if device_count > 1:
device_mesh = DeviceMesh(
"xla", np.array(range(device_count)).reshape(2, device_count // 2)
)
shard_spec = [Replicate(), Shard(0)]
for requires_grad in [True, False]:
tensor_to_shard = torch.randn(
3 * device_count // 2, 3, requires_grad=requires_grad
)
dist_tensor = distribute_tensor(
tensor_to_shard, device_mesh, shard_spec
)
# TODO(yeounoh) switch to DTensor API when XLAShardedTensor inherits DTensor
assert type(dist_tensor).__name__ == "XLAShardedTensor"
global_tensor = dist_tensor.global_tensor # type:ignore[attr-defined]
self.assertEqual(
global_tensor.size(), torch.Size([3 * device_count // 2, 3])
)
local_tensor = dist_tensor.local_shards[0].data
self.assertEqual(local_tensor.size(), torch.Size([3, 3]))
if requires_grad:
self.assertTrue(dist_tensor.global_tensor.requires_grad)
self.assertTrue(dist_tensor.is_leaf)
@with_xla
def text_xla_distribute_module(self):
import torch_xla # type:ignore[import]
import torch_xla.core.xla_model as xm # type:ignore[import]
import torch_xla.runtime as xr # type:ignore[import]
model = self.SimpleLinear().to(xm.xla_device())
device_count = xr.global_runtime_device_count()
device_mesh = DeviceMesh("xla", list(range(device_count)))
def shard_params(mod_name, mod, mesh):
shard_spec = [Shard(0)]
# annotate fc1 and fc2
if isinstance(mod, nn.Linear):
for _, param in mod.named_parameters():
# annotate the parameter tensors directly
distribute_tensor(param, mesh, shard_spec)
sharded_model = distribute_module(model, device_mesh, shard_params)
self.assertTrue(
torch_xla._XLAC._get_xla_sharding_spec(sharded_model.fc1.weight) != ""
)
self.assertTrue(
torch_xla._XLAC._get_xla_sharding_spec(sharded_model.fc2.weight) != ""
)
if __name__ == "__main__":
run_tests()
|
DTensorXLAIntegrationTest
|
python
|
neetcode-gh__leetcode
|
python/0081-search-in-rotated-sorted-array-ii.py
|
{
"start": 0,
"end": 758
}
|
class ____:
def search(self, nums: List[int], target: int) -> bool:
left,right = 0,len(nums) - 1
while left <= right:
mid = left + (right - left) // 2
if nums[mid] == target:
return True
#Left sorted portion
if nums[left] < nums[mid]:
if nums[left] <= target < nums[mid]:
right = mid - 1
else:
left = mid + 1
#Right sorted portion
elif nums[left] > nums[mid]:
if nums[mid] < target <= nums[right]:
left = mid + 1
else:
right = mid - 1
else:
left += 1
return False
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/1564. Put Boxes Into the Warehouse I/1564.py
|
{
"start": 0,
"end": 400
}
|
class ____:
def maxBoxesInWarehouse(self, boxes: list[int], warehouse: list[int]) -> int:
realWarehouse = [warehouse[0]]
for i in range(1, len(warehouse)):
realWarehouse.append(min(realWarehouse[-1], warehouse[i]))
boxes.sort()
i = 0 # boxes' index
for height in reversed(realWarehouse):
if i < len(boxes) and boxes[i] <= height:
i += 1
return i
|
Solution
|
python
|
numpy__numpy
|
tools/swig/test/testVector.py
|
{
"start": 10898,
"end": 11163
}
|
class ____(VectorTestCase):
def __init__(self, methodName="runTest"):
VectorTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
######################################################################
|
ucharTestCase
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_project_repo_path_parsing.py
|
{
"start": 5183,
"end": 14792
}
|
class ____(BaseStacktraceLinkTest):
def setUp(self) -> None:
super().setUp()
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration, self.oi = self.create_provider_integration_for(
self.org,
self.user,
provider="github",
name="getsentry",
external_id="1234",
metadata={"domain_name": "github.com/getsentry"},
)
self.repo = self.create_repo(
project=self.project,
name="getsentry/sentry",
provider="integrations:github",
integration_id=self.integration.id,
url="https://github.com/getsentry/sentry",
)
self.create_repo(
project=self.project,
name="getsentry/getsentry",
provider="integrations:github",
integration_id=self.integration.id,
url="https://github.com/getsentry/getsentry",
)
def test_bad_source_url(self) -> None:
source_url = "github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 400, resp.content
assert resp.data == {"sourceUrl": ["Enter a valid URL."]}
def test_wrong_file(self) -> None:
source_url = "https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/project_releases.py"
stack_path = "sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 400, resp.content
assert resp.data == {
"sourceUrl": ["Source code URL points to a different file than the stack trace"]
}
def test_no_integration(self) -> None:
# create the integration but don't install it
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_provider_integration(
provider="github",
name="steve",
external_id="345",
metadata={"domain_name": "github.com/steve"},
)
source_url = "https://github.com/steve/sentry/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 400, resp.content
assert resp.data == {"sourceUrl": ["Could not find integration"]}
def test_no_repo(self) -> None:
source_url = "https://github.com/getsentry/snuba/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 400, resp.content
assert resp.data == {"sourceUrl": ["Could not find repo"]}
def test_unsupported_frame_info(self) -> None:
source_url = (
"https://github.com/getsentry/sentry/blob/master/src/project_stacktrace_link.py"
)
stack_path = "project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 400, resp.content
assert resp.data == {"detail": "Unsupported frame info"}
def test_basic(self) -> None:
source_url = "https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 200, resp.content
assert resp.data == {
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "github",
"stackRoot": "sentry/",
"sourceRoot": "src/sentry/",
"defaultBranch": "master",
}
def test_java_path(self) -> None:
src_file = "src/com/example/foo/Bar.kt"
source_url = f"https://github.com/getsentry/sentry/blob/master/{src_file}"
filename = "Bar.kt" # The filename in Java does not contain the package name
resp = self.make_post(
source_url,
filename,
module="com.example.foo.Bar", # The module misses the extension
abs_path="Bar.kt", # abs_path includes the extension
platform="java",
)
assert resp.status_code == 200, resp.content
assert resp.data == {
"defaultBranch": "master",
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "github",
"sourceRoot": "src/com/example/foo/",
"stackRoot": "com/example/foo/",
}
def test_short_path(self) -> None:
source_url = "https://github.com/getsentry/sentry/blob/main/project_stacktrace_link.py"
stack_path = "sentry/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 200, resp.content
assert resp.data == {
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "github",
"stackRoot": "sentry/",
"sourceRoot": "",
"defaultBranch": "main",
}
def test_long_root(self) -> None:
source_url = "https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "stuff/hey/here/sentry/api/endpoints/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 200, resp.content
assert resp.data == {
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "github",
"stackRoot": "stuff/hey/here/",
"sourceRoot": "src/",
"defaultBranch": "master",
}
def test_member_can_access(self) -> None:
source_url = "https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "stuff/hey/here/sentry/api/endpoints/project_stacktrace_link.py"
member = self.create_user("hernando@life.com")
self.create_member(user=member, organization=self.org, role="member")
resp = self.make_post(source_url, stack_path, user=member)
assert resp.status_code == 200, resp.content
def test_backslash_short_path(self) -> None:
source_url = "https://github.com/getsentry/sentry/blob/main/project_stacktrace_link.py"
stack_path = "C:\\sentry\\project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 200, resp.content
assert resp.data == {
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "github",
"stackRoot": "C:\\sentry\\",
"sourceRoot": "",
"defaultBranch": "main",
}
def test_backslash_long_path(self) -> None:
source_url = "https://github.com/getsentry/sentry/blob/main/src/sentry/api/endpoints/project_stacktrace_link.py"
stack_path = "C:\\potatos\\and\\prs\\sentry\\api\\endpoints\\project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 200, resp.content
assert resp.data == {
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "github",
"stackRoot": "C:\\potatos\\and\\prs\\",
"sourceRoot": "src/",
"defaultBranch": "main",
}
def test_trailing_slash_repo_url_short_path(self) -> None:
# Ensure branch parsing is correct when repo.url has a trailing slash
self.repo.update(url=f"{self.repo.url}/")
source_url = "https://github.com/getsentry/sentry/blob/main/project_stacktrace_link.py"
stack_path = "sentry/project_stacktrace_link.py"
resp = self.make_post(source_url, stack_path)
assert resp.status_code == 200, resp.content
assert resp.data == {
"integrationId": self.integration.id,
"repositoryId": self.repo.id,
"provider": "github",
"stackRoot": "sentry/",
"sourceRoot": "",
"defaultBranch": "main",
}
def test_second_repo_trailing_slash_default_branch_main(self) -> None:
# Ensure defaultBranch is parsed as 'main' for another repo with trailing slash in repo.url
second_repo = self.create_repo(
project=self.project,
name="getsentry/example",
provider="integrations:github",
integration_id=self.integration.id,
url="https://github.com/getsentry/example/",
)
source_url = "https://github.com/getsentry/example/blob/main/src/pkg/main.py"
stack_path = "/opt/app/src/pkg/main.py"
resp = self.make_post(
source_url,
stack_path,
module="pkg.main",
abs_path="/opt/app/src/pkg/main.py",
platform="python",
)
assert resp.status_code == 200, resp.content
assert resp.data["provider"] == "github"
assert resp.data["repositoryId"] == second_repo.id
assert resp.data["defaultBranch"] == "main"
|
ProjectStacktraceLinkGithubTest
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/triggers/dms.py
|
{
"start": 4225,
"end": 5719
}
|
class ____(AwsBaseWaiterTrigger):
"""
Trigger when an AWS DMS Serverless replication completes.
:param replication_config_arn: The ARN of the replication config.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
replication_config_arn: str,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
aws_conn_id: str | None = "aws_default",
) -> None:
super().__init__(
serialized_fields={"replication_config_arn": replication_config_arn},
waiter_name="replication_complete",
waiter_delay=waiter_delay,
waiter_args={"Filters": [{"Name": "replication-config-arn", "Values": [replication_config_arn]}]},
waiter_max_attempts=waiter_max_attempts,
failure_message="Replication failed to complete.",
status_message="Status replication is",
status_queries=["Replications[0].Status"],
return_key="replication_config_arn",
return_value=replication_config_arn,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return DmsHook(
self.aws_conn_id,
verify=self.verify,
config=self.botocore_config,
)
|
DmsReplicationCompleteTrigger
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/media_file_storage.py
|
{
"start": 1303,
"end": 4375
}
|
class ____(Protocol):
@abstractmethod
def load_and_get_id(
self,
path_or_data: str | bytes,
mimetype: str,
kind: MediaFileKind,
filename: str | None = None,
) -> str:
"""Load the given file path or bytes into the manager and return
an ID that uniquely identifies it.
It's an error to pass a URL to this function. (Media stored at
external URLs can be served directly to the Streamlit frontend;
there's no need to store this data in MediaFileStorage.)
Parameters
----------
path_or_data
A path to a file, or the file's raw data as bytes.
mimetype
The media's mimetype. Used to set the Content-Type header when
serving the media over HTTP.
kind
The kind of file this is: either MEDIA, or DOWNLOADABLE.
filename : str or None
Optional filename. Used to set the filename in the response header.
Returns
-------
str
The unique ID of the media file.
Raises
------
MediaFileStorageError
Raised if the media can't be loaded (for example, if a file
path is invalid).
"""
raise NotImplementedError
@abstractmethod
def get_url(self, file_id: str) -> str:
"""Return a URL for a file in the manager.
Parameters
----------
file_id
The file's ID, returned from load_media_and_get_id().
Returns
-------
str
A URL that the frontend can load the file from. Because this
URL may expire, it should not be cached!
Raises
------
MediaFileStorageError
Raised if the manager doesn't contain an object with the given ID.
"""
raise NotImplementedError
@abstractmethod
def delete_file(self, file_id: str) -> None:
"""Delete a file from the manager.
This should be called when a given file is no longer referenced
by any connected client, so that the MediaFileStorage can free its
resources.
Calling `delete_file` on a file_id that doesn't exist is allowed,
and is a no-op. (This means that multiple `delete_file` calls with
the same file_id is not an error.)
Note: implementations can choose to ignore `delete_file` calls -
this function is a *suggestion*, not a *command*. Callers should
not rely on file deletion happening immediately (or at all).
Parameters
----------
file_id
The file's ID, returned from load_media_and_get_id().
Returns
-------
None
Raises
------
MediaFileStorageError
Raised if file deletion fails for any reason. Note that these
failures will generally not be shown on the frontend (file
deletion usually occurs on session disconnect).
"""
raise NotImplementedError
|
MediaFileStorage
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 93305,
"end": 96909
}
|
class ____:
"""Use this factory class to generate the correct `xxxConfig` object for use when using the `collection.update()` method.
Each staticmethod provides options specific to the named configuration type in the function's name. Under-the-hood data validation steps
will ensure that any mis-specifications are caught before the request is sent to Weaviate. Only those configurations that are mutable are
available in this class. If you wish to update the configuration of an immutable aspect of your collection then you will have to delete
the collection and re-create it with the new configuration.
"""
NamedVectors = _NamedVectorsUpdate
Vectors = _VectorsUpdate
VectorIndex = _VectorIndexUpdate
Generative = _Generative # config is the same for create and update
Reranker = _Reranker # config is the same for create and update
@staticmethod
def inverted_index(
bm25_b: Optional[float] = None,
bm25_k1: Optional[float] = None,
cleanup_interval_seconds: Optional[int] = None,
stopwords_additions: Optional[List[str]] = None,
stopwords_preset: Optional[StopwordsPreset] = None,
stopwords_removals: Optional[List[str]] = None,
) -> _InvertedIndexConfigUpdate:
"""Create an `InvertedIndexConfigUpdate` object.
Use this method when defining the `inverted_index_config` argument in `collection.update()`.
Args:
See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#configure-the-inverted-index) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _InvertedIndexConfigUpdate(
bm25=_BM25ConfigUpdate(b=bm25_b, k1=bm25_k1),
cleanupIntervalSeconds=cleanup_interval_seconds,
stopwords=_StopwordsUpdate(
preset=stopwords_preset,
additions=stopwords_additions,
removals=stopwords_removals,
),
)
@staticmethod
def replication(
factor: Optional[int] = None,
async_enabled: Optional[bool] = None,
deletion_strategy: Optional[ReplicationDeletionStrategy] = None,
) -> _ReplicationConfigUpdate:
"""Create a `ReplicationConfigUpdate` object.
Use this method when defining the `replication_config` argument in `collection.update()`.
Args:
factor: The replication factor.
async_enabled: Enable async replication.
deletion_strategy: How conflicts between different nodes about deleted objects are resolved.
"""
return _ReplicationConfigUpdate(
factor=factor,
asyncEnabled=async_enabled,
deletionStrategy=deletion_strategy,
)
@staticmethod
def multi_tenancy(
auto_tenant_creation: Optional[bool] = None,
auto_tenant_activation: Optional[bool] = None,
) -> _MultiTenancyConfigUpdate:
"""Create a `MultiTenancyConfigUpdate` object.
Use this method when defining the `multi_tenancy` argument in `collection.update()`.
Args:
auto_tenant_creation: When set, implicitly creates nonexistent tenants during object creation
auto_tenant_activation: Automatically turn tenants implicitly HOT when they are accessed. Defaults to `None`, which uses the server-defined default.
"""
return _MultiTenancyConfigUpdate(
autoTenantCreation=auto_tenant_creation,
autoTenantActivation=auto_tenant_activation,
)
|
Reconfigure
|
python
|
pypa__setuptools
|
setuptools/_distutils/compilers/C/errors.py
|
{
"start": 492,
"end": 573
}
|
class ____(Error):
"""Attempt to process an unknown file type."""
|
UnknownFileType
|
python
|
eventlet__eventlet
|
tests/patcher_test.py
|
{
"start": 612,
"end": 1718
}
|
class ____(tests.LimitedTestCase):
TEST_TIMEOUT = 3 # starting processes is time-consuming
def setUp(self):
super().setUp()
self._saved_syspath = sys.path
self.tempdir = tempfile.mkdtemp('_patcher_test')
def tearDown(self):
super().tearDown()
sys.path = self._saved_syspath
shutil.rmtree(self.tempdir)
def write_to_tempfile(self, name, contents):
filename = os.path.join(self.tempdir, name)
if not filename.endswith('.py'):
filename = filename + '.py'
with open(filename, "w") as fd:
fd.write(contents)
def launch_subprocess(self, filename):
path = os.path.join(self.tempdir, filename)
output = tests.run_python(path)
output = output.decode('utf-8')
separator = '\n'
lines = output.split(separator)
return output, lines
def run_script(self, contents, modname=None):
if modname is None:
modname = "testmod"
self.write_to_tempfile(modname, contents)
return self.launch_subprocess(modname)
|
ProcessBase
|
python
|
psf__black
|
tests/data/cases/remove_newline_after_code_block_open.py
|
{
"start": 363,
"end": 1981
}
|
class ____:
def bar(self):
print("The newline above me should be kept!")
for i in range(5):
print(f"{i}) The line above me should be kept!")
for i in range(5):
print(f"{i}) The lines above me should be kept!")
for i in range(5):
for j in range(7):
print(f"{i}) The lines above me should be kept!")
if random.randint(0, 3) == 0:
print("The new line above me will be kept!")
if random.randint(0, 3) == 0:
print("The new lines above me will be kept!")
if random.randint(0, 3) == 0:
if random.uniform(0, 1) > 0.5:
print("Two lines above me will be kept!")
while True:
print("The newline above me should be kept!")
while True:
print("The newlines above me should be kept!")
while True:
while False:
print("The newlines above me should be kept!")
with open("/path/to/file.txt", mode="w") as file:
file.write("The new line above me will be kept!")
with open("/path/to/file.txt", mode="w") as file:
file.write("The new lines above me will be kept!")
with open("/path/to/file.txt", mode="r") as read_file:
with open("/path/to/output_file.txt", mode="w") as write_file:
write_file.writelines(read_file.readlines())
# output
import random
def foo1():
print("The newline above me should be kept!")
def foo2():
print("All the newlines above me should be kept!")
def foo3():
print("No newline above me!")
print("There is a newline above me, and that's OK!")
def foo4():
# There is a comment here
print("The newline above me should not be deleted!")
|
Foo
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 519530,
"end": 519916
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "repository")
field = sgqlc.types.Field(
sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field"
)
repository = sgqlc.types.Field("Repository", graphql_name="repository")
|
ProjectV2ItemFieldRepositoryValue
|
python
|
huggingface__transformers
|
src/transformers/models/lxmert/modeling_lxmert.py
|
{
"start": 30801,
"end": 38064
}
|
class ____(LxmertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = LxmertEmbeddings(config)
self.encoder = LxmertEncoder(config)
self.pooler = LxmertPooler(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
visual_feats: Optional[torch.FloatTensor] = None,
visual_pos: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
visual_attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[LxmertModelOutput, tuple[torch.FloatTensor]]:
r"""
visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
This input represents visual features. They ROI pooled object features from bounding boxes using a
faster-RCNN model)
These are currently not provided by the transformers library.
visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
These are currently not provided by the transformers library.
visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if visual_feats is None:
raise ValueError("`visual_feats` cannot be `None`")
if visual_pos is None:
raise ValueError("`visual_pos` cannot be `None`")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and the dtype's smallest value for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
# Process the visual attention mask
if visual_attention_mask is not None:
extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min
else:
extended_visual_attention_mask = None
# Positional Word Embeddings
embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
# Run Lxmert encoder
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
visual_feats=visual_feats,
visual_pos=visual_pos,
visual_attention_mask=extended_visual_attention_mask,
output_attentions=output_attentions,
)
visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
vision_hidden_states = visual_encoder_outputs[0]
language_hidden_states = lang_encoder_outputs[0]
all_attentions = ()
if output_attentions:
language_attentions = lang_encoder_outputs[1]
vision_attentions = visual_encoder_outputs[1]
cross_encoder_attentions = encoder_outputs[2]
all_attentions = (
language_attentions,
vision_attentions,
cross_encoder_attentions,
)
hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
visual_output = vision_hidden_states[-1]
lang_output = language_hidden_states[-1]
pooled_output = self.pooler(lang_output)
if not return_dict:
return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
return LxmertModelOutput(
pooled_output=pooled_output,
language_output=lang_output,
vision_output=visual_output,
language_hidden_states=language_hidden_states if output_hidden_states else None,
vision_hidden_states=vision_hidden_states if output_hidden_states else None,
language_attentions=language_attentions if output_attentions else None,
vision_attentions=vision_attentions if output_attentions else None,
cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
)
@auto_docstring
|
LxmertModel
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 46129,
"end": 46284
}
|
class ____(Elemwise):
_projection_passthrough = True
_parameters = ["frame", "value"]
_defaults = {"value": None}
operation = M.fillna
|
Fillna
|
python
|
numpy__numpy
|
tools/swig/test/testSuperTensor.py
|
{
"start": 14892,
"end": 15217
}
|
class ____(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "ulongLong"
self.typeCode = "Q"
#self.result = int(self.result)
######################################################################
|
ulongLongTestCase
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/datafusion.py
|
{
"start": 25794,
"end": 29445
}
|
class ____(GoogleCloudBaseOperator):
"""
Lists Cloud Data Fusion pipelines.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionListPipelinesOperator`
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param artifact_version: Artifact version to filter instances
:param artifact_name: Artifact name to filter instances
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"artifact_name",
"artifact_version",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelinesLink(),)
def __init__(
self,
*,
instance_name: str,
location: str,
artifact_name: str | None = None,
artifact_version: str | None = None,
namespace: str = "default",
project_id: str = PROVIDE_PROJECT_ID,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.artifact_version = artifact_version
self.artifact_name = artifact_name
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Listing Data Fusion pipelines")
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
service_endpoint = instance["serviceEndpoint"]
pipelines = hook.list_pipelines(
instance_url=api_url,
namespace=self.namespace,
artifact_version=self.artifact_version,
artifact_name=self.artifact_name,
)
self.log.info("Pipelines: %s", pipelines)
DataFusionPipelinesLink.persist(
context=context,
uri=service_endpoint,
namespace=self.namespace,
)
return pipelines
|
CloudDataFusionListPipelinesOperator
|
python
|
sympy__sympy
|
sympy/physics/quantum/pauli.py
|
{
"start": 4797,
"end": 6636
}
|
class ____(SigmaOpBase):
"""Pauli sigma z operator
Parameters
==========
name : str
An optional string that labels the operator. Pauli operators with
different names commute.
Examples
========
>>> from sympy.physics.quantum import represent
>>> from sympy.physics.quantum.pauli import SigmaZ
>>> sz = SigmaZ()
>>> sz ** 3
SigmaZ()
>>> represent(sz)
Matrix([
[1, 0],
[0, -1]])
"""
def __new__(cls, *args, **hints):
return SigmaOpBase.__new__(cls, *args)
def _eval_commutator_SigmaX(self, other, **hints):
if self.name != other.name:
return S.Zero
else:
return 2 * I * SigmaY(self.name)
def _eval_commutator_SigmaY(self, other, **hints):
if self.name != other.name:
return S.Zero
else:
return - 2 * I * SigmaX(self.name)
def _eval_anticommutator_SigmaX(self, other, **hints):
return S.Zero
def _eval_anticommutator_SigmaY(self, other, **hints):
return S.Zero
def _eval_adjoint(self):
return self
def _print_contents_latex(self, printer, *args):
if self.use_name:
return r'{\sigma_z^{(%s)}}' % str(self.name)
else:
return r'{\sigma_z}'
def _print_contents(self, printer, *args):
return 'SigmaZ()'
def _eval_power(self, e):
if e.is_Integer and e.is_positive:
return SigmaZ(self.name).__pow__(int(e) % 2)
def _represent_default_basis(self, **options):
format = options.get('format', 'sympy')
if format == 'sympy':
return Matrix([[1, 0], [0, -1]])
else:
raise NotImplementedError('Representation in format ' +
format + ' not implemented.')
|
SigmaZ
|
python
|
pennersr__django-allauth
|
tests/apps/socialaccount/providers/figma/tests.py
|
{
"start": 238,
"end": 741
}
|
class ____(OAuth2TestsMixin, TestCase):
provider_id = FigmaProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"id": "2600",
"email": "johndoe@example.com",
"handle": "John Doe",
"img_url": "https://www.example.com/image.png"
}
""",
)
def get_expected_to_str(self):
return "John Doe"
|
FigmaTests
|
python
|
cython__cython
|
Cython/Compiler/PyrexTypes.py
|
{
"start": 75479,
"end": 77772
}
|
class ____(CType):
#
# Base class for all C numeric types.
#
# rank integer Relative size
# signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed
#
is_numeric = 1
default_value = "0"
has_attributes = True
scope = None
sign_words = ("unsigned ", "", "signed ")
def __init__(self, rank, signed = 1):
self.rank = rank
if rank > 0 and signed == SIGNED:
# Signed is meaningless for anything but char, and complicates
# type promotion.
signed = 1
self.signed = signed
def sign_and_name(self):
s = self.sign_words[self.signed]
n = rank_to_type_name[self.rank]
return s + n
def is_simple_buffer_dtype(self):
return True
def __repr__(self):
return "<CNumericType %s>" % self.sign_and_name()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
type_name = self.sign_and_name()
if pyrex or for_display:
base_code = type_name.replace('PY_LONG_LONG', 'long long')
else:
base_code = public_decl(type_name, dll_linkage)
base_code = StringEncoding.EncodedString(base_code)
return self.base_declaration_code(base_code, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern",
parent_type=self)
scope.directives = {}
scope.declare_cfunction(
"conjugate",
CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
pos=None,
defining=1,
cname=" ")
return True
def __lt__(self, other):
"""Sort based on rank, preferring signed over unsigned"""
if other.is_numeric:
return self.rank > other.rank and self.signed >= other.signed
# Prefer numeric types over others
return True
def py_type_name(self):
if self.rank <= 4:
return "int"
return "float"
|
CNumericType
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/runtime/caching/hashing_test.py
|
{
"start": 4410,
"end": 24623
}
|
class ____(unittest.TestCase):
def test_string(self):
assert get_hash("hello") == get_hash("hello")
assert get_hash("hello") != get_hash("hellö")
def test_int(self):
assert get_hash(145757624235) == get_hash(145757624235)
assert get_hash(10) != get_hash(11)
assert get_hash(-1) != get_hash(1)
assert get_hash(2**7) != get_hash(2**7 - 1)
assert get_hash(2**7) != get_hash(2**7 + 1)
def test_uuid(self):
uuid1 = uuid.uuid4()
uuid1_copy = uuid.UUID(uuid1.hex)
uuid2 = uuid.uuid4()
# Our hashing functionality should work with UUIDs
# regardless of UUID factory function.
uuid3 = uuid.uuid5(uuid.NAMESPACE_DNS, "streamlit.io")
uuid3_copy = uuid.UUID(uuid3.hex)
uuid4 = uuid.uuid5(uuid.NAMESPACE_DNS, "snowflake.com")
assert get_hash(uuid1) == get_hash(uuid1_copy)
assert id(uuid1) != id(uuid1_copy)
assert get_hash(uuid1) != get_hash(uuid2)
assert get_hash(uuid3) == get_hash(uuid3_copy)
assert id(uuid3) != id(uuid3_copy)
assert get_hash(uuid3) != get_hash(uuid4)
def test_datetime_naive(self):
naive_datetime1 = datetime.datetime(2007, 12, 23, 15, 45, 55)
naive_datetime1_copy = datetime.datetime(2007, 12, 23, 15, 45, 55)
naive_datetime3 = datetime.datetime(2011, 12, 21, 15, 45, 55)
assert get_hash(naive_datetime1) == get_hash(naive_datetime1_copy)
assert id(naive_datetime1) != id(naive_datetime1_copy)
assert get_hash(naive_datetime1) != get_hash(naive_datetime3)
def test_datetime_aware(self):
tz_info = datetime.timezone.utc
aware_datetime1 = datetime.datetime(2007, 12, 23, 15, 45, 55, tzinfo=tz_info)
aware_datetime1_copy = datetime.datetime(
2007, 12, 23, 15, 45, 55, tzinfo=tz_info
)
aware_datetime2 = datetime.datetime(2011, 12, 21, 15, 45, 55, tzinfo=tz_info)
# naive datetime1 is the same datetime that aware_datetime,
# but without timezone info. They should have different hashes.
naive_datetime1 = datetime.datetime(2007, 12, 23, 15, 45, 55)
assert get_hash(aware_datetime1) == get_hash(aware_datetime1_copy)
assert id(aware_datetime1) != id(aware_datetime1_copy)
assert get_hash(aware_datetime1) != get_hash(aware_datetime2)
assert get_hash(aware_datetime1) != get_hash(naive_datetime1)
@parameterized.expand(
[
"US/Pacific",
"America/Los_Angeles",
"Europe/Berlin",
"UTC",
None, # check for naive too
]
)
def test_pandas_timestamp(self, tz_info):
timestamp1 = pd.Timestamp("2017-01-01T12", tz=tz_info)
timestamp1_copy = pd.Timestamp("2017-01-01T12", tz=tz_info)
timestamp2 = pd.Timestamp("2019-01-01T12", tz=tz_info)
assert get_hash(timestamp1) == get_hash(timestamp1_copy)
assert id(timestamp1) != id(timestamp1_copy)
assert get_hash(timestamp1) != get_hash(timestamp2)
def test_mocks_do_not_result_in_infinite_recursion(self):
try:
get_hash(Mock())
get_hash(MagicMock())
except BaseException:
self.fail("get_hash raised an exception")
def test_list(self):
assert get_hash([1, 2]) == get_hash([1, 2])
assert get_hash([1, 2]) != get_hash([2, 2])
assert get_hash([1]) != get_hash(1)
# test that we can hash self-referencing lists
a = [1, 2, 3]
a.append(a)
b = [1, 2, 3]
b.append(b)
assert get_hash(a) == get_hash(b)
@parameterized.expand(
[("cache_data", cache_data), ("cache_resource", cache_resource)]
)
def test_recursive_hash_func(self, _, cache_decorator):
"""Test that if user defined hash_func returns the value of the same type
that hash_funcs tries to cache, we break the recursive cycle with predefined
placeholder"""
def hash_int(x):
return x
@cache_decorator(hash_funcs={int: hash_int})
def foo(x):
return x
assert foo(1) == foo(1)
# Note: We're able to break the recursive cycle caused by the identity
# hash func but it causes all cycles to hash to the same thing.
# https://github.com/streamlit/streamlit/issues/1659
def test_tuple(self):
assert get_hash((1, 2)) == get_hash((1, 2))
assert get_hash((1, 2)) != get_hash((2, 2))
assert get_hash((1,)) != get_hash(1)
assert get_hash((1,)) != get_hash([1])
def test_mappingproxy(self):
a = types.MappingProxyType({"a": 1})
b = types.MappingProxyType({"a": 1})
c = types.MappingProxyType({"c": 1})
assert get_hash(a) == get_hash(b)
assert get_hash(a) != get_hash(c)
def test_dict_items(self):
a = types.MappingProxyType({"a": 1}).items()
b = types.MappingProxyType({"a": 1}).items()
c = types.MappingProxyType({"c": 1}).items()
assert is_type(a, "builtins.dict_items")
assert get_hash(a) == get_hash(b)
assert get_hash(a) != get_hash(c)
def test_getset_descriptor(self):
class A:
x = 1
class B:
x = 1
a = A.__dict__["__dict__"]
b = B.__dict__["__dict__"]
assert is_type(a, "builtins.getset_descriptor")
assert get_hash(a) == get_hash(a)
assert get_hash(a) != get_hash(b)
def test_dict(self):
assert get_hash({1: 1}) == get_hash({1: 1})
assert get_hash({1: 1}) != get_hash({1: 2})
assert get_hash({1: 1}) != get_hash([(1, 1)])
dict_gen = {1: (x for x in range(1))}
with pytest.raises(UnhashableTypeError):
get_hash(dict_gen)
def test_self_reference_dict(self):
d1 = {"cat": "hat"}
d2 = {"things": [1, 2]}
assert get_hash(d1) == get_hash(d1)
assert get_hash(d1) != get_hash(d2)
# test that we can hash self-referencing dictionaries
d2 = {"book": d1}
assert get_hash(d2) != get_hash(d1)
def test_float(self):
assert get_hash(0.1) == get_hash(0.1)
assert get_hash(23.5234) != get_hash(23.5235)
def test_bool(self):
assert get_hash(True) == get_hash(True)
assert get_hash(True) != get_hash(False)
def test_none(self):
assert get_hash(None) == get_hash(None)
assert get_hash(None) != get_hash(False)
def test_builtins(self):
assert get_hash(abs) == get_hash(abs)
assert get_hash(abs) != get_hash(type)
def test_regex(self):
p2 = re.compile(".*")
p1 = re.compile(".*")
p3 = re.compile(".*", re.IGNORECASE)
assert get_hash(p1) == get_hash(p2)
assert get_hash(p1) != get_hash(p3)
def test_pandas_large_dataframe(self):
df1 = pd.DataFrame(np.zeros((_PANDAS_ROWS_LARGE, 4)), columns=list("ABCD"))
df2 = pd.DataFrame(np.ones((_PANDAS_ROWS_LARGE, 4)), columns=list("ABCD"))
df3 = pd.DataFrame(np.zeros((_PANDAS_ROWS_LARGE, 4)), columns=list("ABCD"))
assert get_hash(df1) == get_hash(df3)
assert get_hash(df1) != get_hash(df2)
@pytest.mark.usefixtures("benchmark")
def test_pandas_large_dataframe_performance(self):
df1 = pd.DataFrame(np.zeros((_PANDAS_ROWS_LARGE, 4)), columns=list("ABCD"))
self.benchmark(lambda: get_hash(df1))
@parameterized.expand(
[
(pd.DataFrame({"foo": [12]}), pd.DataFrame({"foo": [12]}), True),
(pd.DataFrame({"foo": [12]}), pd.DataFrame({"foo": [42]}), False),
(
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}),
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}),
True,
),
# Extra column
(
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}),
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4], "C": [1, 2, 3]}),
False,
),
# Different values
(
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}),
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 5]}),
False,
),
# Different order
(
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}),
pd.DataFrame(data={"B": [1, 2, 3], "A": [2, 3, 4]}),
False,
),
# Different index
(
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]),
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 4]),
False,
),
# Missing column
(
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}),
pd.DataFrame(data={"A": [1, 2, 3]}),
False,
),
# Different sort
(
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}).sort_values(
by=["A"]
),
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}).sort_values(
by=["B"], ascending=False
),
False,
),
# Different headers
(
pd.DataFrame(data={"A": [1, 2, 3], "C": [2, 3, 4]}),
pd.DataFrame(data={"A": [1, 2, 3], "B": [2, 3, 4]}),
False,
),
# Reordered columns
(
pd.DataFrame(data={"A": [1, 2, 3], "C": [2, 3, 4]}),
pd.DataFrame(data={"C": [2, 3, 4], "A": [1, 2, 3]}),
False,
),
# Slightly different dtypes
(
pd.DataFrame(
data={"A": [1, 2, 3], "C": pd.array([1, 2, 3], dtype="UInt64")}
),
pd.DataFrame(
data={"A": [1, 2, 3], "C": pd.array([1, 2, 3], dtype="Int64")}
),
False,
),
]
)
def test_pandas_dataframe(self, df1, df2, expected):
result = get_hash(df1) == get_hash(df2)
assert result == expected
def test_pandas_series(self):
series1 = pd.Series([1, 2])
series2 = pd.Series([1, 3])
series3 = pd.Series([1, 2])
assert get_hash(series1) == get_hash(series3)
assert get_hash(series1) != get_hash(series2)
series4 = pd.Series(range(_PANDAS_ROWS_LARGE))
series5 = pd.Series(range(_PANDAS_ROWS_LARGE))
assert get_hash(series4) == get_hash(series5)
@pytest.mark.require_integration
def test_polars_series(self):
import polars as pl # type: ignore[import-not-found]
series1 = pl.Series([1, 2])
series2 = pl.Series([1, 3])
series3 = pl.Series([1, 2])
assert get_hash(series1) == get_hash(series3)
assert get_hash(series1) != get_hash(series2)
series4 = pl.Series(range(_PANDAS_ROWS_LARGE))
series5 = pl.Series(range(_PANDAS_ROWS_LARGE))
assert get_hash(series4) == get_hash(series5)
@pytest.mark.require_integration
@parameterized.expand(prepare_polars_data(), skip_on_empty=True)
def test_polars_dataframe(self, df1, df2, expected):
result = get_hash(df1) == get_hash(df2)
assert result == expected
@pytest.mark.require_integration
def test_polars_large_dataframe(self):
import polars as pl
df1 = pl.DataFrame(np.zeros((_PANDAS_ROWS_LARGE, 4)), schema=list("abcd"))
df2 = pl.DataFrame(np.ones((_PANDAS_ROWS_LARGE, 4)), schema=list("abcd"))
df3 = pl.DataFrame(np.zeros((_PANDAS_ROWS_LARGE, 4)), schema=list("abcd"))
assert get_hash(df1) == get_hash(df3)
assert get_hash(df1) != get_hash(df2)
@pytest.mark.usefixtures("benchmark")
def test_polars_large_dataframe_performance(self):
# We put the try/except here to avoid the test failing if polars is not
# installed rather than marking it as `require_integration`,
# because it should participate in the benchmarking.
try:
import polars as pl
df1 = pl.DataFrame(np.zeros((_PANDAS_ROWS_LARGE, 4)), schema=list("abcd"))
self.benchmark(lambda: get_hash(df1))
except ImportError:
# Skip if polars is not installed.
pass
def test_pandas_series_similar_dtypes(self):
series1 = pd.Series([1, 2], dtype="UInt64")
series2 = pd.Series([1, 2], dtype="Int64")
assert get_hash(series1) != get_hash(series2)
def test_numpy(self):
np1 = np.zeros(10)
np2 = np.zeros(11)
np3 = np.zeros(10)
assert get_hash(np1) == get_hash(np3)
assert get_hash(np1) != get_hash(np2)
np4 = np.zeros(_NP_SIZE_LARGE)
np5 = np.zeros(_NP_SIZE_LARGE)
assert get_hash(np4) == get_hash(np5)
def test_numpy_similar_dtypes(self):
np1 = np.ones(10, dtype="u8")
np2 = np.ones(10, dtype="i8")
np3 = np.ones(10, dtype=[("a", "u8"), ("b", "i8")])
np4 = np.ones(10, dtype=[("a", "i8"), ("b", "u8")])
assert get_hash(np1) != get_hash(np2)
assert get_hash(np3) != get_hash(np4)
def test_PIL_image(self):
im1 = Image.new("RGB", (50, 50), (220, 20, 60))
im2 = Image.new("RGB", (50, 50), (30, 144, 255))
im3 = Image.new("RGB", (50, 50), (220, 20, 60))
assert get_hash(im1) == get_hash(im3)
assert get_hash(im1) != get_hash(im2)
# Check for big PIL images, they converted to numpy array with size
# bigger than _NP_SIZE_LARGE
# 1000 * 1000 * 3 = 3_000_000 > _NP_SIZE_LARGE = 1_000_000
im4 = Image.new("RGB", (1000, 1000), (100, 20, 60))
im5 = Image.new("RGB", (1000, 1000), (100, 20, 60))
im6 = Image.new("RGB", (1000, 1000), (101, 21, 61))
im4_np_array = np.frombuffer(im4.tobytes(), dtype="uint8")
assert im4_np_array.size > _NP_SIZE_LARGE
assert get_hash(im4) == get_hash(im5)
assert get_hash(im5) != get_hash(im6)
@pytest.mark.require_integration
def test_pydantic_model(self):
"""Test that Pydantic models are properly hashed.
Verifies that:
- The same model instance hashes consistently
- Two identical model instances produce the same hash
- Models with different field values produce different hashes
- Different model classes with the same field values produce different hashes
"""
import pydantic
class Foo(pydantic.BaseModel):
name: str
class Bar(pydantic.BaseModel):
name: str
m1 = Foo(name="fake_name1")
m1_again = Foo(name="fake_name1")
m2 = Foo(name="fake_name2")
m3 = Bar(name="fake_name1")
assert get_hash(m1) == get_hash(m1)
assert get_hash(m1) == get_hash(m1_again)
assert get_hash(m1) != get_hash(m2)
assert get_hash(m1) != get_hash(m3)
@parameterized.expand(
[
(BytesIO, b"123", b"456", b"123"),
(StringIO, "123", "456", "123"),
]
)
def test_io(self, io_type, io_data1, io_data2, io_data3):
io1 = io_type(io_data1)
io2 = io_type(io_data2)
io3 = io_type(io_data3)
assert get_hash(io1) == get_hash(io3)
assert get_hash(io1) != get_hash(io2)
# Changing the stream position should change the hash
io1.seek(1)
io3.seek(0)
assert get_hash(io1) != get_hash(io3)
def test_uploaded_file_io(self):
rec1 = UploadedFileRec("file1", "name", "type", b"123")
rec2 = UploadedFileRec("file1", "name", "type", b"456")
rec3 = UploadedFileRec("file1", "name", "type", b"123")
io1 = UploadedFile(
rec1, FileURLs(file_id=rec1.file_id, upload_url="u1", delete_url="d1")
)
io2 = UploadedFile(
rec2, FileURLs(file_id=rec2.file_id, upload_url="u2", delete_url="d2")
)
io3 = UploadedFile(
rec3, FileURLs(file_id=rec3.file_id, upload_url="u3", delete_url="u3")
)
assert get_hash(io1) == get_hash(io3)
assert get_hash(io1) != get_hash(io2)
# Changing the stream position should change the hash
io1.seek(1)
io3.seek(0)
assert get_hash(io1) != get_hash(io3)
def test_partial(self):
p1 = functools.partial(int, base=2)
p2 = functools.partial(int, base=3)
p3 = functools.partial(int, base=2)
assert get_hash(p1) == get_hash(p3)
assert get_hash(p1) != get_hash(p2)
def test_files(self):
temp1 = tempfile.NamedTemporaryFile()
temp2 = tempfile.NamedTemporaryFile()
with open(__file__) as f:
with open(__file__) as g:
assert get_hash(f) == get_hash(g)
assert get_hash(f) != get_hash(temp1)
assert get_hash(temp1) == get_hash(temp1)
assert get_hash(temp1) != get_hash(temp2)
def test_file_position(self):
with open(__file__) as f:
h1 = get_hash(f)
assert h1 == get_hash(f)
f.readline()
assert h1 != get_hash(f)
f.seek(0)
assert h1 == get_hash(f)
def test_magic_mock(self):
"""MagicMocks never hash to the same thing."""
# (This also tests that MagicMock can hash at all, without blowing the
# stack due to an infinite recursion.)
assert get_hash(MagicMock()) != get_hash(MagicMock())
def test_dataclass(self):
@dataclass(frozen=True, eq=True)
class Data:
foo: str
bar = Data("bar")
assert get_hash(bar)
def test_enum(self):
"""The hashing function returns the same result when called with the same
Enum members."""
class EnumClass(Enum):
ENUM_1 = auto()
ENUM_2 = auto()
# Hash values should be stable
assert get_hash(EnumClass.ENUM_1) == get_hash(EnumClass.ENUM_1)
# Different enum values should produce different hashes
assert get_hash(EnumClass.ENUM_1) != get_hash(EnumClass.ENUM_2)
def test_different_enums(self):
"""Different enum classes should have different hashes, even when the enum
values are the same."""
class EnumClassA(Enum):
ENUM_1 = "hello"
class EnumClassB(Enum):
ENUM_1 = "hello"
enum_a = EnumClassA.ENUM_1
enum_b = EnumClassB.ENUM_1
assert get_hash(enum_a) != get_hash(enum_b)
def test_reduce_not_hashable(self):
class A:
def __init__(self):
self.x = [1, 2, 3]
with pytest.raises(UnhashableTypeError):
get_hash(A().__reduce__())
def test_reduce_fallback(self):
"""Test that objects with __reduce__ method can be hashed using the fallback mechanism."""
class CustomClass:
def __init__(self, value):
self.value = value
def __reduce__(self):
return (CustomClass, (self.value,))
obj1 = CustomClass(42)
obj2 = CustomClass(42)
obj3 = CustomClass(43)
# Same objects should hash to the same value
assert get_hash(obj1) == get_hash(obj2)
# Different objects should hash to different values
assert get_hash(obj1) != get_hash(obj3)
# Test with a more complex object
class ComplexClass:
def __init__(self, name, items):
self.name = name
self.items = items
def __reduce__(self):
return (ComplexClass, (self.name, self.items))
complex_obj1 = ComplexClass("test", [1, 2, 3])
complex_obj2 = ComplexClass("test", [1, 2, 3])
complex_obj3 = ComplexClass("test", [1, 2, 4])
assert get_hash(complex_obj1) == get_hash(complex_obj2)
assert get_hash(complex_obj1) != get_hash(complex_obj3)
|
HashTest
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
|
{
"start": 6378,
"end": 7380
}
|
class ____(KeyValueParser):
"""Composite argument parser for Windows remote key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
provider=ChoicesParser(REMOTE_PROVIDERS),
arch=ChoicesParser(REMOTE_ARCHITECTURES),
connection=ChoicesParser(WINDOWS_CONNECTIONS),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section_name = 'remote options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
f' connection={ChoicesParser(WINDOWS_CONNECTIONS).document(state)}',
])
return f'{{{section_name}}}'
|
WindowsRemoteKeyValueParser
|
python
|
pyca__cryptography
|
src/cryptography/x509/extensions.py
|
{
"start": 54140,
"end": 54914
}
|
class ____(ExtensionType):
oid = CRLEntryExtensionOID.CRL_REASON
def __init__(self, reason: ReasonFlags) -> None:
if not isinstance(reason, ReasonFlags):
raise TypeError("reason must be an element from ReasonFlags")
self._reason = reason
def __repr__(self) -> str:
return f"<CRLReason(reason={self._reason})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, CRLReason):
return NotImplemented
return self.reason == other.reason
def __hash__(self) -> int:
return hash(self.reason)
@property
def reason(self) -> ReasonFlags:
return self._reason
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
|
CRLReason
|
python
|
openai__openai-python
|
src/openai/types/audio/transcription_create_params.py
|
{
"start": 5502,
"end": 6156
}
|
class ____(TranscriptionCreateParamsBase, total=False):
stream: Optional[Literal[False]]
"""
If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
for more information.
Note: Streaming is not supported for the `whisper-1` model and will be ignored.
"""
|
TranscriptionCreateParamsNonStreaming
|
python
|
pytorch__pytorch
|
torch/_inductor/ir.py
|
{
"start": 246971,
"end": 254474
}
|
class ____(ExternKernel):
def get_kernel_and_metadata(self) -> tuple[Kernel, Any, list[str], list[str]]:
from triton.runtime.autotuner import Autotuner
from torch._higher_order_ops.triton_kernel_wrap import kernel_side_table
kernel = kernel_side_table.get_kernel(self.kernel_idx)
configs = []
restore_value_args: list[str] = []
reset_to_zero_args: list[str] = []
if isinstance(kernel, Autotuner):
# https://github.com/triton-lang/triton/pull/5083
# changes kernel.restore_idx to kernel.restore_value
if hasattr(kernel, "restore_idx"):
restore_value_args.extend(
kernel.fn.arg_names[i] for i in kernel.restore_idx
)
else:
assert hasattr(kernel, "restore_value")
restore_value_args.extend(kernel.restore_value)
if hasattr(kernel, "reset_idx"):
for i in kernel.reset_idx:
reset_to_zero_args.append(kernel.fn.arg_names[i])
else:
assert hasattr(kernel, "reset_to_zero")
reset_to_zero_args.extend(kernel.reset_to_zero)
configs = kernel.configs
kernel = kernel.fn
# pyrefly: ignore # bad-return
return kernel, configs, restore_value_args, reset_to_zero_args
@override
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
"""Overrides the parent member.
See https://github.com/pytorch/pytorch/issues/151692"""
from torch._inductor.utils import triton_version_uses_attrs_dict
(
kernel,
configs,
restore_value_args,
reset_to_zero_args,
) = self.get_kernel_and_metadata()
# Definition of kernel
(
new_name,
triton_meta,
extra_launch_args,
) = wrapper.define_user_defined_triton_kernel(
kernel,
configs,
self.kwargs,
restore_value_args,
reset_to_zero_args,
self.grid,
)
named_args = {
k: self.get_kwargs_value(k) for k in self.ordered_kwargs_for_cpp_kernel
}
arg_names = [p.name for p in kernel.params] # type: ignore[attr-defined]
constexprs = [p.num for p in kernel.params if p.is_constexpr] # type: ignore[attr-defined]
constexpr_names = OrderedSet(arg_names[i] for i in constexprs)
args: list[Any] = []
arg_types: list[Any] = []
raw_keys_filtered: list[Any] = []
raw_args_filtered: list[Any] = []
for name, arg in itertools.chain(
named_args.items(), zip(itertools.repeat(""), extra_launch_args)
):
if name in constexpr_names and triton_version_uses_attrs_dict():
# see #160000 - we don't pass in constexpr args to speed up runtime.
continue
raw_keys_filtered.append(name)
raw_args_filtered.append(arg)
if isinstance(arg, IRNode):
args.append(arg.codegen_reference())
arg_types.append(arg.get_dtype())
elif isinstance(arg, (int, float, bool, sympy.Expr)):
args.append(arg)
arg_types.append(type(arg))
elif name in constexpr_names:
# insert a dummy value for constexpr args of unsupported type
# constexprs will end up getting baked into the kernel at compile time
args.append(-1)
arg_types.append(int)
elif arg is None:
"""
Filter out None args.
see https://github.com/pytorch/pytorch/issues/115344
Two cases for a None arg:
1. The arg is already tl.constexpr, so leave it in
2. The arg is not tl.constexpr so we have to remove it
"""
if triton_version_uses_attrs_dict():
args.append(-1)
arg_types.append(int)
else:
raw_keys_filtered.pop()
raw_args_filtered.pop()
else:
raise NotImplementedError(f"Unsupported arg type: {type(arg)}: {arg}")
self.codegen_comment(wrapper, new_name)
wrapper.generate_kernel_call(
new_name,
args,
arg_types=arg_types,
raw_args=raw_args_filtered,
raw_keys=raw_keys_filtered,
triton_meta=triton_meta,
triton=True,
device=self.get_device(),
original_fxnode_name=self.fx_node.name,
)
@cache_on_self_and_args("UserDefinedTritonKernel")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
# add unbacked symbols used in the grid to the ones used
# in the kwargs (the latter is generated by ExternKernel)
return super().get_free_symbol_uses(unbacked_only) | get_free_symbols(
self.grid, unbacked_only
)
def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:
return OrderedSet()
def __init__(
self,
*,
kernel_idx: int,
grid: Any,
tma_descriptor_metadata: dict[str, Any],
kernel_args: dict[str, Any],
) -> None:
inputs: list[IRNode] = []
kwargs: dict[str, IRNode] = {}
constant_args: list[IRNode] = []
for k, v in kernel_args.items():
if isinstance(v, TensorBox):
t = InputsKernel.unwrap_storage_for_input(self.realize_input(v))
if k in tma_descriptor_metadata:
t = TMADescriptor.create(t, tma_descriptor_metadata[k])
inputs.append(t)
kwargs[k] = t
else:
constant_args.append(v)
kwargs[k] = v
assert len(inputs) != 0
self.device = inputs[0].get_device()
assert isinstance(inputs, Sequence), type(inputs)
super().__init__(
None,
NoneLayout(device=self.device),
inputs,
tuple(constant_args),
kwargs,
)
self.kernel_idx = kernel_idx
self.grid = grid
kernel, configs, _, _ = self.get_kernel_and_metadata()
# If we are autotuning, not all arguments will be passed
assert hasattr(kernel, "arg_names")
self.ordered_kwargs_for_cpp_kernel = [
arg for arg in kernel.arg_names if arg in kernel_args
]
from torch._higher_order_ops.triton_kernel_wrap import identify_mutated_tensors
autotuned_kwargs = configs[0].kwargs if len(configs) > 0 else {}
self.mutable_args = [
kernel_args[key]
for key in identify_mutated_tensors(
# pyrefly: ignore # bad-argument-type
kernel,
{**kernel_args, **autotuned_kwargs},
tma_descriptor_metadata,
)
]
self.mutation_outputs = [
MutationOutput(NoneLayout(device=self.device), buf, self)
for buf in self.mutable_args
]
V.graph.register_operation(self)
def get_outputs(self) -> list[Buffer]:
return list(self.mutation_outputs)
def get_device(self) -> Optional[torch.device]:
return self.device
|
UserDefinedTritonKernel
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/language_models/fake_chat_models.py
|
{
"start": 5985,
"end": 6940
}
|
class ____(SimpleChatModel):
"""Fake Chat Model wrapper for testing purposes."""
@override
def _call(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
return "fake response"
@override
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
output_str = "fake response"
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@property
def _llm_type(self) -> str:
return "fake-chat-model"
@property
def _identifying_params(self) -> dict[str, Any]:
return {"key": "fake"}
|
FakeChatModel
|
python
|
viewflow__viewflow
|
viewflow/workflow/flow/views/filters.py
|
{
"start": 345,
"end": 611
}
|
class ____(DateRangeFilter):
def filter(self, qs, value):
if not value:
if not self.parent.data.get("status"):
return qs.filter(**{f"{self.field_name}__isnull": True})
return super().filter(qs, value)
|
NullDateRangeFilter
|
python
|
pydantic__pydantic
|
pydantic/_internal/_decorators_v1.py
|
{
"start": 3867,
"end": 4075
}
|
class ____(Protocol):
"""A simple root validator, supported for V1 validators and V2 validators."""
def __call__(self, __values: RootValidatorValues) -> RootValidatorValues: ...
|
V1RootValidatorFunction
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/black/cases/preview_comments7.py
|
{
"start": 1867,
"end": 4303
}
|
class ____:
@pytest.mark.parametrize(
("post_data", "message"),
[
# metadata_version errors.
(
{},
"None is an invalid value for Metadata-Version. Error: This field is"
" required. see"
" https://packaging.python.org/specifications/core-metadata"
),
(
{"metadata_version": "-1"},
"'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata"
" Version see"
" https://packaging.python.org/specifications/core-metadata"
),
# name errors.
(
{"metadata_version": "1.2"},
"'' is an invalid value for Name. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata"
),
(
{"metadata_version": "1.2", "name": "foo-"},
"'foo-' is an invalid value for Name. Error: Must start and end with a"
" letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata"
),
# version errors.
(
{"metadata_version": "1.2", "name": "example"},
"'' is an invalid value for Version. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata"
),
(
{"metadata_version": "1.2", "name": "example", "version": "dog"},
"'dog' is an invalid value for Version. Error: Must start and end with"
" a letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata"
)
]
)
def test_fails_invalid_post_data(
self, pyramid_config, db_request, post_data, message
):
...
square = Square(4) # type: Optional[Square]
# Regression test for https://github.com/psf/black/issues/3756.
[
(
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
),
]
[
( # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
),
]
|
C
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_organization_releases.py
|
{
"start": 49591,
"end": 82320
}
|
class ____(APITestCase):
def test_empty_release_version(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
project2 = self.create_project(name="bar", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url, data={"version": "", "projects": [project.slug, project2.slug]}
)
assert response.status_code == 400
def test_minimal(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
project2 = self.create_project(name="bar", organization=org, teams=[team])
project3 = self.create_project(name="bar2", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project.slug, project2.slug]},
HTTP_USER_AGENT="sentry-cli/2.77.4",
)
assert response.status_code == 201, response.content
assert response.data["version"]
release = Release.objects.get(
version=response.data["version"], user_agent="sentry-cli/2.77.4"
)
assert not release.owner_id
assert release.organization == org
assert ReleaseProject.objects.filter(release=release, project=project).exists()
assert ReleaseProject.objects.filter(release=release, project=project2).exists()
assert not ReleaseProject.objects.filter(release=release, project=project3).exists()
def test_minimal_with_id(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
project2 = self.create_project(name="bar", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project.id, project2.id]},
HTTP_USER_AGENT="sentry-cli/2.77.4",
)
assert response.status_code == 201, response.content
assert response.data["version"]
release = Release.objects.get(
version=response.data["version"], user_agent="sentry-cli/2.77.4"
)
assert not release.owner_id
assert release.organization == org
assert ReleaseProject.objects.filter(release=release, project=project).exists()
assert ReleaseProject.objects.filter(release=release, project=project2).exists()
def test_minimal_with_slug_and_id(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
project2 = self.create_project(name="bar", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project.id, project2.slug]},
HTTP_USER_AGENT="sentry-cli/2.77.4",
)
assert response.status_code == 201, response.content
assert response.data["version"]
release = Release.objects.get(
version=response.data["version"], user_agent="sentry-cli/2.77.4"
)
assert not release.owner_id
assert release.organization == org
assert ReleaseProject.objects.filter(release=release, project=project).exists()
assert ReleaseProject.objects.filter(release=release, project=project2).exists()
def test_duplicate(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
provider="dummy", name="my-org/my-repository", organization_id=org.id
)
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release = Release.objects.create(version="1.2.1", organization=org)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
with self.tasks():
response = self.client.post(
url,
data={
"version": "1.2.1",
"projects": [project.slug],
"refs": [
{
"repository": "my-org/my-repository",
"commit": "a" * 40,
"previousCommit": "c" * 40,
}
],
},
)
release_commits1 = list(
ReleaseCommit.objects.filter(release=release)
.order_by("order")
.values_list("commit__key", flat=True)
)
# check that commits are overwritten
assert release_commits1 == [
"62de626b7c7cfb8e77efb4273b1a3df4123e6216",
"58de626b7c7cfb8e77efb4273b1a3df4123e6345",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
]
# should be 201 because project was added
assert response.status_code == 201, response.content
with self.tasks():
with patch.object(DummyRepositoryProvider, "compare_commits") as mock_compare_commits:
mock_compare_commits.return_value = [
{"id": "c" * 40, "repository": repo.name},
{"id": "d" * 40, "repository": repo.name},
{"id": "a" * 40, "repository": repo.name},
]
response2 = self.client.post(
url,
data={
"version": "1.2.1",
"projects": [project.slug],
"refs": [
{
"repository": "my-org/my-repository",
"commit": "a" * 40,
"previousCommit": "b" * 40,
}
],
},
)
release_commits2 = list(
ReleaseCommit.objects.filter(release=release)
.order_by("order")
.values_list("commit__key", flat=True)
)
# check that commits are overwritten
assert release_commits2 == [
"cccccccccccccccccccccccccccccccccccccccc",
"dddddddddddddddddddddddddddddddddddddddd",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
]
assert response2.status_code == 208, response.content
assert Release.objects.filter(version="1.2.1", organization=org).count() == 1
# make sure project was added
assert ReleaseProject.objects.filter(release=release, project=project).exists()
def test_activity(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
project2 = self.create_project(name="bar", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release = Release.objects.create(
version="1.2.1", date_released=timezone.now(), organization=org
)
release.add_project(project)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(url, data={"version": "1.2.1", "projects": [project.slug]})
assert response.status_code == 208, response.content
response = self.client.post(
url, data={"version": "1.2.1", "projects": [project.slug, project2.slug]}
)
# should be 201 because 1 project was added
assert response.status_code == 201, response.content
assert not Activity.objects.filter(
type=ActivityType.RELEASE.value, project=project, ident=release.version
).exists()
assert Activity.objects.filter(
type=ActivityType.RELEASE.value, project=project2, ident=release.version
).exists()
def test_activity_with_long_release(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
project2 = self.create_project(name="bar", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release = Release.objects.create(
version="x" * 65, date_released=timezone.now(), organization=org
)
release.add_project(project)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(url, data={"version": "x" * 65, "projects": [project.slug]})
assert response.status_code == 208, response.content
response = self.client.post(
url, data={"version": "x" * 65, "projects": [project.slug, project2.slug]}
)
# should be 201 because 1 project was added
assert response.status_code == 201, response.content
assert not Activity.objects.filter(
type=ActivityType.RELEASE.value, project=project, ident=release.version[:64]
).exists()
assert Activity.objects.filter(
type=ActivityType.RELEASE.value, project=project2, ident=release.version[:64]
).exists()
def test_version_whitespace(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(url, data={"version": "1.2.3\n", "projects": [project.slug]})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "\n1.2.3", "projects": [project.slug]})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.\n2.3", "projects": [project.slug]})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.2.3\f", "projects": [project.slug]})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.2.3\t", "projects": [project.slug]})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.2.3+dev", "projects": [project.slug]})
assert response.status_code == 201, response.content
assert response.data["version"] == "1.2.3+dev"
release = Release.objects.get(organization_id=org.id, version=response.data["version"])
assert not release.owner_id
def test_features(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.create_member(teams=[team], user=self.user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url, data={"version": "1.2.1", "owner": self.user.email, "projects": [project.slug]}
)
assert response.status_code == 201, response.content
assert response.data["version"]
release = Release.objects.get(organization_id=org.id, version=response.data["version"])
assert release.owner_id == self.user.id
def test_commits(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url,
data={
"version": "1.2.1",
"commits": [{"id": "a" * 40}, {"id": "b" * 40}],
"projects": [project.slug],
},
)
assert response.status_code == 201, (response.status_code, response.content)
assert response.data["version"]
release = Release.objects.get(organization_id=org.id, version=response.data["version"])
rc_list = list(
ReleaseCommit.objects.filter(release=release)
.select_related("commit", "commit__author")
.order_by("order")
)
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id
@patch("sentry.tasks.commits.fetch_commits")
def test_commits_from_provider(self, mock_fetch_commits: MagicMock) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
organization_id=org.id, name="example/example", provider="dummy"
)
repo2 = Repository.objects.create(
organization_id=org.id, name="example/example2", provider="dummy"
)
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
self.client.post(
url,
data={
"version": "1",
"refs": [
{"commit": "0" * 40, "repository": repo.name},
{"commit": "0" * 40, "repository": repo2.name},
],
"projects": [project.slug],
},
)
response = self.client.post(
url,
data={
"version": "1.2.1",
"refs": [
{"commit": "a" * 40, "repository": repo.name},
{"commit": "b" * 40, "repository": repo2.name},
],
"projects": [project.slug],
},
)
assert response.status_code == 201
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
"release_id": Release.objects.get(version="1.2.1", organization=org).id,
"user_id": user.id,
"refs": [
{"commit": "a" * 40, "repository": repo.name},
{"commit": "b" * 40, "repository": repo2.name},
],
"prev_release_id": Release.objects.get(version="1", organization=org).id,
}
)
@patch("sentry.tasks.commits.fetch_commits")
def test_commits_from_provider_deprecated_head_commits(
self, mock_fetch_commits: MagicMock
) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
organization_id=org.id, name="example/example", provider="dummy"
)
repo2 = Repository.objects.create(
organization_id=org.id, name="example/example2", provider="dummy"
)
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
self.client.post(
url,
data={
"version": "1",
"headCommits": [
{"currentId": "0" * 40, "repository": repo.name},
{"currentId": "0" * 40, "repository": repo2.name},
],
"projects": [project.slug],
},
)
response = self.client.post(
url,
data={
"version": "1.2.1",
"headCommits": [
{"currentId": "a" * 40, "repository": repo.name},
{"currentId": "b" * 40, "repository": repo2.name},
],
"projects": [project.slug],
},
format="json",
)
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
"release_id": Release.objects.get(version="1.2.1", organization=org).id,
"user_id": user.id,
"refs": [
{"commit": "a" * 40, "repository": repo.name, "previousCommit": None},
{"commit": "b" * 40, "repository": repo2.name, "previousCommit": None},
],
"prev_release_id": Release.objects.get(version="1", organization=org).id,
}
)
assert response.status_code == 201
def test_commits_lock_conflict(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
# Simulate a concurrent request by using an existing release
# that has its commit lock taken out.
release = self.create_release(project, self.user, version="1.2.1")
lock = locks.get(Release.get_lock_key(org.id, release.id), duration=10, name="release")
lock.acquire()
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url,
data={
"version": release.version,
"commits": [{"id": "a" * 40}, {"id": "b" * 40}],
"projects": [project.slug],
},
)
assert response.status_code == 409, (response.status_code, response.content)
assert "Release commits" in response.data["detail"]
def test_bad_project_slug(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url, data={"version": "1.2.1", "projects": [project.slug, "banana"]}
)
assert response.status_code == 400
assert b"Invalid project ids or slugs" in response.content
def test_project_permissions(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
release2 = Release.objects.create(
organization_id=org.id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project2)
release3 = Release.objects.create(
organization_id=org.id,
version="3",
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386, tzinfo=UTC),
)
release3.add_project(project1)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url, data={"version": "1.2.1", "projects": [project1.slug, project2.slug]}
)
assert response.status_code == 400
assert b"Invalid project ids or slugs" in response.content
response = self.client.post(url, data={"version": "1.2.1", "projects": [project1.slug]})
assert response.status_code == 201, response.content
def test_api_key(self) -> None:
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
org2 = self.create_organization()
team1 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
# test right org, wrong permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
bad_api_key = ApiKey.objects.create(organization_id=org.id, scope_list=["project:read"])
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project1.slug]},
HTTP_AUTHORIZATION=self.create_basic_auth_header(bad_api_key.key),
)
assert response.status_code == 403
# test wrong org, right permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
wrong_org_api_key = ApiKey.objects.create(
organization_id=org2.id, scope_list=["project:write"]
)
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project1.slug]},
HTTP_AUTHORIZATION=self.create_basic_auth_header(wrong_org_api_key.key),
)
assert response.status_code == 403
# test right org, right permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
good_api_key = ApiKey.objects.create(
organization_id=org.id, scope_list=["project:write"]
)
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project1.slug]},
HTTP_AUTHORIZATION=self.create_basic_auth_header(good_api_key.key),
)
assert response.status_code == 201, response.content
def test_org_auth_token(self) -> None:
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
org2 = self.create_organization()
team1 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
# test right org, wrong permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
bad_token_str = generate_token(org.slug, "")
OrgAuthToken.objects.create(
organization_id=org.id,
name="token 1",
token_hashed=hash_token(bad_token_str),
token_last_characters="ABCD",
scope_list=[],
date_last_used=None,
)
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project1.slug]},
HTTP_AUTHORIZATION=f"Bearer {bad_token_str}",
)
assert response.status_code == 403
# test wrong org, right permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
wrong_org_token_str = generate_token(org2.slug, "")
OrgAuthToken.objects.create(
organization_id=org2.id,
name="org2 token 1",
token_hashed=hash_token(wrong_org_token_str),
token_last_characters="ABCD",
scope_list=["org:ci"],
date_last_used=None,
)
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project1.slug]},
HTTP_AUTHORIZATION=f"Bearer {wrong_org_token_str}",
)
assert response.status_code == 403
# test right org, right permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
good_token_str = generate_token(org.slug, "")
OrgAuthToken.objects.create(
organization_id=org.id,
name="token 2",
token_hashed=hash_token(good_token_str),
token_last_characters="ABCD",
scope_list=["org:ci"],
date_last_used=None,
)
with outbox_runner():
response = self.client.post(
url,
data={"version": "1.2.1", "projects": [project1.slug]},
HTTP_AUTHORIZATION=f"Bearer {good_token_str}",
)
assert response.status_code == 201, response.content
# Make sure org token usage was updated
with assume_test_silo_mode(SiloMode.CONTROL):
org_token = OrgAuthToken.objects.get(token_hashed=hash_token(good_token_str))
assert org_token.date_last_used is not None
assert org_token.project_last_used_id == project1.id
@patch("sentry.tasks.commits.fetch_commits")
def test_api_token(self, mock_fetch_commits: MagicMock) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
organization_id=org.id, name="getsentry/sentry", provider="dummy"
)
repo2 = Repository.objects.create(
organization_id=org.id, name="getsentry/sentry-plugins", provider="dummy"
)
with assume_test_silo_mode(SiloMode.CONTROL):
api_token = ApiToken.objects.create(user=user, scope_list=["project:releases"])
team1 = self.create_team(organization=org)
self.create_member(teams=[team1], user=user, organization=org)
project1 = self.create_project(teams=[team1], organization=org)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url,
data={
"version": "1.2.1",
"refs": [
{"commit": "a" * 40, "repository": repo.name, "previousCommit": "c" * 40},
{"commit": "b" * 40, "repository": repo2.name},
],
"projects": [project1.slug],
},
HTTP_AUTHORIZATION=f"Bearer {api_token.token}",
)
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
"release_id": Release.objects.get(version="1.2.1", organization=org).id,
"user_id": user.id,
"refs": [
{"commit": "a" * 40, "repository": repo.name, "previousCommit": "c" * 40},
{"commit": "b" * 40, "repository": repo2.name},
],
"prev_release_id": release1.id,
}
)
assert response.status_code == 201
def test_bad_repo_name(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(name="foo", organization=org, teams=[team])
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.post(
url,
data={
"version": "1.2.1",
"projects": [project.slug],
"refs": [{"repository": "not_a_repo", "commit": "a" * 40}],
},
)
assert response.status_code == 400
assert response.data == {"refs": ["Invalid repository names: not_a_repo"]}
|
OrganizationReleaseCreateTest
|
python
|
wandb__wandb
|
tools/perf/scripts/bench_run_log.py
|
{
"start": 759,
"end": 11258
}
|
class ____:
"""Generates a payload for logging in the performance testing.
Args:
data_type: The type of data to log.
sparse_metric_count: Number of sparse metrics to log.
metric_key_size: The size (in characters) of the metric.
num_steps: Number of steps in the test.
fraction: The fraction (%) of the base payload to log per step.
is_unique_payload: If true, every step logs a unique payload
dense_metric_count: Number of dense metrics (logged every step)
sparse_stride_size: Number of steps to skip before logging the sparse metrics
"""
def __init__(
self,
*,
data_type: Literal[
"scalar", "audio", "video", "image", "table", "prefixed_scalar"
],
sparse_metric_count: int,
metric_key_size: int,
num_steps: int,
fraction: float,
is_unique_payload: bool,
dense_metric_count: int,
sparse_stride_size: int,
):
self.data_type = data_type
self.sparse_metric_count = sparse_metric_count
self.metric_key_size = metric_key_size
self.num_steps = num_steps
self.fraction = fraction
self.is_unique_payload = is_unique_payload
self.dense_metric_count = dense_metric_count
self.sparse_stride_size = sparse_stride_size
self.sparse_metrics = None
self.metrics_count_per_step = int(self.sparse_metric_count * self.fraction)
if self.is_unique_payload:
# every step use a unique payload
self.num_of_unique_payload = self.num_steps
elif self.fraction < 1.0:
# every step logs a subset of a base payload
self.num_of_unique_payload = int(
self.sparse_metric_count // self.metrics_count_per_step
)
else:
# every step logs the same set of base payload
self.num_of_unique_payload = 1
logger.info(f"dense_metric_count: {self.dense_metric_count}")
logger.info(
f"metrics_count_per_step: {self.metrics_count_per_step + self.dense_metric_count}"
)
logger.info(f"num_of_unique_payload: {self.num_of_unique_payload}")
def random_string(self, size: int) -> str:
"""Generates a random string of a given size.
Args:
size: The size of the string.
Returns:
str: A random string of the given size.
"""
return "".join(
random.choices(string.ascii_letters + string.digits + "_", k=size)
)
def generate(self) -> list[dict]:
"""Generates a list of payload for logging.
Returns:
List: A list of dictionary with payloads.
Raises:
ValueError: If the data type is invalid.
"""
if self.data_type == "audio":
return self.generate_audio()
elif self.data_type == "scalar":
return self.generate_scalar()
elif self.data_type == "table":
return self.generate_table()
elif self.data_type == "image":
return self.generate_image()
elif self.data_type == "video":
return self.generate_video()
elif self.data_type == "prefixed_scalar":
return self.generate_prefixed_scalar()
else:
raise ValueError(f"Invalid data type: {self.data_type}")
def generate_audio(self) -> list[dict[str, wandb.Audio]]:
"""Generates a payload for logging audio data.
Returns:
List: A list of dictionary with the audio data.
"""
payloads = []
for _ in range(self.num_of_unique_payload):
duration = 5 # make a 5s long audio
sample_rate = 44100
frequency = 440
t = np.linspace(0, duration, int(sample_rate * duration), endpoint=False)
audio_data = np.sin(2 * np.pi * frequency * t)
audio_obj = wandb.Audio(audio_data, sample_rate=sample_rate)
payloads.append(
{
self.random_string(self.metric_key_size): audio_obj
for _ in range(self.sparse_metric_count)
}
)
return payloads
def generate_scalar(self) -> list[dict[str, int]]:
"""Generates the payloads for logging scalar data.
Returns:
List: A list of dictionaries with the scalar data.
"""
# Generate dense metrics if applicable
dense_metrics = {
self.random_string(self.metric_key_size): random.randint(1, 10**2)
for _ in range(self.dense_metric_count)
}
# Log example dense metric if available
if dense_metrics:
example_key = next(iter(dense_metrics))
logger.info(f"Example dense metric: {example_key}")
if self.sparse_stride_size > 0:
# Generate a single payload for sparse logging every X steps
self.sparse_metrics = {
f"sparse/acc{i}": random.randint(1, 10**2)
for i in range(self.sparse_metric_count)
}
payloads = [{**dense_metrics}]
else:
# Generate payloads with sparse metrics + optional dense metrics prepended
payloads = [
{
**dense_metrics,
**{
self.random_string(self.metric_key_size): random.randint(
1, 10**2
)
for _ in range(self.metrics_count_per_step)
},
}
for _ in range(self.num_of_unique_payload)
]
return payloads
def generate_prefixed_scalar(self) -> list[dict[str, int]]:
"""Generates the payloads for logging scalar data with prefixes.
This makes all the runs in the same project to have the repeating metric names.
Returns:
List: A list of dictionaries with the scalar data.
"""
# Generate dense metrics if applicable
dense_metrics = {
f"dense/accuracy{i}": random.randint(1, 10**2)
for i in range(self.dense_metric_count)
}
# Log example dense metric if available
if dense_metrics:
example_key = next(iter(dense_metrics))
logger.info(f"Example dense metric: {example_key}")
if self.sparse_stride_size > 0:
# Generate a single payload for sparse logging every X steps
self.sparse_metrics = {
f"sparse/acc{i}": random.randint(1, 10**2)
for i in range(self.sparse_metric_count)
}
payloads = [{**dense_metrics}]
else:
# Generate payloads with sparse metrics + optional dense metrics prepended
payloads = [
{
**dense_metrics,
**{
f"eval{x}/loss{i}": random.randint(1, 10**2)
for i in range(self.metrics_count_per_step // 2)
},
**{
f"rank{x}/accuracy{i}": random.randint(1, 10**2)
for i in range(self.metrics_count_per_step // 2)
},
}
for x in range(self.num_of_unique_payload)
]
return payloads
def generate_table(self) -> list[dict[str, wandb.Table]]:
"""Generates a payload for logging 1 table.
For the table, it uses
self.sparse_metric_count as the number of columns
self.metric_key_size as the number of rows.
Returns:
List: A dictionary with the table data.
"""
payloads = []
for p in range(self.num_of_unique_payload):
num_of_columns = self.sparse_metric_count
num_of_rows = self.metric_key_size
columns = [f"Field_{i + 1}" for i in range(num_of_columns)]
data = [
[
self.random_string(self.metric_key_size)
for _ in range(num_of_columns)
]
for _ in range(num_of_rows)
]
table = wandb.Table(columns=columns, data=data)
payloads.append({f"table_{p}": table})
return payloads
def generate_image(self) -> list[dict[str, wandb.Image]]:
"""Generates a payload for logging images.
Returns:
List: A list of dictionary with image data.
"""
payloads = []
for _ in range(self.num_of_unique_payload):
# Create a random RGB image (100x100 pixels)
# Each pixel value is an integer between 0 and 255 for RGB channels
random_image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
image_obj = wandb.Image(random_image, caption="Random image")
payloads.append(
{
self.random_string(self.metric_key_size): image_obj
for _ in range(self.sparse_metric_count)
}
)
return payloads
def generate_video(self) -> list[dict[str, wandb.Video]]:
"""Generates a payload for logging videos.
This function creates HD videos that are 1280 x 720 with 16 frames per second as payload
for logging. It used self.metric_key_size as the video length in second.
Returns:
List: A list of dictionary with video data.
"""
payloads = []
# Video properties for HD video
frame_width = 1280
frame_height = 720
fps = 16
video_len_in_sec = self.metric_key_size
video_prefixes = ["video_acc", "video_prob", "video_loss", "video_labels"]
for i in range(self.num_of_unique_payload):
frames = np.random.randint(
0,
256,
(video_len_in_sec * fps, frame_height, frame_width, 3),
dtype=np.uint8,
)
video_obj = wandb.Video(
frames, fps=fps, caption=f"Randomly generated video {i}"
)
payloads.append(
{
f"{video_prefixes[s % 4]}/{i}_{s}": video_obj
for s in range(self.sparse_metric_count)
}
)
return payloads
|
PayloadGenerator
|
python
|
dask__distributed
|
distributed/comm/inproc.py
|
{
"start": 642,
"end": 2279
}
|
class ____:
"""
An object coordinating listeners and their addresses.
"""
def __init__(self):
self.listeners = weakref.WeakValueDictionary()
self.addr_suffixes = itertools.count(1)
self._ip = None
self.lock = threading.Lock()
@property
def ip(self):
if not self._ip:
try:
self._ip = get_ip()
except OSError:
self._ip = "127.0.0.1"
return self._ip
def add_listener(self, addr, listener):
with self.lock:
if addr in self.listeners:
raise RuntimeError(f"already listening on {addr!r}")
self.listeners[addr] = listener
def remove_listener(self, addr):
with self.lock:
try:
del self.listeners[addr]
except KeyError:
pass
def get_listener_for(self, addr):
with self.lock:
self.validate_address(addr)
return self.listeners.get(addr)
def new_address(self):
return "%s/%d/%s" % (self.ip, os.getpid(), next(self.addr_suffixes))
def validate_address(self, addr):
"""
Validate the address' IP and pid.
"""
ip, pid, suffix = addr.split("/")
if ip != self.ip or int(pid) != os.getpid():
raise ValueError(
"inproc address %r does not match host (%r) or pid (%r)"
% (addr, self.ip, os.getpid())
)
global_manager = Manager()
def new_address():
"""
Generate a new address.
"""
return "inproc://" + global_manager.new_address()
|
Manager
|
python
|
pikepdf__pikepdf
|
src/pikepdf/models/encryption.py
|
{
"start": 4719,
"end": 6151
}
|
class ____(NamedTuple):
"""Specify the encryption settings to apply when a PDF is saved."""
owner: str = ''
"""The owner password to use. This allows full control
of the file. If blank, the PDF will be encrypted and
present as "(SECURED)" in PDF viewers. If the owner password
is blank, the user password should be as well."""
user: str = ''
"""The user password to use. With this password, some
restrictions will be imposed by a typical PDF reader.
If blank, the PDF can be opened by anyone, but only modified
as allowed by the permissions in ``allow``."""
R: Literal[2, 3, 4, 5, 6] = 6
"""Select the security handler algorithm to use. Choose from:
``2``, ``3``, ``4`` or ``6``. By default, the highest version of
is selected (``6``). ``5`` is a deprecated algorithm that should
not be used."""
allow: Permissions = DEFAULT_PERMISSIONS
"""The permissions to set.
If omitted, all permissions are granted to the user."""
aes: bool = True
"""If True, request the AES algorithm. If False, use RC4.
If omitted, AES is selected whenever possible (R >= 4)."""
metadata: bool = True
"""If True, also encrypt the PDF metadata. If False,
metadata is not encrypted. Reading document metadata without
decryption may be desirable in some cases. Requires ``aes=True``.
If omitted, metadata is encrypted whenever possible."""
|
Encryption
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/models/widgets/test_slider.py
|
{
"start": 2866,
"end": 4860
}
|
class ____:
def test_value_and_value_throttled(self) -> None:
start = datetime(2021, 1, 1)
end = datetime(2021, 12, 31)
value = convert_date_to_datetime(datetime(2021, 2, 1))
s0 = mws.DateSlider(start=start, end=end)
with pytest.raises(UnsetValueError):
s0.value
with pytest.raises(UnsetValueError):
s0.value_throttled
s1 = mws.DateSlider(start=start, end=end, value=value)
assert s1.value == value
assert s1.value_throttled == value
def test_value_as_datetime_when_set_as_datetime(self) -> None:
start = datetime(2017, 8, 9, 0, 0).astimezone(timezone.utc)
end = datetime(2017, 8, 10, 0, 0).astimezone(timezone.utc)
s = mws.DateSlider(start=start, end=end, value=start)
assert s.value_as_datetime == start
def test_value_as_datetime_when_set_as_timestamp(self) -> None:
start = datetime(2017, 8, 9, 0, 0).astimezone(timezone.utc)
end = datetime(2017, 8, 10, 0, 0).astimezone(timezone.utc)
s = mws.DateSlider(start=start, end=end,
# Bokeh serializes as ms since epoch, if they get set as numbers (e.g.)
# by client side update, this is the units they will be
value=convert_datetime_type(start))
assert s.value_as_datetime == start
def test_value_as_date_when_set_as_date(self) -> None:
start = date(2017, 8, 9)
end = date(2017, 8, 10)
s = mws.DateSlider(start=start, end=end, value=end)
assert s.value_as_date == end
def test_value_as_date_when_set_as_timestamp(self) -> None:
start = date(2017, 8, 9)
end = date(2017, 8, 10)
s = mws.DateSlider(start=start, end=end,
# Bokeh serializes as ms since epoch, if they get set as numbers (e.g.)
# by client side update, this is the units they will be
value=convert_date_to_datetime(end))
assert s.value_as_date == end
|
TestDateSlider
|
python
|
django-extensions__django-extensions
|
tests/testapp/models.py
|
{
"start": 6583,
"end": 6895
}
|
class ____(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from="title")
category = models.CharField(max_length=20, null=True)
class Meta:
app_label = "django_extensions"
unique_together = ["slug", "category"]
|
SluggedWithUniqueTogetherTestModel
|
python
|
fastai__fastai
|
fastai/callback/data.py
|
{
"start": 345,
"end": 687
}
|
class ____(Callback):
"Collect all batches, along with `pred` and `loss`, into `self.data`. Mainly for testing"
def before_fit(self): self.data = L()
def after_batch(self):
self.data.append(self.learn.to_detach((self.xb,self.yb,self.pred,self.loss)))
# %% ../../nbs/14a_callback.data.ipynb 6
@delegates()
|
CollectDataCallback
|
python
|
huggingface__transformers
|
src/transformers/models/dab_detr/modeling_dab_detr.py
|
{
"start": 11963,
"end": 16332
}
|
class ____(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, config: DabDetrConfig):
super().__init__()
self.config = config
self.embedding_dim = config.hidden_size / 2
self.temperature_height = config.temperature_height
self.temperature_width = config.temperature_width
scale = config.sine_position_embedding_scale
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, pixel_values, pixel_mask):
if pixel_mask is None:
raise ValueError("No pixel mask provided")
y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale
# We use float32 to ensure reproducibility of the original implementation
dim_tx = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device)
# Modifying dim_tx in place to avoid extra memory allocation -> dim_tx = self.temperature_width ** (2 * (dim_tx // 2) / self.embedding_dim)
dim_tx //= 2
dim_tx.mul_(2 / self.embedding_dim)
dim_tx.copy_(self.temperature_width**dim_tx)
pos_x = x_embed[:, :, :, None] / dim_tx
# We use float32 to ensure reproducibility of the original implementation
dim_ty = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device)
# Modifying dim_ty in place to avoid extra memory allocation -> dim_ty = self.temperature_height ** (2 * (dim_ty // 2) / self.embedding_dim)
dim_ty //= 2
dim_ty.mul_(2 / self.embedding_dim)
dim_ty.copy_(self.temperature_height**dim_ty)
pos_y = y_embed[:, :, :, None] / dim_ty
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
# function to generate sine positional embedding for 4d coordinates
def gen_sine_position_embeddings(pos_tensor, hidden_size=256):
"""
This function computes position embeddings using sine and cosine functions from the input positional tensor,
which has a shape of (batch_size, num_queries, 4).
The last dimension of `pos_tensor` represents the following coordinates:
- 0: x-coord
- 1: y-coord
- 2: width
- 3: height
The output shape is (batch_size, num_queries, 512), where final dim (hidden_size*2 = 512) is the total embedding dimension
achieved by concatenating the sine and cosine values for each coordinate.
"""
scale = 2 * math.pi
dim = hidden_size // 2
dim_t = torch.arange(dim, dtype=torch.float32, device=pos_tensor.device)
dim_t = 10000 ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / dim)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
if pos_tensor.size(-1) == 4:
w_embed = pos_tensor[:, :, 2] * scale
pos_w = w_embed[:, :, None] / dim_t
pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
h_embed = pos_tensor[:, :, 3] * scale
pos_h = h_embed[:, :, None] / dim_t
pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError(f"Unknown pos_tensor shape(-1):{pos_tensor.size(-1)}")
return pos.to(pos_tensor.dtype)
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
# Modified from transformers.models.detr.modeling_detr.DetrAttention
|
DabDetrSinePositionEmbedding
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 294800,
"end": 295253
}
|
class ____(sgqlc.types.Input):
"""Ordering options for saved reply connections."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(SavedReplyOrderField), graphql_name="field")
"""The field to order saved replies by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
|
SavedReplyOrder
|
python
|
dagster-io__dagster
|
helm/dagster/schema/schema/charts/utils/kubernetes.py
|
{
"start": 5345,
"end": 5560
}
|
class ____(BaseModel):
model_config = {
"extra": "allow",
"json_schema_extra": {
"$ref": create_definition_ref("io.k8s.api.apps.v1.DeploymentStrategy")
},
}
|
DeploymentStrategy
|
python
|
langchain-ai__langchain
|
libs/core/tests/unit_tests/tracers/test_base_tracer.py
|
{
"start": 723,
"end": 22457
}
|
class ____(BaseTracer):
"""Fake tracer that records LangChain execution."""
def __init__(self) -> None:
"""Initialize the tracer."""
super().__init__()
self.runs: list[Run] = []
def _persist_run(self, run: Run) -> None:
"""Persist a run."""
self.runs.append(run)
def _compare_run_with_error(run: Any, expected_run: Any) -> None:
if run.child_runs:
assert len(expected_run.child_runs) == len(run.child_runs)
for received, expected in zip(
run.child_runs, expected_run.child_runs, strict=False
):
_compare_run_with_error(received, expected)
received = run.dict(exclude={"child_runs"})
received_err = received.pop("error")
expected = expected_run.dict(exclude={"child_runs"})
expected_err = expected.pop("error")
assert received == expected
if expected_err is not None:
assert received_err is not None
assert expected_err in received_err
else:
assert received_err is None
@freeze_time("2023-01-01")
def test_tracer_llm_run() -> None:
"""Test tracer on an LLM run."""
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=uuid,
parent_run_id=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
error=None,
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeTracer()
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid)
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
def test_tracer_chat_model_run() -> None:
"""Test tracer on a Chat Model run."""
tracer = FakeTracer()
manager = CallbackManager(handlers=[tracer])
run_managers = manager.on_chat_model_start(
serialized=SERIALIZED_CHAT, messages=[[HumanMessage(content="")]]
)
compare_run = Run(
id=str(run_managers[0].run_id), # type: ignore[arg-type]
name="chat_model",
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED_CHAT,
inputs={"prompts": ["Human: "]},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
error=None,
run_type="llm",
trace_id=run_managers[0].run_id,
dotted_order=f"20230101T000000000000Z{run_managers[0].run_id}",
)
for run_manager in run_managers:
run_manager.on_llm_end(response=LLMResult(generations=[[]]))
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
def test_tracer_llm_run_errors_no_start() -> None:
"""Test tracer on an LLM run without a start."""
tracer = FakeTracer()
with pytest.raises(TracerException):
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid4())
@freeze_time("2023-01-01")
def test_tracer_multiple_llm_runs() -> None:
"""Test the tracer with multiple runs."""
uuid = uuid4()
compare_run = Run(
id=uuid,
name="llm",
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
error=None,
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeTracer()
num_runs = 10
for _ in range(num_runs):
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid)
assert tracer.runs == [compare_run] * num_runs
@freeze_time("2023-01-01")
def test_tracer_chain_run() -> None:
"""Test tracer on a Chain run."""
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
inputs={},
outputs={},
error=None,
run_type="chain",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeTracer()
tracer.on_chain_start(serialized={"name": "chain"}, inputs={}, run_id=uuid)
tracer.on_chain_end(outputs={}, run_id=uuid)
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
def test_tracer_tool_run() -> None:
"""Test tracer on a Tool run."""
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
inputs={"input": "test"},
outputs={"output": "test"},
error=None,
run_type="tool",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeTracer()
tracer.on_tool_start(serialized={"name": "tool"}, input_str="test", run_id=uuid)
tracer.on_tool_end("test", run_id=uuid)
assert tracer.runs == [compare_run]
@freeze_time("2023-01-01")
def test_tracer_nested_run() -> None:
"""Test tracer on a nested run."""
tracer = FakeTracer()
chain_uuid = uuid4()
tool_uuid = uuid4()
llm_uuid1 = uuid4()
llm_uuid2 = uuid4()
for _ in range(10):
tracer.on_chain_start(
serialized={"name": "chain"}, inputs={}, run_id=chain_uuid
)
tracer.on_tool_start(
serialized={"name": "tool"},
input_str="test",
run_id=tool_uuid,
parent_run_id=chain_uuid,
)
tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid1,
parent_run_id=tool_uuid,
)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1)
tracer.on_tool_end("test", run_id=tool_uuid)
tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid2,
parent_run_id=chain_uuid,
)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid2)
tracer.on_chain_end(outputs={}, run_id=chain_uuid)
compare_run = Run( # type: ignore[call-arg]
id=str(chain_uuid), # type: ignore[arg-type]
error=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
inputs={},
outputs={},
run_type="chain",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=tool_uuid,
parent_run_id=chain_uuid,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
inputs={"input": "test"},
outputs={"output": "test"},
error=None,
run_type="tool",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=str(llm_uuid1), # type: ignore[arg-type]
parent_run_id=str(tool_uuid), # type: ignore[arg-type]
error=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}.20230101T000000000000Z{llm_uuid1}",
)
],
),
Run( # type: ignore[call-arg]
id=str(llm_uuid2), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
error=None,
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid2}",
),
],
)
assert tracer.runs[0] == compare_run
assert tracer.runs == [compare_run] * 10
@freeze_time("2023-01-01")
def test_tracer_llm_run_on_error() -> None:
"""Test tracer on an LLM run with an error."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=None,
error=repr(exception),
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeTracer()
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_error(exception, run_id=uuid)
assert len(tracer.runs) == 1
_compare_run_with_error(tracer.runs[0], compare_run)
@freeze_time("2023-01-01")
def test_tracer_llm_run_on_error_callback() -> None:
"""Test tracer on an LLM run with an error and a callback."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
inputs={"prompts": []},
outputs=None,
error=repr(exception),
run_type="llm",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
class FakeTracerWithLlmErrorCallback(FakeTracer):
error_run = None
def _on_llm_error(self, run: Run) -> None:
self.error_run = run
tracer = FakeTracerWithLlmErrorCallback()
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_error(exception, run_id=uuid)
_compare_run_with_error(tracer.error_run, compare_run)
@freeze_time("2023-01-01")
def test_tracer_chain_run_on_error() -> None:
"""Test tracer on a Chain run with an error."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
inputs={},
outputs=None,
error=repr(exception),
run_type="chain",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeTracer()
tracer.on_chain_start(serialized={"name": "chain"}, inputs={}, run_id=uuid)
tracer.on_chain_error(exception, run_id=uuid)
_compare_run_with_error(tracer.runs[0], compare_run)
@freeze_time("2023-01-01")
def test_tracer_tool_run_on_error() -> None:
"""Test tracer on a Tool run with an error."""
exception = Exception("test")
uuid = uuid4()
compare_run = Run( # type: ignore[call-arg]
id=str(uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
inputs={"input": "test"},
outputs=None,
error=repr(exception),
run_type="tool",
trace_id=uuid,
dotted_order=f"20230101T000000000000Z{uuid}",
)
tracer = FakeTracer()
tracer.on_tool_start(serialized={"name": "tool"}, input_str="test", run_id=uuid)
tracer.on_tool_error(exception, run_id=uuid)
_compare_run_with_error(tracer.runs[0], compare_run)
@freeze_time("2023-01-01")
def test_tracer_nested_runs_on_error() -> None:
"""Test tracer on a nested run with an error."""
exception = Exception("test")
tracer = FakeTracer()
chain_uuid = uuid4()
tool_uuid = uuid4()
llm_uuid1 = uuid4()
llm_uuid2 = uuid4()
llm_uuid3 = uuid4()
for _ in range(3):
tracer.on_chain_start(
serialized={"name": "chain"}, inputs={}, run_id=chain_uuid
)
tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid1,
parent_run_id=chain_uuid,
)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1)
tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid2,
parent_run_id=chain_uuid,
)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid2)
tracer.on_tool_start(
serialized={"name": "tool"},
input_str="test",
run_id=tool_uuid,
parent_run_id=chain_uuid,
)
tracer.on_llm_start(
serialized=SERIALIZED,
prompts=[],
run_id=llm_uuid3,
parent_run_id=tool_uuid,
)
tracer.on_llm_error(exception, run_id=llm_uuid3)
tracer.on_tool_error(exception, run_id=tool_uuid)
tracer.on_chain_error(exception, run_id=chain_uuid)
compare_run = Run( # type: ignore[call-arg]
id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "chain"},
error=repr(exception),
inputs={},
outputs=None,
run_type="chain",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=str(llm_uuid1), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
error=None,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]], llm_output=None), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid1}",
),
Run( # type: ignore[call-arg]
id=str(llm_uuid2), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "end", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
error=None,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]], llm_output=None), # type: ignore[arg-type]
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{llm_uuid2}",
),
Run( # type: ignore[call-arg]
id=str(tool_uuid), # type: ignore[arg-type]
parent_run_id=str(chain_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized={"name": "tool"},
error=repr(exception),
inputs={"input": "test"},
outputs=None,
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}",
child_runs=[
Run( # type: ignore[call-arg]
id=str(llm_uuid3), # type: ignore[arg-type]
parent_run_id=str(tool_uuid), # type: ignore[arg-type]
start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc),
events=[
{"name": "start", "time": datetime.now(timezone.utc)},
{"name": "error", "time": datetime.now(timezone.utc)},
],
extra={},
serialized=SERIALIZED,
error=repr(exception),
inputs={"prompts": []},
outputs=None,
run_type="llm",
trace_id=chain_uuid,
dotted_order=f"20230101T000000000000Z{chain_uuid}.20230101T000000000000Z{tool_uuid}.20230101T000000000000Z{llm_uuid3}",
)
],
run_type="tool",
),
],
)
assert len(tracer.runs) == 3
for run in tracer.runs:
_compare_run_with_error(run, compare_run)
def _get_mock_client() -> Client:
mock_session = MagicMock()
return Client(session=mock_session, api_key="test")
def test_traceable_to_tracing() -> None:
has_children = False
def _collect_run(run: Any) -> None:
nonlocal has_children
has_children = bool(run.child_runs)
@as_runnable
def foo(x: int) -> int:
return x + 1
@traceable
def some_parent(a: int, b: int) -> int:
return foo.invoke(a) + foo.invoke(b)
mock_client_ = _get_mock_client()
with langsmith.run_helpers.tracing_context(enabled=True):
result = some_parent(
1, 2, langsmith_extra={"client": mock_client_, "on_end": _collect_run}
)
assert result == 5
assert has_children, "Child run not collected"
|
FakeTracer
|
python
|
spack__spack
|
lib/spack/spack/fetch_strategy.py
|
{
"start": 66220,
"end": 66340
}
|
class ____(spack.error.FetchError):
"""Raised when we can't extrapolate a version for a package."""
|
ExtrapolationError
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/runs/base.py
|
{
"start": 1028,
"end": 1115
}
|
class ____(TypedDict):
count: int
runs: Sequence[DagsterRun]
@public
|
RunGroupInfo
|
python
|
getsentry__sentry
|
src/social_auth/exceptions.py
|
{
"start": 2199,
"end": 2366
}
|
class ____(AuthException):
"""State parameter is incorrect."""
def __str__(self) -> str:
return gettext("Session value state missing.")
|
AuthStateMissing
|
python
|
pytorch__pytorch
|
torch/export/dynamic_shapes.py
|
{
"start": 8477,
"end": 9010
}
|
class ____(Dim):
"""
Class for static :func:`Dim` types.
This class is only for setting and checking static dim constraints,
and the user should never interact with it.
"""
def __init__(self, value: int):
self.__name__ = str(value)
self.value = value
@property
def min(self): # type: ignore[override]
return self.value # type: ignore[attr-defined]
@property
def max(self): # type: ignore[override]
return self.value # type: ignore[attr-defined]
|
_StaticDim
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.