language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/api/bases/project.py | {
"start": 3356,
"end": 3657
} | class ____(ProjectPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin"],
"POST": ["project:write", "project:admin"],
"PUT": ["project:write", "project:admin"],
"DELETE": ["project:write", "project:admin"],
}
| ProjectSettingPermission |
python | keras-team__keras | keras/src/layers/core/reversible_embedding.py | {
"start": 278,
"end": 14168
} | class ____(layers.Embedding):
"""An embedding layer which can project backwards to the input dim.
This layer is an extension of `keras.layers.Embedding` for language models.
This layer can be called "in reverse" with `reverse=True`, in which case the
layer will linearly project from `output_dim` back to `input_dim`.
By default, the reverse projection will use the transpose of the
`embeddings` weights to project to `input_dim` (weights are "tied"). If
`tie_weights=False`, the model will use a separate, trainable variable for
reverse projection.
This layer has no bias terms.
Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
tie_weights: Boolean, whether or not the matrix for embedding and
the matrix for the `reverse` projection should share the same
weights.
embeddings_initializer: Initializer for the `embeddings`
matrix (see `keras.initializers`).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix (see `keras.regularizers`).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix (see `keras.constraints`).
mask_zero: Boolean, whether or not the input value 0 is a special
"padding" value that should be masked out.
reverse_dtype: The dtype for the reverse projection computation.
Defaults to the `compute_dtype` of the layer.
logit_soft_cap: If `logit_soft_cap` is set and `reverse=True`, the
output logits will be scaled by
`tanh(logits / logit_soft_cap) * logit_soft_cap`. This narrows the
range of output logits and can improve training.
**kwargs: other keyword arguments passed to `keras.layers.Embedding`,
including `name`, `trainable`, `dtype` etc.
Call arguments:
inputs: The tensor inputs to the layer.
reverse: Boolean. If `True` the layer will perform a linear projection
from `output_dim` to `input_dim`, instead of a normal embedding
call. Default to `False`.
Example:
```python
batch_size = 16
vocab_size = 100
hidden_dim = 32
seq_length = 50
# Generate random inputs.
token_ids = np.random.randint(vocab_size, size=(batch_size, seq_length))
embedding = keras.layers.ReversibleEmbedding(vocab_size, hidden_dim)
# Embed tokens to shape `(batch_size, seq_length, hidden_dim)`.
hidden_states = embedding(token_ids)
# Project hidden states to shape `(batch_size, seq_length, vocab_size)`.
logits = embedding(hidden_states, reverse=True)
```
References:
- [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)
- [Press and Wolf, 2016](https://arxiv.org/abs/1608.05859)
"""
def __init__(
self,
input_dim,
output_dim,
tie_weights=True,
embeddings_initializer="uniform",
embeddings_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
reverse_dtype=None,
logit_soft_cap=None,
**kwargs,
):
super().__init__(
input_dim,
output_dim,
embeddings_initializer=embeddings_initializer,
embeddings_regularizer=embeddings_regularizer,
embeddings_constraint=embeddings_constraint,
mask_zero=mask_zero,
**kwargs,
)
self.tie_weights = tie_weights
self.reverse_dtype = reverse_dtype
self.logit_soft_cap = logit_soft_cap
def build(self, inputs_shape=None):
super().build(inputs_shape)
if not self.tie_weights and self.quantization_mode not in (
"int8",
"int4",
):
self.reverse_embeddings = self.add_weight(
shape=(self.output_dim, self.input_dim),
initializer=self.embeddings_initializer,
name="reverse_embeddings",
trainable=True,
)
def call(self, inputs, reverse=False):
if not reverse:
return super().call(inputs)
else:
if self.tie_weights:
kernel = ops.transpose(ops.convert_to_tensor(self.embeddings))
else:
kernel = self.reverse_embeddings
if self.reverse_dtype is not None:
inputs = ops.cast(inputs, self.reverse_dtype)
kernel = ops.cast(kernel, self.reverse_dtype)
logits = ops.matmul(inputs, kernel)
# Optionally soft-cap logits.
if self.logit_soft_cap is not None:
soft_cap = self.logit_soft_cap
logits = ops.multiply(
ops.tanh(ops.divide(logits, soft_cap)), soft_cap
)
return logits
def compute_output_shape(self, input_shape, reverse=False):
output_shape = list(input_shape)
if reverse:
output_shape[-1] = self.input_dim
else:
output_shape += [self.output_dim]
return output_shape
def compute_output_spec(self, inputs, reverse=False):
output_shape = list(inputs.shape)
if reverse:
output_shape[-1] = self.input_dim
else:
output_shape += [self.output_dim]
return KerasTensor(output_shape, dtype=self.compute_dtype)
def get_config(self):
config = super().get_config()
config.update(
{
"tie_weights": self.tie_weights,
"reverse_dtype": self.reverse_dtype,
"logit_soft_cap": self.logit_soft_cap,
}
)
return config
@property
def variable_serialization_spec(self):
# Avoid modifying the parent's spec.
_spec = copy.deepcopy(super().variable_serialization_spec)
if not self.tie_weights:
for mode, variable_spec in _spec.items():
variable_spec.append("reverse_embeddings")
if mode in ("int4", "int8"):
variable_spec.append("reverse_embeddings_scale")
return _spec
def quantized_build(self, embeddings_shape, mode):
if mode == "int8":
self._int8_build(embeddings_shape)
elif mode == "int4":
self._int4_build(embeddings_shape)
else:
raise self._quantization_mode_error(mode)
self._is_quantized = True
def _int8_build(self, embeddings_shape):
if embeddings_shape is None:
embeddings_shape = (self.input_dim, self.output_dim)
super()._int8_build(embeddings_shape=embeddings_shape)
self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1)
if not self.tie_weights:
self.reverse_embeddings = self.add_weight(
name="reverse_embeddings",
shape=(self.output_dim, self.input_dim),
initializer="zeros",
dtype="int8",
trainable=False,
)
self.reverse_embeddings_scale = self.add_weight(
name="reverse_embeddings_scale",
shape=(self.input_dim,),
initializer="ones",
trainable=False,
)
def _int4_build(self, embeddings_shape):
if embeddings_shape is None:
embeddings_shape = (self.input_dim, self.output_dim)
super()._int4_build(embeddings_shape=embeddings_shape)
self.inputs_quantizer = quantizers.AbsMaxQuantizer(axis=-1)
if not self.tie_weights:
packed_rows = (self.output_dim + 1) // 2 # ceil for odd dims
self.reverse_embeddings = self.add_weight(
name="reverse_embeddings",
shape=(packed_rows, self.input_dim),
initializer="zeros",
dtype="int8",
trainable=False,
)
self.reverse_embeddings_scale = self.add_weight(
name="reverse_embeddings_scale",
shape=(self.input_dim,),
initializer="ones",
trainable=False,
)
def _int8_call(self, inputs, reverse=False):
if not reverse:
return super()._int8_call(inputs)
else:
if self.tie_weights:
kernel = ops.transpose(self._embeddings)
scale = ops.transpose(self.embeddings_scale)
else:
kernel = self.reverse_embeddings
scale = self.reverse_embeddings_scale
inputs, inputs_scale = self.inputs_quantizer(inputs)
logits = ops.matmul(inputs, kernel)
# De-scale outputs
logits = ops.cast(logits, self.compute_dtype)
logits = ops.divide(logits, ops.multiply(inputs_scale, scale))
# Optionally soft-cap logits.
if self.logit_soft_cap is not None:
soft_cap = self.logit_soft_cap
logits = ops.multiply(
ops.tanh(ops.divide(logits, soft_cap)), soft_cap
)
return logits
def _int4_call(self, inputs, reverse=False):
if not reverse:
return super()._int4_call(inputs)
else:
if self.tie_weights:
embeddings = ops.transpose(self._embeddings)
scale = ops.transpose(self.embeddings_scale)
else:
embeddings = self.reverse_embeddings
scale = self.reverse_embeddings_scale
unpacked_embeddings = quantizers.unpack_int4(
embeddings, self.output_dim, axis=0
)
inputs, inputs_scale = self.inputs_quantizer(inputs)
logits = ops.matmul(inputs, unpacked_embeddings)
# De-scale outputs
logits = ops.cast(logits, self.compute_dtype)
logits = ops.divide(logits, ops.multiply(inputs_scale, scale))
# Optionally soft-cap logits.
if self.logit_soft_cap is not None:
soft_cap = self.logit_soft_cap
logits = ops.multiply(
ops.tanh(ops.divide(logits, soft_cap)), soft_cap
)
return logits
def quantize(self, mode, type_check=True, config=None):
del config
if type_check and type(self) is not ReversibleEmbedding:
raise self._not_implemented_error(self.quantize)
embeddings_shape = (self.input_dim, self.output_dim)
if mode == "int8":
# Quantize `self._embeddings` to int8 and compute corresponding
# scale.
embeddings_value, embeddings_scale = quantizers.abs_max_quantize(
self._embeddings, axis=-1, to_numpy=True
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
del self._embeddings
if not self.tie_weights:
reverse_embeddings_value, reverse_embeddings_scale = (
quantizers.abs_max_quantize(
self.reverse_embeddings, axis=0, to_numpy=True
)
)
reverse_embeddings_scale = ops.squeeze(
reverse_embeddings_scale, axis=0
)
del self.reverse_embeddings
self.quantized_build(embeddings_shape, mode)
self._embeddings.assign(embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
if not self.tie_weights:
self.reverse_embeddings.assign(reverse_embeddings_value)
self.reverse_embeddings_scale.assign(reverse_embeddings_scale)
elif mode == "int4":
# Quantize to int4 values (stored in int8 dtype, range [-8, 7]).
embeddings_value, embeddings_scale = quantizers.abs_max_quantize(
self._embeddings,
axis=-1,
value_range=(-8, 7),
dtype="int8",
to_numpy=True,
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
# 2. Pack two int4 values into a single int8 byte.
packed_embeddings_value, _, _ = quantizers.pack_int4(
embeddings_value, axis=-1
)
del self._embeddings
if not self.tie_weights:
reverse_embeddings_value, reverse_embeddings_scale = (
quantizers.abs_max_quantize(
self.reverse_embeddings,
axis=0,
value_range=(-8, 7),
dtype="int8",
to_numpy=True,
)
)
reverse_embeddings_scale = ops.squeeze(
reverse_embeddings_scale, axis=0
)
# Pack two int4 values into a single int8 byte.
packed_reverse_embeddings_value, _, _ = quantizers.pack_int4(
reverse_embeddings_value, axis=0
)
del self.reverse_embeddings
self.quantized_build(embeddings_shape, mode)
self._embeddings.assign(packed_embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
if not self.tie_weights:
self.reverse_embeddings.assign(packed_reverse_embeddings_value)
self.reverse_embeddings_scale.assign(reverse_embeddings_scale)
else:
raise self._quantization_mode_error(mode)
# Set new dtype policy.
if self.dtype_policy.quantization_mode is None:
policy = dtype_policies.get(f"{mode}_from_{self.dtype_policy.name}")
self.dtype_policy = policy
| ReversibleEmbedding |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py | {
"start": 7591,
"end": 9496
} | class ____(BaseConfig):
config_path: str = config_path
deployment_mode: Optional[str] = deployment_mode
configured_catalog_path: Optional[str] = configured_catalog_path
empty_streams: Set[EmptyStreamConfiguration] = Field(
default_factory=set, description="We validate that all streams has records. These are exceptions"
)
expect_records: Optional[ExpectedRecordsConfig] = Field(description="Expected records from the read")
validate_schema: bool = Field(True, description="Ensure that records match the schema of the corresponding stream")
validate_stream_statuses: bool = Field(None, description="Ensure that all streams emit status messages")
validate_state_messages: bool = Field(True, description="Ensure that state messages emitted as expected")
validate_primary_keys_data_type: bool = Field(True, description="Ensure correct primary keys data type")
fail_on_extra_columns: bool = Field(True, description="Fail if extra top-level properties (i.e. columns) are detected in records.")
# TODO: remove this field after https://github.com/airbytehq/airbyte/issues/8312 is done
validate_data_points: bool = Field(
False, description="Set whether we need to validate that all fields in all streams contained at least one data point"
)
expect_trace_message_on_failure: bool = Field(True, description="Ensure that a trace message is emitted when the connector crashes")
timeout_seconds: int = timeout_seconds
file_types: Optional[FileTypesConfig] = Field(
default_factory=FileTypesConfig,
description="For file-based connectors, unsupported by source file types can be configured or a test can be skipped at all",
)
client_container_config: Optional[ClientContainerConfig] = Field(
description="Information required to run a client Docker container before each test.",
)
| BasicReadTestConfig |
python | PyCQA__pylint | tests/functional/c/consider/consider_iterating_dictionary.py | {
"start": 2603,
"end": 3768
} | class ____:
def a_function(self):
class InnerClass:
def another_function(self):
def inner_function():
another_metadata = {}
print("a" not in list(another_metadata.keys())) # [consider-iterating-dictionary]
print("a" not in another_metadata.keys()) # [consider-iterating-dictionary]
print("a" in list(another_metadata.keys())) # [consider-iterating-dictionary]
print("a" in another_metadata.keys()) # [consider-iterating-dictionary]
return inner_function()
return InnerClass().another_function()
A_DICT = {"a": 1, "b": 2, "c": 3}
A_SET = {"c", "d"}
# Test bitwise operations. These should not raise msg because removing `.keys()`
# either gives error or ends in a different result
print(A_DICT.keys() | A_SET)
if "a" in A_DICT.keys() | A_SET:
pass
if "a" in A_DICT.keys() & A_SET:
pass
if 1 in A_DICT.keys() ^ [1, 2]:
pass
if "a" in A_DICT.keys() or A_SET: # [consider-iterating-dictionary]
pass
if "a" in A_DICT.keys() and A_SET: # [consider-iterating-dictionary]
pass
| AClass |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/manager.py | {
"start": 17392,
"end": 18580
} | class ____(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> None:
"""Run when a text is received.
Args:
text: The received text.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
"""Run when a retry is received.
Args:
retry_state: The retry state.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
| RunManager |
python | huggingface__transformers | src/transformers/models/mask2former/image_processing_mask2former.py | {
"start": 13477,
"end": 58324
} | class ____(BaseImageProcessor):
r"""
Constructs a Mask2Former image processor. The image processor can be used to prepare image(s) and optional targets
for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 800):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size *
height / width, size)`.
size_divisor (`int`, *optional*, defaults to 32):
Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in
Swin Transformer.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1/ 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
model_input_names = ["pixel_values", "pixel_mask"]
valid_kwargs = Mask2FormerImageProcessorKwargs
@filter_out_non_signature_kwargs(extra=["max_size", *INIT_SERVICE_KWARGS])
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
size_divisor: int = 32,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
ignore_index: Optional[int] = None,
do_reduce_labels: bool = False,
num_labels: Optional[int] = None,
pad_size: Optional[dict[str, int]] = None,
**kwargs,
):
super().__init__(**kwargs)
# We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst
# `size` can still be pass in as an int
self._max_size = kwargs.pop("max_size", 1333)
size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size}
size = get_size_dict(size, max_size=self._max_size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.size_divisor = size_divisor
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.ignore_index = ignore_index
self.do_reduce_labels = do_reduce_labels
self.num_labels = num_labels
self.pad_size = pad_size
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.to_dict
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
"""
image_processor_dict = super().to_dict()
image_processor_dict.pop("_max_size", None)
return image_processor_dict
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.resize with get_maskformer_resize_output_image_size->get_mask2former_resize_output_image_size
def resize(
self,
image: np.ndarray,
size: dict[str, int],
size_divisor: int = 0,
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format=None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
The size of the output image.
size_divisor (`int`, *optional*, defaults to 0):
If `size_divisor` is given, the output image size will be divisible by the number.
resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
# Deprecated, backward compatibility
max_size = kwargs.pop("max_size", None)
size = get_size_dict(size, max_size=max_size, default_to_square=False)
if "shortest_edge" in size and "longest_edge" in size:
size, max_size = size["shortest_edge"], size["longest_edge"]
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
max_size = None
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
size = get_mask2former_resize_output_image_size(
image=image,
size=size,
max_size=max_size,
size_divisor=size_divisor,
default_to_square=False,
input_data_format=input_data_format,
)
image = resize(
image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
)
return image
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
def rescale(
self,
image: np.ndarray,
rescale_factor: float,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.convert_segmentation_map_to_binary_masks
def convert_segmentation_map_to_binary_masks(
self,
segmentation_map: np.ndarray,
instance_id_to_semantic_id: Optional[dict[int, int]] = None,
ignore_index: Optional[int] = None,
do_reduce_labels: bool = False,
):
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
return convert_segmentation_map_to_binary_masks(
segmentation_map=segmentation_map,
instance_id_to_semantic_id=instance_id_to_semantic_id,
ignore_index=ignore_index,
do_reduce_labels=do_reduce_labels,
)
def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature:
return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
size_divisor: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if do_resize:
image = self.resize(
image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
size_divisor: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single image."""
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(
image=image,
do_resize=do_resize,
size=size,
size_divisor=size_divisor,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
input_data_format=input_data_format,
)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(
self,
segmentation_map: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
size_divisor: int = 0,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
# Add channel dimension if missing - needed for certain transformations
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map)
# TODO: (Amy)
# Remork segmentation map processing to include reducing labels and resizing which doesn't
# drop segment IDs > 255.
segmentation_map = self._preprocess(
image=segmentation_map,
do_resize=do_resize,
resample=PILImageResampling.NEAREST,
size=size,
size_divisor=size_divisor,
do_rescale=False,
do_normalize=False,
input_data_format=input_data_format,
)
# Remove extra channel dimension if added for processing
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
instance_id_to_semantic_id: Optional[dict[int, int]] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
size_divisor: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
ignore_index: Optional[int] = None,
do_reduce_labels: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
pad_size: Optional[dict[str, int]] = None,
) -> BatchFeature:
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False, max_size=self._max_size)
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
pad_size = self.pad_size if pad_size is None else pad_size
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
if segmentation_maps is not None and not valid_images(segmentation_maps):
raise ValueError(
"Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor"
)
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if segmentation_maps is not None and len(images) != len(segmentation_maps):
raise ValueError("Images and segmentation maps must have the same length.")
images = [
self._preprocess_image(
image,
do_resize=do_resize,
size=size,
size_divisor=size_divisor,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
input_data_format=input_data_format,
)
for image in images
]
if segmentation_maps is not None:
segmentation_maps = [
self._preprocess_mask(
segmentation_map, do_resize, size, size_divisor, input_data_format=input_data_format
)
for segmentation_map in segmentation_maps
]
encoded_inputs = self.encode_inputs(
images,
segmentation_maps,
instance_id_to_semantic_id,
ignore_index,
do_reduce_labels,
return_tensors,
input_data_format=data_format,
pad_size=pad_size,
)
return encoded_inputs
# Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
def _pad_image(
self,
image: np.ndarray,
output_size: tuple[int, int],
constant_values: Union[float, Iterable[float]] = 0,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(
image,
padding,
mode=PaddingMode.CONSTANT,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
return padded_image
# Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.pad
def pad(
self,
images: list[np.ndarray],
constant_values: Union[float, Iterable[float]] = 0,
return_pixel_mask: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
pad_size: Optional[dict[str, int]] = None,
) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
pad_size = pad_size if pad_size is not None else self.pad_size
if pad_size is not None:
padded_size = (pad_size["height"], pad_size["width"])
else:
padded_size = get_max_height_width(images, input_data_format=input_data_format)
padded_images = [
self._pad_image(
image,
padded_size,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
for image in images
]
data = {"pixel_values": padded_images}
if return_pixel_mask:
masks = [
make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format)
for image in images
]
data["pixel_mask"] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
def encode_inputs(
self,
pixel_values_list: list[ImageInput],
segmentation_maps: Optional[ImageInput] = None,
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]] = None,
ignore_index: Optional[int] = None,
do_reduce_labels: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
pad_size: Optional[dict[str, int]] = None,
):
"""
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
Mask2Former addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
(`bool`, *optional*, defaults to `True`):
Whether or not to pad images up to the largest image in a batch and create a pixel mask.
If left to the default, will return a pixel mask that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
pad_size (`Dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in
`self.model_input_names`).
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` if `class_labels[i][j]`.
"""
ignore_index = self.ignore_index if ignore_index is None else ignore_index
do_reduce_labels = self.do_reduce_labels if do_reduce_labels is None else do_reduce_labels
pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]
if input_data_format is None:
input_data_format = infer_channel_dimension_format(pixel_values_list[0])
encoded_inputs = self.pad(
pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format, pad_size=pad_size
)
if segmentation_maps is not None:
mask_labels = []
class_labels = []
pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format)
# Convert to list of binary masks and labels
for idx, segmentation_map in enumerate(segmentation_maps):
segmentation_map = to_numpy_array(segmentation_map)
if isinstance(instance_id_to_semantic_id, list):
instance_id = instance_id_to_semantic_id[idx]
else:
instance_id = instance_id_to_semantic_id
# Use instance2class_id mapping per image
masks, classes = self.convert_segmentation_map_to_binary_masks(
segmentation_map, instance_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels
)
# We add an axis to make them compatible with the transformations library
# this will be removed in the future
if masks.shape[0] > 0:
masks = [mask[None, ...] for mask in masks]
masks = [
self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index)
for mask in masks
]
masks = np.concatenate(masks, axis=0)
else:
masks = np.zeros((0, *pad_size), dtype=np.float32)
mask_labels.append(torch.from_numpy(masks))
class_labels.append(torch.from_numpy(classes))
# we cannot batch them since they don't share a common class size
encoded_inputs["mask_labels"] = mask_labels
encoded_inputs["class_labels"] = class_labels
return encoded_inputs
def post_process_semantic_segmentation(
self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None
) -> "torch.Tensor":
"""
Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`Mask2FormerForUniversalSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
# Scale back to preprocessed image size - (384, 384) for all models
masks_queries_logits = torch.nn.functional.interpolate(
masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
)
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Semantic segmentation logits of shape (batch_size, num_classes, height, width)
segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = torch.nn.functional.interpolate(
segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
def post_process_instance_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
target_sizes: Optional[list[tuple[int, int]]] = None,
return_coco_annotation: Optional[bool] = False,
return_binary_maps: Optional[bool] = False,
) -> list[dict]:
"""
Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions.
Only supports PyTorch. If instances could overlap, set either return_coco_annotation or return_binary_maps
to `True` to get the correct segmentation result.
Args:
outputs ([`Mask2FormerForUniversalSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
return_binary_maps (`bool`, *optional*, defaults to `False`):
If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps
(one per detected instance).
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`, or a tensor of shape `(num_instances, height, width)` if return_binary_maps is set to `True`.
Set to `None` if no mask if found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if return_coco_annotation and return_binary_maps:
raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.")
# [batch_size, num_queries, num_classes+1]
class_queries_logits = outputs.class_queries_logits
# [batch_size, num_queries, height, width]
masks_queries_logits = outputs.masks_queries_logits
# Scale back to preprocessed image size - (384, 384) for all models
masks_queries_logits = torch.nn.functional.interpolate(
masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
)
device = masks_queries_logits.device
num_classes = class_queries_logits.shape[-1] - 1
num_queries = class_queries_logits.shape[-2]
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(class_queries_logits.shape[0]):
mask_pred = masks_queries_logits[i]
mask_cls = class_queries_logits[i]
scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1]
labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor")
mask_pred = mask_pred[topk_indices]
pred_masks = (mask_pred > 0).float()
# Calculate average mask prob
mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
pred_masks.flatten(1).sum(1) + 1e-6
)
pred_scores = scores_per_image * mask_scores_per_image
pred_classes = labels_per_image
segmentation = torch.zeros((384, 384)) - 1
if target_sizes is not None:
segmentation = torch.zeros(target_sizes[i]) - 1
pred_masks = torch.nn.functional.interpolate(
pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest"
)[0]
instance_maps, segments = [], []
current_segment_id = 0
for j in range(num_queries):
score = pred_scores[j].item()
if not torch.all(pred_masks[j] == 0) and score >= threshold:
segmentation[pred_masks[j] == 1] = current_segment_id
segments.append(
{
"id": current_segment_id,
"label_id": pred_classes[j].item(),
"was_fused": False,
"score": round(score, 6),
}
)
current_segment_id += 1
instance_maps.append(pred_masks[j])
# Return segmentation map in run-length encoding (RLE) format
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
# Return a concatenated tensor of binary instance maps
if return_binary_maps and len(instance_maps) != 0:
segmentation = torch.stack(instance_maps, dim=0)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
def post_process_panoptic_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
label_ids_to_fuse: Optional[set[int]] = None,
target_sizes: Optional[list[tuple[int, int]]] = None,
) -> list[dict]:
"""
Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`Mask2FormerForUniversalSegmentationOutput`]):
The outputs from [`Mask2FormerForUniversalSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this state will have all their instances be fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning("`label_ids_to_fuse` unset. No instance will be fused.")
label_ids_to_fuse = set()
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
# Scale back to preprocessed image size - (384, 384) for all models
masks_queries_logits = torch.nn.functional.interpolate(
masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False
)
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=label_ids_to_fuse,
target_size=target_size,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["Mask2FormerImageProcessor"]
| Mask2FormerImageProcessor |
python | huggingface__transformers | src/transformers/convert_slow_tokenizer.py | {
"start": 33268,
"end": 34032
} | class ____(SpmConverter):
def vocab(self, proto):
vocab = [
("<pad>", 0.0),
("<unk>", 0.0),
("<s>", 0.0),
("</s>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
return vocab
def unk_id(self, proto):
return self.original_tokenizer.unk_token_id
def post_processor(self):
return processors.TemplateProcessing(
single="__eng__ $A </s>",
pair="__eng__ $A $B </s>",
special_tokens=[
("__eng__", self.original_tokenizer.convert_tokens_to_ids("__eng__")),
("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
],
)
| SeamlessM4TConverter |
python | wandb__wandb | wandb/vendor/pygments/lexers/lisp.py | {
"start": 132102,
"end": 140665
} | class ____(RegexLexer):
"""An xtlang lexer for the `Extempore programming environment
<http://extempore.moso.com.au>`_.
This is a mixture of Scheme and xtlang, really. Keyword lists are
taken from the Extempore Emacs mode
(https://github.com/extemporelang/extempore-emacs-mode)
.. versionadded:: 2.2
"""
name = 'xtlang'
aliases = ['extempore']
filenames = ['*.xtm']
mimetypes = []
common_keywords = (
'lambda', 'define', 'if', 'else', 'cond', 'and',
'or', 'let', 'begin', 'set!', 'map', 'for-each',
)
scheme_keywords = (
'do', 'delay', 'quasiquote', 'unquote', 'unquote-splicing', 'eval',
'case', 'let*', 'letrec', 'quote',
)
xtlang_bind_keywords = (
'bind-func', 'bind-val', 'bind-lib', 'bind-type', 'bind-alias',
'bind-poly', 'bind-dylib', 'bind-lib-func', 'bind-lib-val',
)
xtlang_keywords = (
'letz', 'memzone', 'cast', 'convert', 'dotimes', 'doloop',
)
common_functions = (
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', '%', 'abs', 'acos',
'angle', 'append', 'apply', 'asin', 'assoc', 'assq', 'assv',
'atan', 'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar',
'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar',
'caddar', 'cadddr', 'caddr', 'cadr', 'car', 'cdaaar',
'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr',
'cddr', 'cdr', 'ceiling', 'cons', 'cos', 'floor', 'length',
'list', 'log', 'max', 'member', 'min', 'modulo', 'not',
'reverse', 'round', 'sin', 'sqrt', 'substring', 'tan',
'println', 'random', 'null?', 'callback', 'now',
)
scheme_functions = (
'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc',
'char->integer', 'char-alphabetic?', 'char-ci<=?', 'char-ci<?',
'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?',
'char-upcase', 'char-upper-case?', 'char-whitespace?',
'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', 'char?',
'close-input-port', 'close-output-port', 'complex?',
'current-input-port', 'current-output-port', 'denominator',
'display', 'dynamic-wind', 'eof-object?', 'eq?', 'equal?',
'eqv?', 'even?', 'exact->inexact', 'exact?', 'exp', 'expt',
'force', 'gcd', 'imag-part', 'inexact->exact', 'inexact?',
'input-port?', 'integer->char', 'integer?',
'interaction-environment', 'lcm', 'list->string',
'list->vector', 'list-ref', 'list-tail', 'list?', 'load',
'magnitude', 'make-polar', 'make-rectangular', 'make-string',
'make-vector', 'memq', 'memv', 'negative?', 'newline',
'null-environment', 'number->string', 'number?',
'numerator', 'odd?', 'open-input-file', 'open-output-file',
'output-port?', 'pair?', 'peek-char', 'port?', 'positive?',
'procedure?', 'quotient', 'rational?', 'rationalize', 'read',
'read-char', 'real-part', 'real?',
'remainder', 'scheme-report-environment', 'set-car!', 'set-cdr!',
'string', 'string->list', 'string->number', 'string->symbol',
'string-append', 'string-ci<=?', 'string-ci<?', 'string-ci=?',
'string-ci>=?', 'string-ci>?', 'string-copy', 'string-fill!',
'string-length', 'string-ref', 'string-set!', 'string<=?',
'string<?', 'string=?', 'string>=?', 'string>?', 'string?',
'symbol->string', 'symbol?', 'transcript-off', 'transcript-on',
'truncate', 'values', 'vector', 'vector->list', 'vector-fill!',
'vector-length', 'vector?',
'with-input-from-file', 'with-output-to-file', 'write',
'write-char', 'zero?',
)
xtlang_functions = (
'toString', 'afill!', 'pfill!', 'tfill!', 'tbind', 'vfill!',
'array-fill!', 'pointer-fill!', 'tuple-fill!', 'vector-fill!', 'free',
'array', 'tuple', 'list', '~', 'cset!', 'cref', '&', 'bor',
'ang-names', '<<', '>>', 'nil', 'printf', 'sprintf', 'null', 'now',
'pset!', 'pref-ptr', 'vset!', 'vref', 'aset!', 'aref', 'aref-ptr',
'tset!', 'tref', 'tref-ptr', 'salloc', 'halloc', 'zalloc', 'alloc',
'schedule', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sqrt', 'expt', 'floor', 'ceiling', 'truncate', 'round',
'llvm_printf', 'push_zone', 'pop_zone', 'memzone', 'callback',
'llvm_sprintf', 'make-array', 'array-set!', 'array-ref',
'array-ref-ptr', 'pointer-set!', 'pointer-ref', 'pointer-ref-ptr',
'stack-alloc', 'heap-alloc', 'zone-alloc', 'make-tuple', 'tuple-set!',
'tuple-ref', 'tuple-ref-ptr', 'closure-set!', 'closure-ref', 'pref',
'pdref', 'impc_null', 'bitcast', 'void', 'ifret', 'ret->', 'clrun->',
'make-env-zone', 'make-env', '<>', 'dtof', 'ftod', 'i1tof',
'i1tod', 'i1toi8', 'i1toi32', 'i1toi64', 'i8tof', 'i8tod',
'i8toi1', 'i8toi32', 'i8toi64', 'i32tof', 'i32tod', 'i32toi1',
'i32toi8', 'i32toi64', 'i64tof', 'i64tod', 'i64toi1',
'i64toi8', 'i64toi32',
)
# valid names for Scheme identifiers (names cannot consist fully
# of numbers, but this should be good enough for now)
valid_scheme_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
# valid characters in xtlang names & types
valid_xtlang_name = r'[\w.!-]+'
valid_xtlang_type = r'[]{}[\w<>,*/|!-]+'
tokens = {
# keep track of when we're exiting the xtlang form
'xtlang': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(?<=bind-func\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-val\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-type\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-alias\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-poly\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-dylib\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib-func\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib-val\s)' + valid_xtlang_name, Name.Function),
# type annotations
(r':' + valid_xtlang_type, Keyword.Type),
# types
(r'(<' + valid_xtlang_type + r'>|\|' + valid_xtlang_type + r'\||/' +
valid_xtlang_type + r'/|' + valid_xtlang_type + r'\*)\**',
Keyword.Type),
# keywords
(words(xtlang_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(xtlang_functions, prefix=r'(?<=\()'), Name.Function),
include('common'),
# variables
(valid_xtlang_name, Name.Variable),
],
'scheme': [
# quoted symbols
(r"'" + valid_scheme_name, String.Symbol),
# char literals
(r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# keywords
(words(scheme_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(scheme_functions, prefix=r'(?<=\()'), Name.Function),
include('common'),
# variables
(valid_scheme_name, Name.Variable),
],
# common to both xtlang and Scheme
'common': [
# comments
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# binary/oct/hex literals
(r'(#b|#o|#x)[\d.]+', Number),
# strings
(r'"(\\\\|\\"|[^"])*"', String),
# true/false constants
(r'(#t|#f)', Name.Constant),
# keywords
(words(common_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(common_functions, prefix=r'(?<=\()'), Name.Function),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
'root': [
# go into xtlang mode
(words(xtlang_bind_keywords, prefix=r'(?<=\()', suffix=r'\b'),
Keyword, 'xtlang'),
include('scheme')
],
}
| XtlangLexer |
python | mlflow__mlflow | mlflow/data/polars_dataset.py | {
"start": 4482,
"end": 11963
} | class ____(Dataset, PyFuncConvertibleDatasetMixin):
"""A polars DataFrame for use with MLflow Tracking."""
def __init__(
self,
df: pl.DataFrame,
source: DatasetSource,
targets: str | None = None,
name: str | None = None,
digest: str | None = None,
predictions: str | None = None,
) -> None:
"""
Args:
df: A polars DataFrame.
source: Source of the DataFrame.
targets: Name of the target column. Optional.
name: Name of the dataset. E.g. "wiki_train". If unspecified, a name is automatically
generated.
digest: Digest (hash, fingerprint) of the dataset. If unspecified, a digest is
automatically computed.
predictions: Name of the column containing model predictions, if the dataset contains
model predictions. Optional. If specified, this column must be present in ``df``.
"""
if targets is not None and targets not in df.columns:
raise MlflowException(
f"DataFrame does not contain specified targets column: '{targets}'",
INVALID_PARAMETER_VALUE,
)
if predictions is not None and predictions not in df.columns:
raise MlflowException(
f"DataFrame does not contain specified predictions column: '{predictions}'",
INVALID_PARAMETER_VALUE,
)
# _df needs to be set before super init, as it is used in _compute_digest
# see Dataset.__init__()
self._df = df
super().__init__(source=source, name=name, digest=digest)
self._targets = targets
self._predictions = predictions
def _compute_digest(self) -> str:
"""Compute a digest for the dataset.
Called if the user doesn't supply a digest when constructing the dataset.
"""
return hash_polars_df(self._df)
class PolarsDatasetConfig(TypedDict):
name: str
digest: str
source: str
source_type: str
schema: str
profile: str
def to_dict(self) -> PolarsDatasetConfig:
"""Create config dictionary for the dataset.
Return a string dictionary containing the following fields: name, digest, source,
source type, schema, and profile.
"""
schema = json.dumps({"mlflow_colspec": self.schema.to_dict()} if self.schema else None)
return {
"name": self.name,
"digest": self.digest,
"source": self.source.to_json(),
"source_type": self.source._get_source_type(),
"schema": schema,
"profile": json.dumps(self.profile),
}
@property
def df(self) -> pl.DataFrame:
"""Underlying DataFrame."""
return self._df
@property
def source(self) -> DatasetSource:
"""Source of the dataset."""
return self._source
@property
def targets(self) -> str | None:
"""Name of the target column.
May be ``None`` if no target column is available.
"""
return self._targets
@property
def predictions(self) -> str | None:
"""Name of the predictions column.
May be ``None`` if no predictions column is available.
"""
return self._predictions
class PolarsDatasetProfile(TypedDict):
num_rows: int
num_elements: int
@property
def profile(self) -> PolarsDatasetProfile:
"""Profile of the dataset."""
return {
"num_rows": self._df.height,
"num_elements": self._df.height * self._df.width,
}
@cached_property
def schema(self) -> Schema | None:
"""Instance of :py:class:`mlflow.types.Schema` representing the tabular dataset.
May be ``None`` if the schema cannot be inferred from the dataset.
"""
try:
return infer_schema(self._df)
except Exception as e:
_logger.warning("Failed to infer schema for PolarsDataset. Exception: %s", e)
return None
def to_pyfunc(self) -> PyFuncInputsOutputs:
"""Convert dataset to a collection of pyfunc inputs and outputs for model evaluation."""
if self._targets:
inputs = self._df.drop(*self._targets)
outputs = self._df.select(self._targets).to_series()
return PyFuncInputsOutputs([inputs.to_pandas()], [outputs.to_pandas()])
else:
return PyFuncInputsOutputs([self._df.to_pandas()])
def to_evaluation_dataset(self, path=None, feature_names=None) -> EvaluationDataset:
"""Convert dataset to an EvaluationDataset for model evaluation."""
return EvaluationDataset(
data=self._df.to_pandas(),
targets=self._targets,
path=path,
feature_names=feature_names,
predictions=self._predictions,
name=self.name,
digest=self.digest,
)
def from_polars(
df: pl.DataFrame,
source: str | DatasetSource | None = None,
targets: str | None = None,
name: str | None = None,
digest: str | None = None,
predictions: str | None = None,
) -> PolarsDataset:
"""Construct a :py:class:`PolarsDataset <mlflow.data.polars_dataset.PolarsDataset>` instance.
Args:
df: A polars DataFrame.
source: Source from which the DataFrame was derived, e.g. a filesystem
path, an S3 URI, an HTTPS URL, a delta table name with version, or
spark table etc. ``source`` may be specified as a URI, a path-like string,
or an instance of
:py:class:`DatasetSource <mlflow.data.dataset_source.DatasetSource>`.
If unspecified, the source is assumed to be the code location
(e.g. notebook cell, script, etc.) where
:py:func:`from_polars <mlflow.data.from_polars>` is being called.
targets: An optional target column name for supervised training. This column
must be present in ``df``.
name: Name of the dataset. If unspecified, a name is generated.
digest: Dataset digest (hash). If unspecified, a digest is computed
automatically.
predictions: An optional predictions column name for model evaluation. This column
must be present in ``df``.
.. code-block:: python
:test:
:caption: Example
import mlflow
import polars as pl
x = pl.DataFrame(
[["tom", 10, 1, 1], ["nick", 15, 0, 1], ["julie", 14, 1, 1]],
schema=["Name", "Age", "Label", "ModelOutput"],
)
dataset = mlflow.data.from_polars(x, targets="Label", predictions="ModelOutput")
"""
from mlflow.data.code_dataset_source import CodeDatasetSource
from mlflow.data.dataset_source_registry import resolve_dataset_source
from mlflow.tracking.context import registry
if source is not None:
if isinstance(source, DatasetSource):
resolved_source = source
else:
resolved_source = resolve_dataset_source(source)
else:
context_tags = registry.resolve_tags()
resolved_source = CodeDatasetSource(tags=context_tags)
return PolarsDataset(
df=df,
source=resolved_source,
targets=targets,
name=name,
digest=digest,
predictions=predictions,
)
| PolarsDataset |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 3735,
"end": 4796
} | class ____(nn.Module):
r"""Model with only Linear layers, some with bias, some in a Sequential and some following.
Activation functions modules in between each Linear in the Sequential, and functional
activationals are called in between each outside layer.
Used to test pruned Linear(Bias)-Activation-Linear fusion."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Linear(7, 5, bias=True),
nn.ReLU(),
nn.Linear(5, 6, bias=False),
nn.ReLU(),
nn.Linear(6, 4, bias=True),
)
self.linear1 = nn.Linear(4, 3, bias=True)
self.linear2 = nn.Linear(3, 8, bias=False)
self.linear3 = nn.Linear(8, 10, bias=False)
self.act1 = nn.ReLU()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.linear1(x)
x = F.relu(x)
x = self.linear2(x)
x = F.relu(x)
x = self.linear3(x)
x = F.relu(x)
return x
| LinearActivationFunctional |
python | run-llama__llama_index | llama-index-cli/llama_index/cli/rag/base.py | {
"start": 1522,
"end": 13132
} | class ____(BaseModel):
"""
CLI tool for chatting with output of a IngestionPipeline via a RetrieverQueryEngine.
"""
ingestion_pipeline: IngestionPipeline = Field(
description="Ingestion pipeline to run for RAG ingestion."
)
verbose: bool = Field(
description="Whether to print out verbose information during execution.",
default=False,
)
persist_dir: str = Field(
description="Directory to persist ingestion pipeline.",
default_factory=default_ragcli_persist_dir,
)
llm: LLM = Field(
description="Language model to use for response generation.",
default_factory=lambda: _try_load_openai_llm(),
)
chat_engine: Optional[CondenseQuestionChatEngine] = Field(
description="Chat engine to use for chatting.",
default=None,
)
file_extractor: Optional[Dict[str, BaseReader]] = Field(
description="File extractor to use for extracting text from files.",
default=None,
)
class Config:
arbitrary_types_allowed = True
@field_validator("chat_engine", mode="before")
def chat_engine_from_ingestion_pipeline(
cls, chat_engine: Any, values: Dict[str, Any]
) -> Optional[CondenseQuestionChatEngine]:
"""
If chat_engine is not provided, create one from ingestion_pipeline.
"""
if chat_engine is not None:
return chat_engine
ingestion_pipeline = cast(IngestionPipeline, values["ingestion_pipeline"])
if ingestion_pipeline.vector_store is None:
return None
verbose = cast(bool, values["verbose"])
llm = cast(LLM, values["llm"])
# get embed_model from transformations if possible
embed_model = None
if ingestion_pipeline.transformations is not None:
for transformation in ingestion_pipeline.transformations:
if isinstance(transformation, BaseEmbedding):
embed_model = transformation
break
Settings.llm = llm
Settings.embed_model = embed_model
retriever = VectorStoreIndex.from_vector_store(
ingestion_pipeline.vector_store, embed_model=embed_model
).as_retriever(similarity_top_k=8)
response_synthesizer = CompactAndRefine(
streaming=True, llm=llm, verbose=verbose
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
return CondenseQuestionChatEngine.from_defaults(
query_engine=query_engine, llm=llm, verbose=verbose
)
async def handle_cli(
self,
files: Optional[List[str]] = None,
question: Optional[str] = None,
chat: bool = False,
verbose: bool = False,
clear: bool = False,
create_llama: bool = False,
**kwargs: Dict[str, Any],
) -> None:
"""
Entrypoint for local document RAG CLI tool.
"""
if clear:
# delete self.persist_dir directory including all subdirectories and files
if os.path.exists(self.persist_dir):
# Ask for confirmation
response = input(
f"Are you sure you want to delete data within {self.persist_dir}? [y/N] "
)
if response.strip().lower() != "y":
print("Aborted.")
return
try:
shutil.rmtree(self.persist_dir)
except Exception as e:
print(f"Error clearing {self.persist_dir}: {e}")
return
print(f"Successfully cleared {self.persist_dir}")
self.verbose = verbose
ingestion_pipeline = cast(IngestionPipeline, self.ingestion_pipeline)
if self.verbose:
print("Saving/Loading from persist_dir: ", self.persist_dir)
if files is not None:
expanded_files = []
for pattern in files:
expanded_files.extend(iglob(pattern, recursive=True))
documents = []
for _file in expanded_files:
_file = os.path.abspath(_file)
if os.path.isdir(_file):
reader = SimpleDirectoryReader(
input_dir=_file,
filename_as_id=True,
file_extractor=self.file_extractor,
)
else:
reader = SimpleDirectoryReader(
input_files=[_file],
filename_as_id=True,
file_extractor=self.file_extractor,
)
documents.extend(reader.load_data(show_progress=verbose))
await ingestion_pipeline.arun(show_progress=verbose, documents=documents)
ingestion_pipeline.persist(persist_dir=self.persist_dir)
# Append the `--files` argument to the history file
with open(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}", "a") as f:
for file in files:
f.write(str(file) + "\n")
if create_llama:
if shutil.which("npx") is None:
print(
"`npx` is not installed. Please install it by calling `npm install -g npx`"
)
else:
history_file_path = Path(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}")
if not history_file_path.exists():
print(
"No data has been ingested, "
"please specify `--files` to create llama dataset."
)
else:
with open(history_file_path) as f:
stored_paths = {line.strip() for line in f if line.strip()}
if len(stored_paths) == 0:
print(
"No data has been ingested, "
"please specify `--files` to create llama dataset."
)
elif len(stored_paths) > 1:
print(
"Multiple files or folders were ingested, which is not supported by create-llama. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file"
)
else:
path = stored_paths.pop()
if "*" in path:
print(
"Glob pattern is not supported by create-llama. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file."
)
elif not os.path.exists(path):
print(
f"The path {path} does not exist. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file."
)
else:
print(f"Calling create-llama using data from {path} ...")
command_args = [
"npx",
"create-llama@latest",
"--frontend",
"--template",
"streaming",
"--framework",
"fastapi",
"--ui",
"shadcn",
"--vector-db",
"none",
"--engine",
"context",
"--files",
path,
]
subprocess.run(command_args, check=True)
if question is not None:
await self.handle_question(question)
if chat:
await self.start_chat_repl()
async def handle_question(self, question: str) -> None:
chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
response = chat_engine.chat(question)
if isinstance(response, StreamingResponse):
response.print_response_stream()
else:
response = cast(Response, response)
print(response)
async def start_chat_repl(self) -> None:
"""
Start a REPL for chatting with the agent.
"""
if self.chat_engine is None:
raise ValueError("chat_engine is not defined.")
chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
chat_engine.streaming_chat_repl()
@classmethod
def add_parser_args(
cls,
parser: Union[ArgumentParser, Any],
instance_generator: Optional[Callable[[], "RagCLI"]],
) -> None:
if instance_generator:
parser.add_argument(
"-q",
"--question",
type=str,
help="The question you want to ask.",
required=False,
)
parser.add_argument(
"-f",
"--files",
type=str,
nargs="+",
help=(
"The name of the file(s) or directory you want to ask a question about,"
'such as "file.pdf". Supports globs like "*.py".'
),
)
parser.add_argument(
"-c",
"--chat",
help="If flag is present, opens a chat REPL.",
action="store_true",
)
parser.add_argument(
"-v",
"--verbose",
help="Whether to print out verbose information during execution.",
action="store_true",
)
parser.add_argument(
"--clear",
help="Clears out all currently embedded data.",
action="store_true",
)
parser.add_argument(
"--create-llama",
help="Create a LlamaIndex application with your embedded data.",
required=False,
action="store_true",
)
parser.set_defaults(
func=lambda args: asyncio.run(
instance_generator().handle_cli(**vars(args))
)
)
def cli(self) -> None:
"""
Entrypoint for CLI tool.
"""
parser = ArgumentParser(description="LlamaIndex RAG Q&A tool.")
subparsers = parser.add_subparsers(
title="commands", dest="command", required=True
)
llamarag_parser = subparsers.add_parser(
"rag", help="Ask a question to a document / a directory of documents."
)
self.add_parser_args(llamarag_parser, lambda: self)
# Parse the command-line arguments
args = parser.parse_args()
# Call the appropriate function based on the command
args.func(args)
| RagCLI |
python | sphinx-doc__sphinx | sphinx/environment/collectors/__init__.py | {
"start": 314,
"end": 3219
} | class ____:
"""An EnvironmentCollector is a specific data collector from each document.
It gathers data and stores :py:class:`BuildEnvironment
<sphinx.environment.BuildEnvironment>` as a database.
Examples of specific data would be images, download files, section titles, metadatas, index
entries and toctrees, etc.
.. note::
This class essentially wraps a sub-set of :ref:`Sphinx event callbacks <events>`.
"""
listener_ids: dict[str, int] | None = None
def enable(self, app: Sphinx) -> None:
assert self.listener_ids is None
self.listener_ids = {
'doctree-read': app.connect('doctree-read', self.process_doc),
'env-merge-info': app.connect('env-merge-info', self.merge_other),
'env-purge-doc': app.connect('env-purge-doc', self.clear_doc),
'env-get-updated': app.connect('env-get-updated', self.get_updated_docs),
'env-get-outdated': app.connect('env-get-outdated', self.get_outdated_docs),
}
def disable(self, app: Sphinx) -> None:
assert self.listener_ids is not None
for listener_id in self.listener_ids.values():
app.disconnect(listener_id)
self.listener_ids = None
def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:
"""Remove specified data of a document.
This method is called on the removal of the document.
.. seealso:: :event:`env-purge-doc`
"""
raise NotImplementedError
def merge_other(
self,
app: Sphinx,
env: BuildEnvironment,
docnames: Set[str],
other: BuildEnvironment,
) -> None:
"""Merge in specified data regarding docnames from a different `BuildEnvironment`
object which coming from a subprocess in parallel builds.
.. seealso:: :event:`env-merge-info`
"""
raise NotImplementedError
def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
"""Process a document and gather specific data from it.
This method is called after the document is read.
.. seealso:: :event:`doctree-read`
"""
raise NotImplementedError
def get_updated_docs(self, app: Sphinx, env: BuildEnvironment) -> list[str]:
"""Return a list of docnames to re-read.
This method is called after reading the whole of documents.
.. seealso:: :event:`env-get-updated`
"""
return []
def get_outdated_docs(
self,
app: Sphinx,
env: BuildEnvironment,
added: set[str],
changed: set[str],
removed: set[str],
) -> list[str]:
"""Return a list of docnames to re-read.
This method is called before reading the documents.
.. seealso:: :event:`env-get-outdated`
"""
return []
| EnvironmentCollector |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/discord/tests.py | {
"start": 2015,
"end": 3552
} | class ____(DiscordTests, TestCase):
provider_id = DiscordProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"id": "80351110224678912",
"username": "Nelly",
"discriminator": "1337",
"avatar": "8342729096ea3675442027381ff50dfe",
"verified": true,
"email": "nelly@example.com"
}""",
)
def get_expected_to_str(self):
return "Nelly#1337"
def test_display_name(self, multiple_login=False):
email = "user@example.com"
user = get_user_model()(is_active=True)
user_email(user, email)
user_username(user, "user")
user.set_password("test")
user.save()
EmailAddress.objects.create(user=user, email=email, primary=True, verified=True)
self.client.login(username=user.username, password="test")
self.login(self.get_mocked_response(), process="connect")
if multiple_login:
self.login(
self.get_mocked_response(),
with_refresh_token=False,
process="connect",
)
# get account
sa = SocialAccount.objects.filter(user=user, provider=self.provider.id).get()
# The following lines don't actually test that much, but at least
# we make sure that the code is hit.
provider_account = sa.get_provider_account()
self.assertEqual(provider_account.to_str(), "Nelly#1337")
| OldDiscordTests |
python | getsentry__sentry | src/sentry/preprod/api/models/project_preprod_build_details_models.py | {
"start": 512,
"end": 585
} | class ____(BaseModel):
has_proguard_mapping: bool = True
| AndroidAppInfo |
python | huggingface__transformers | tests/models/bart/test_modeling_bart.py | {
"start": 8249,
"end": 15788
} | class ____(unittest.TestCase):
vocab_size = 99
def _get_config_and_data(self):
input_ids = torch.tensor(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],
dtype=torch.long,
device=torch_device,
)
batch_size = input_ids.shape[0]
config = BartConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
return config, input_ids, batch_size
def test_sequence_classification_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
labels = _long_tensor([2] * batch_size).to(torch_device)
model = BartForSequenceClassification(config)
model.to(torch_device)
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels)
expected_shape = torch.Size((batch_size, config.num_labels))
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_question_answering_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
sequence_labels = ids_tensor([batch_size], 2).to(torch_device)
model = BartForQuestionAnswering(config)
model.to(torch_device)
outputs = model(
input_ids=input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.assertEqual(outputs["start_logits"].shape, input_ids.shape)
self.assertEqual(outputs["end_logits"].shape, input_ids.shape)
self.assertIsInstance(outputs["loss"].item(), float)
@timeout_decorator.timeout(1)
def test_lm_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
lm_model = BartForConditionalGeneration(config)
lm_model.to(torch_device)
outputs = lm_model(input_ids=input_ids, labels=lm_labels)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_lm_uneven_forward(self):
config = BartConfig(
vocab_size=self.vocab_size,
d_model=14,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=8,
decoder_ffn_dim=8,
max_position_embeddings=48,
)
lm_model = BartForConditionalGeneration(config).to(torch_device)
context = torch.tensor(
[[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
def test_generate_beam_search(self):
input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long)
config = BartConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
lm_model = BartForConditionalGeneration(config).to(torch_device)
lm_model.eval()
max_length = 5
generated_ids = lm_model.generate(
input_ids.clone(),
do_sample=True,
num_return_sequences=1,
num_beams=2,
no_repeat_ngram_size=3,
max_length=max_length,
)
self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length))
def test_shift_tokens_right(self):
input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long)
shifted = shift_tokens_right(input_ids, 1, 2)
n_pad_before = input_ids.eq(1).float().sum()
n_pad_after = shifted.eq(1).float().sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(torch.eq(shifted[:, 0], 2).all())
@slow
def test_tokenization(self):
tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
examples = [" Hello world", " DomDramg"] # need leading spaces for equality
fairseq_results = [
torch.tensor([0, 20920, 232, 2]),
torch.tensor([0, 11349, 495, 4040, 571, 2]),
]
for ex, desired_result in zip(examples, fairseq_results):
bart_toks = tokenizer.encode(ex, return_tensors="pt").squeeze()
assert_tensors_close(desired_result.long(), bart_toks, prefix=ex)
@require_torch_fp16
def test_generate_fp16(self):
config, input_ids, batch_size = self._get_config_and_data()
attention_mask = input_ids.ne(1).to(torch_device)
model = BartForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_dummy_inputs(self):
config, *_ = self._get_config_and_data()
model = BartForConditionalGeneration(config).eval().to(torch_device)
model(**model.dummy_inputs)
def test_resize_tokens_embeddings_more(self):
config, input_ids, _ = self._get_config_and_data()
def _get_embs(m):
return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone())
model = BartForConditionalGeneration(config).eval().to(torch_device)
input, output = _get_embs(model)
self.assertTrue(torch.eq(input, output).all())
new_vocab_size = 45
model.resize_token_embeddings(new_vocab_size)
input_new, output_new = _get_embs(model)
self.assertEqual(input_new.shape, (new_vocab_size, config.d_model))
self.assertEqual(output_new.shape, (new_vocab_size, config.d_model))
self.assertTrue(torch.eq(input_new, output_new).all())
@require_torch
| BartHeadTests |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 18324,
"end": 18707
} | class ____(Literal):
"""A constant template string."""
fields = ("data",)
data: str
def as_const(self, eval_ctx: EvalContext | None = None) -> str:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
| TemplateData |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 43840,
"end": 44525
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("zh-TW")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in ZhTwProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in ZhTwProvider.MONTH_NAMES.values()
def test_year(self):
year = self.fake.year()
assert isinstance(year, str)
assert year.isdigit()
assert len(year) >= 4
def test_minguo_year(self):
year = self.fake.minguo_year()
assert isinstance(year, str)
assert year.isdigit()
assert 1 <= len(year) <= 3
| TestZhTw |
python | apache__airflow | providers/databricks/tests/unit/databricks/plugins/test_databricks_workflow.py | {
"start": 15157,
"end": 18595
} | class ____:
"""Test Databricks Workflow Plugin functionality specific to Airflow 2.x."""
def test_plugin_operator_extra_links_full_functionality(self):
"""Test that all operator_extra_links are present in Airflow 2.x."""
plugin = DatabricksWorkflowPlugin()
# In Airflow 2.x, all links should be present including repair links
assert len(plugin.operator_extra_links) >= 2 # At least job run link + repair links
link_types = [type(link).__name__ for link in plugin.operator_extra_links]
assert "WorkflowJobRunLink" in link_types
# Should have repair links in 2.x
assert any("Repair" in link_type for link_type in link_types)
def test_plugin_has_appbuilder_views(self):
"""Test that appbuilder_views are configured for repair functionality in Airflow 2.x."""
plugin = DatabricksWorkflowPlugin()
# In Airflow 2.x, appbuilder_views should be present for repair functionality
assert hasattr(plugin, "appbuilder_views")
assert plugin.appbuilder_views is not None
def test_store_databricks_job_run_link_returns_early(self):
"""Test that store_databricks_job_run_link returns early in Airflow 2.x."""
ti_mock = Mock()
ti_mock.xcom_push = Mock()
context = {
"ti": ti_mock,
"dag": Mock(dag_id="test_dag"),
"dag_run": Mock(run_id="test_run"),
"task": Mock(task_id="test_task"),
}
metadata = Mock(conn_id="databricks_default", job_id=12345, run_id=67890)
store_databricks_job_run_link(context, metadata, logger)
ti_mock.xcom_push.assert_not_called()
def test_workflow_job_run_link_uses_legacy_method(self):
"""Test that WorkflowJobRunLink.get_link uses legacy method in Airflow 2.x."""
link = WorkflowJobRunLink()
operator = Mock()
operator.task_group = Mock()
operator.task_group.group_id = "test_group"
ti_key = TaskInstanceKey(dag_id="test_dag", task_id="test_task", run_id="test_run", try_number=1)
with patch(
"airflow.providers.databricks.plugins.databricks_workflow.get_task_instance"
) as mock_get_ti:
with patch(
"airflow.providers.databricks.plugins.databricks_workflow.get_xcom_result"
) as mock_get_xcom:
with patch(
"airflow.providers.databricks.plugins.databricks_workflow._get_dag"
) as mock_get_dag:
with patch(
"airflow.providers.databricks.plugins.databricks_workflow.DatabricksHook"
) as mock_hook:
mock_get_ti.return_value = Mock(key=ti_key)
mock_get_xcom.return_value = Mock(conn_id="conn_id", run_id=1, job_id=1)
mock_get_dag.return_value.get_task.return_value = Mock(task_id="test_task")
mock_hook_instance = Mock()
mock_hook_instance.host = "test-host"
mock_hook.return_value = mock_hook_instance
result = link.get_link(operator, ti_key=ti_key)
# Verify legacy method was used (should contain databricks host)
assert "test-host" in result
assert "#job/1/run/1" in result
| TestDatabricksWorkflowPluginAirflow2 |
python | realpython__materials | python-getter-setter/employee.py | {
"start": 0,
"end": 120
} | class ____:
def __init__(self, name, birth_date):
self.name = name
self.birth_date = birth_date
| Employee |
python | Netflix__metaflow | metaflow/runner/deployer.py | {
"start": 3307,
"end": 5060
} | class ____(metaclass=DeployerMeta):
"""
Use the `Deployer` class to configure and access one of the production
orchestrators supported by Metaflow.
Parameters
----------
flow_file : str
Path to the flow file to deploy, relative to current directory.
show_output : bool, default True
Show the 'stdout' and 'stderr' to the console by default.
profile : Optional[str], default None
Metaflow profile to use for the deployment. If not specified, the default
profile is used.
env : Optional[Dict[str, str]], default None
Additional environment variables to set for the deployment.
cwd : Optional[str], default None
The directory to run the subprocess in; if not specified, the current
directory is used.
file_read_timeout : int, default 3600
The timeout until which we try to read the deployer attribute file (in seconds).
**kwargs : Any
Additional arguments that you would pass to `python myflow.py` before
the deployment command.
"""
def __init__(
self,
flow_file: str,
show_output: bool = True,
profile: Optional[str] = None,
env: Optional[Dict] = None,
cwd: Optional[str] = None,
file_read_timeout: int = 3600,
**kwargs,
):
# Convert flow_file to absolute path if it's relative
if not os.path.isabs(flow_file):
self.flow_file = os.path.abspath(flow_file)
else:
self.flow_file = flow_file
self.show_output = show_output
self.profile = profile
self.env = env
self.cwd = cwd
self.file_read_timeout = file_read_timeout
self.top_level_kwargs = kwargs
| Deployer |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 307719,
"end": 308815
} | class ____:
def test_entropy(self):
# Test that dweibull entropy follows that of weibull_min.
# (Generic tests check that the dweibull entropy is consistent
# with its PDF. As for accuracy, dweibull entropy should be just
# as accurate as weibull_min entropy. Checks of accuracy against
# a reference need only be applied to the fundamental distribution -
# weibull_min.)
rng = np.random.default_rng(8486259129157041777)
c = 10**rng.normal(scale=100, size=10)
res = stats.dweibull.entropy(c)
ref = stats.weibull_min.entropy(c) - np.log(0.5)
assert_allclose(res, ref, rtol=1e-15)
def test_sf(self):
# test that for positive values the dweibull survival function is half
# the weibull_min survival function
rng = np.random.default_rng(8486259129157041777)
c = 10**rng.normal(scale=1, size=10)
x = 10 * rng.uniform()
res = stats.dweibull.sf(x, c)
ref = 0.5 * stats.weibull_min.sf(x, c)
assert_allclose(res, ref, rtol=1e-15)
| TestDweibull |
python | pandas-dev__pandas | pandas/tests/indexing/interval/test_interval_new.py | {
"start": 187,
"end": 8134
} | class ____:
@pytest.fixture
def series_with_interval_index(self):
"""
Fixture providing a Series with an IntervalIndex.
"""
return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))
def test_loc_with_interval(self, series_with_interval_index, indexer_sl):
# loc with single label / list of labels:
# - Intervals: only exact matches
# - scalars: those that contain it
ser = series_with_interval_index.copy()
expected = 0
result = indexer_sl(ser)[Interval(0, 1)]
assert result == expected
expected = ser.iloc[3:5]
result = indexer_sl(ser)[[Interval(3, 4), Interval(4, 5)]]
tm.assert_series_equal(expected, result)
# missing or not exact
with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")):
indexer_sl(ser)[Interval(3, 5, closed="left")]
with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
indexer_sl(ser)[Interval(3, 5)]
with pytest.raises(
KeyError, match=re.escape("Interval(-2, 0, closed='right')")
):
indexer_sl(ser)[Interval(-2, 0)]
with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")):
indexer_sl(ser)[Interval(5, 6)]
def test_loc_with_scalar(self, series_with_interval_index, indexer_sl):
# loc with single label / list of labels:
# - Intervals: only exact matches
# - scalars: those that contain it
ser = series_with_interval_index.copy()
assert indexer_sl(ser)[1] == 0
assert indexer_sl(ser)[1.5] == 1
assert indexer_sl(ser)[2] == 1
expected = ser.iloc[1:4]
tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2.5, 3.5]])
tm.assert_series_equal(expected, indexer_sl(ser)[[2, 3, 4]])
tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 3, 4]])
expected = ser.iloc[[1, 1, 2, 1]]
tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2, 2.5, 1.5]])
expected = ser.iloc[2:5]
tm.assert_series_equal(expected, indexer_sl(ser)[ser >= 2])
def test_loc_with_slices(self, series_with_interval_index, indexer_sl):
# loc with slices:
# - Interval objects: only works with exact matches
# - scalars: only works for non-overlapping, monotonic intervals,
# and start/stop select location based on the interval that
# contains them:
# (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop))
ser = series_with_interval_index.copy()
# slice of interval
expected = ser.iloc[:3]
result = indexer_sl(ser)[Interval(0, 1) : Interval(2, 3)]
tm.assert_series_equal(expected, result)
expected = ser.iloc[3:]
result = indexer_sl(ser)[Interval(3, 4) :]
tm.assert_series_equal(expected, result)
msg = "Interval objects are not currently supported"
with pytest.raises(NotImplementedError, match=msg):
indexer_sl(ser)[Interval(3, 6) :]
with pytest.raises(NotImplementedError, match=msg):
indexer_sl(ser)[Interval(3, 4, closed="left") :]
def test_slice_step_ne1(self, series_with_interval_index):
# GH#31658 slice of scalar with step != 1
ser = series_with_interval_index.copy()
expected = ser.iloc[0:4:2]
result = ser[0:4:2]
tm.assert_series_equal(result, expected)
result2 = ser[0:4][::2]
tm.assert_series_equal(result2, expected)
def test_slice_float_start_stop(self, series_with_interval_index):
# GH#31658 slicing with integers is positional, with floats is not
# supported
ser = series_with_interval_index.copy()
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
ser[1.5:9.5:2]
def test_slice_interval_step(self, series_with_interval_index):
# GH#31658 allows for integer step!=1, not Interval step
ser = series_with_interval_index.copy()
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
ser[0 : 4 : Interval(0, 1)]
def test_loc_with_overlap(self, indexer_sl):
idx = IntervalIndex.from_tuples([(1, 5), (3, 7)])
ser = Series(range(len(idx)), index=idx)
# scalar
expected = ser
result = indexer_sl(ser)[4]
tm.assert_series_equal(expected, result)
result = indexer_sl(ser)[[4]]
tm.assert_series_equal(expected, result)
# interval
expected = 0
result = indexer_sl(ser)[Interval(1, 5)]
assert expected == result
expected = ser
result = indexer_sl(ser)[[Interval(1, 5), Interval(3, 7)]]
tm.assert_series_equal(expected, result)
with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
indexer_sl(ser)[Interval(3, 5)]
msg = (
r"None of \[IntervalIndex\(\[\(3, 5\]\], "
r"dtype='interval\[int64, right\]'\)\] are in the \[index\]"
)
with pytest.raises(KeyError, match=msg):
indexer_sl(ser)[[Interval(3, 5)]]
# slices with interval (only exact matches)
expected = ser
result = indexer_sl(ser)[Interval(1, 5) : Interval(3, 7)]
tm.assert_series_equal(expected, result)
msg = (
"'can only get slices from an IntervalIndex if bounds are "
"non-overlapping and all monotonic increasing or decreasing'"
)
with pytest.raises(KeyError, match=msg):
indexer_sl(ser)[Interval(1, 6) : Interval(3, 8)]
if indexer_sl is tm.loc:
# slices with scalar raise for overlapping intervals
# TODO KeyError is the appropriate error?
with pytest.raises(KeyError, match=msg):
ser.loc[1:4]
def test_non_unique(self, indexer_sl):
idx = IntervalIndex.from_tuples([(1, 3), (3, 7)])
ser = Series(range(len(idx)), index=idx)
result = indexer_sl(ser)[Interval(1, 3)]
assert result == 0
result = indexer_sl(ser)[[Interval(1, 3)]]
expected = ser.iloc[0:1]
tm.assert_series_equal(expected, result)
def test_non_unique_moar(self, indexer_sl):
idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)])
ser = Series(range(len(idx)), index=idx)
expected = ser.iloc[[0, 1]]
result = indexer_sl(ser)[Interval(1, 3)]
tm.assert_series_equal(expected, result)
expected = ser
result = indexer_sl(ser)[Interval(1, 3) :]
tm.assert_series_equal(expected, result)
expected = ser.iloc[[0, 1]]
result = indexer_sl(ser)[[Interval(1, 3)]]
tm.assert_series_equal(expected, result)
def test_loc_getitem_missing_key_error_message(
self, frame_or_series, series_with_interval_index
):
# GH#27365
ser = series_with_interval_index.copy()
obj = frame_or_series(ser)
with pytest.raises(KeyError, match=r"\[6\]"):
obj.loc[[4, 5, 6]]
@pytest.mark.xfail(WASM, reason="GH 23440")
@pytest.mark.parametrize(
"intervals",
[
([Interval(-np.inf, 0.0), Interval(0.0, 1.0)]),
([Interval(-np.inf, -2.0), Interval(-2.0, -1.0)]),
([Interval(-1.0, 0.0), Interval(0.0, np.inf)]),
([Interval(1.0, 2.0), Interval(2.0, np.inf)]),
],
)
def test_repeating_interval_index_with_infs(intervals):
# GH 46658
interval_index = Index(intervals * 51)
expected = np.arange(1, 102, 2, dtype=np.intp)
result = interval_index.get_indexer_for([intervals[1]])
tm.assert_equal(result, expected)
| TestIntervalIndex |
python | keras-team__keras | keras/src/layers/convolutional/conv2d.py | {
"start": 179,
"end": 6251
} | class ____(BaseConv):
"""2D convolution layer.
This layer creates a convolution kernel that is convolved with the layer
input over a 2D spatial (or temporal) dimension (height and width) to
produce a tensor of outputs. If `use_bias` is True, a bias vector is created
and added to the outputs. Finally, if `activation` is not `None`, it is
applied to the outputs as well.
Note on numerical precision: While in general Keras operation execution
results are identical across backends up to 1e-7 precision in float32,
`Conv2D` operations may show larger variations. Due to the large
number of element-wise multiplications and additions in convolution
operations, especially with large inputs or kernel sizes, accumulated
floating-point differences can exceed this 1e-7 threshold. These variations
are particularly noticeable when using different backends (e.g., TensorFlow
vs JAX) or different hardware.
Args:
filters: int, the dimension of the output space (the number of filters
in the convolution).
kernel_size: int or tuple/list of 2 integer, specifying the size of the
convolution window.
strides: int or tuple/list of 2 integer, specifying the stride length
of the convolution. `strides > 1` is incompatible with
`dilation_rate > 1`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input. When `padding="same"` and
`strides=1`, the output has the same size as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch_size, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch_size, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
dilation_rate: int or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution.
groups: A positive int specifying the number of groups in which the
input is split along the channel axis. Each group is convolved
separately with `filters // groups` filters. The output is the
concatenation of all the `groups` results along the channel axis.
Input channels and `filters` must both be divisible by `groups`.
activation: Activation function. If `None`, no activation is applied.
use_bias: bool, if `True`, bias will be added to the output.
kernel_initializer: Initializer for the convolution kernel. If `None`,
the default initializer (`"glorot_uniform"`) will be used.
bias_initializer: Initializer for the bias vector. If `None`, the
default initializer (`"zeros"`) will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The
function must take as input the unprojected variable and must return
the projected variable (which must have the same shape). Constraints
are not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
Input shape:
- If `data_format="channels_last"`:
A 4D tensor with shape: `(batch_size, height, width, channels)`
- If `data_format="channels_first"`:
A 4D tensor with shape: `(batch_size, channels, height, width)`
Output shape:
- If `data_format="channels_last"`:
A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`
- If `data_format="channels_first"`:
A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`
Returns:
A 4D tensor representing `activation(conv2d(inputs, kernel) + bias)`.
Raises:
ValueError: when both `strides > 1` and `dilation_rate > 1`.
Example:
>>> x = np.random.rand(4, 10, 10, 128)
>>> y = keras.layers.Conv2D(32, 3, activation='relu')(x)
>>> print(y.shape)
(4, 8, 8, 32)
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
groups=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
| Conv2D |
python | huggingface__transformers | tests/models/timesfm/test_modeling_timesfm.py | {
"start": 1024,
"end": 4315
} | class ____:
def __init__(
self,
parent,
patch_length: int = 32,
context_length: int = 512,
horizon_length: int = 128,
freq_size: int = 3,
num_hidden_layers: int = 1,
hidden_size: int = 16,
intermediate_size: int = 32,
head_dim: int = 8,
num_heads: int = 2,
tolerance: float = 1e-6,
rms_norm_eps: float = 1e-6,
quantiles: list[float] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
pad_val: float = 1123581321.0,
use_positional_embedding: bool = True,
initializer_factor: float = 0.0,
is_training: bool = False,
batch_size: int = 3,
):
self.parent = parent
self.patch_length = patch_length
self.context_length = context_length
self.horizon_length = horizon_length
self.quantiles = quantiles
self.pad_val = pad_val
self.freq_size = freq_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.head_dim = head_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_heads
self.tolerance = tolerance
self.rms_norm_eps = rms_norm_eps
self.use_positional_embedding = use_positional_embedding
self.initializer_factor = initializer_factor
self.is_training = is_training
self.batch_size = batch_size
# The size of test input
self.seq_length = context_length // patch_length
self.hidden_size = hidden_size
def get_config(self):
return TimesFmConfig(
patch_length=self.patch_length,
context_length=self.context_length,
horizon_length=self.horizon_length,
quantiles=self.quantiles,
pad_val=self.pad_val,
freq_size=self.freq_size,
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
head_dim=self.head_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
tolerance=self.tolerance,
rms_norm_eps=self.rms_norm_eps,
use_positional_embedding=self.use_positional_embedding,
initializer_factor=self.initializer_factor,
)
def get_pipeline_config(self):
return self.get_config()
def prepare_config_and_inputs(self):
forecast_input = [
torch.tensor(np.sin(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
torch.tensor(np.cos(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
torch.tensor(np.tan(np.linspace(0, 20, 100)), dtype=torch.float32, device=torch_device),
]
frequency_input = torch.tensor([0, 1, 2], dtype=torch.long, device=torch_device)
return (self.get_config(), torch.stack(forecast_input, dim=0), frequency_input)
def prepare_config_and_inputs_for_common(self):
(config, forecast_input, frequency_input) = self.prepare_config_and_inputs()
inputs_dict = {
"past_values": forecast_input,
"freq": frequency_input,
}
return config, inputs_dict
@require_torch
| TimesFmModelTester |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/adls2/file_manager.py | {
"start": 1304,
"end": 4648
} | class ____(FileManager):
def __init__(self, adls2_client: DataLakeServiceClient, file_system: str, prefix: str):
self._client = adls2_client
self._file_system = check.str_param(file_system, "file_system")
self._prefix = check.str_param(prefix, "prefix")
self._local_handle_cache: dict[str, str] = {}
self._temp_file_manager = TempfileManager()
def copy_handle_to_local_temp(self, file_handle: ADLS2FileHandle): # pyright: ignore[reportIncompatibleMethodOverride]
self._download_if_not_cached(file_handle)
return self._get_local_path(file_handle)
def _download_if_not_cached(self, file_handle: ADLS2FileHandle):
if not self._file_handle_cached(file_handle):
# instigate download
temp_file_obj = self._temp_file_manager.tempfile()
temp_name = temp_file_obj.name
file = self._client.get_file_client(
file_system=file_handle.file_system,
file_path=file_handle.key,
)
download = file.download_file()
with open(temp_name, "wb") as file_obj:
download.readinto(file_obj)
self._local_handle_cache[file_handle.adls2_path] = temp_name
return file_handle
@contextmanager
def read(self, file_handle: ADLS2FileHandle, mode: str = "rb"): # pyright: ignore[reportIncompatibleMethodOverride]
check.inst_param(file_handle, "file_handle", ADLS2FileHandle)
check.str_param(mode, "mode")
check.param_invariant(mode in {"r", "rb"}, "mode")
self._download_if_not_cached(file_handle)
encoding = None if "b" in mode else "utf-8"
with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:
yield file_obj
def _file_handle_cached(self, file_handle: ADLS2FileHandle) -> bool:
return file_handle.adls2_path in self._local_handle_cache
def _get_local_path(self, file_handle: ADLS2FileHandle) -> str:
return self._local_handle_cache[file_handle.adls2_path]
def read_data(self, file_handle: ADLS2FileHandle) -> Any: # pyright: ignore[reportIncompatibleMethodOverride]
with self.read(file_handle, mode="rb") as file_obj:
return file_obj.read()
def write_data(self, data: bytes, ext: Optional[str] = None) -> ADLS2FileHandle:
check.inst_param(data, "data", bytes)
return self.write(io.BytesIO(data), mode="wb", ext=ext)
def write( # pyright: ignore[reportIncompatibleMethodOverride]
self, file_obj: io.BytesIO, mode: str = "wb", ext: Optional[str] = None
) -> ADLS2FileHandle:
check_file_like_obj(file_obj)
adls2_key = self.get_full_key(str(uuid.uuid4()) + (("." + ext) if ext is not None else ""))
adls2_file = self._client.get_file_client(
file_system=self._file_system, file_path=adls2_key
)
adls2_file.upload_data(file_obj, overwrite=True)
account_name = check.not_none(self._client.account_name, "Expected account name to be set")
return ADLS2FileHandle(account_name, self._file_system, adls2_key)
def get_full_key(self, file_key: str) -> str:
return f"{self._prefix}/{file_key}"
def delete_local_temp(self) -> None:
self._temp_file_manager.close()
| ADLS2FileManager |
python | jazzband__django-waffle | waffle/tests/test_testutils.py | {
"start": 10392,
"end": 10599
} | class ____(OverrideFlagOnClassTestsMixin,
TransactionTestCase):
"""
Run tests with Django TransactionTestCase
"""
| OverrideFlagOnClassTransactionTestCase |
python | scikit-learn__scikit-learn | sklearn/ensemble/tests/test_stacking.py | {
"start": 9808,
"end": 10023
} | class ____(RegressorMixin, BaseEstimator):
def fit(self, X, y):
self.reg = DummyRegressor()
return self.reg.fit(X, y)
def predict(self, X):
return np.ones(X.shape[0])
| NoWeightRegressor |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_wrap_model_call.py | {
"start": 2944,
"end": 8110
} | class ____:
"""Test retry logic with wrap_model_call."""
def test_simple_retry_on_error(self) -> None:
"""Test middleware that retries once on error."""
call_count = {"value": 0}
class FailOnceThenSucceed(GenericFakeChatModel):
def _generate(self, messages, **kwargs):
call_count["value"] += 1
if call_count["value"] == 1:
raise ValueError("First call fails")
return super()._generate(messages, **kwargs)
class RetryOnceMiddleware(AgentMiddleware):
def __init__(self):
super().__init__()
self.retry_count = 0
def wrap_model_call(self, request, handler):
try:
return handler(request)
except Exception:
self.retry_count += 1
return handler(request)
retry_middleware = RetryOnceMiddleware()
model = FailOnceThenSucceed(messages=iter([AIMessage(content="Success")]))
agent = create_agent(model=model, middleware=[retry_middleware])
result = agent.invoke({"messages": [HumanMessage("Test")]})
assert retry_middleware.retry_count == 1
assert result["messages"][1].content == "Success"
def test_max_retries(self) -> None:
"""Test middleware with maximum retry limit."""
class AlwaysFailModel(GenericFakeChatModel):
def _generate(self, messages, **kwargs):
raise ValueError("Always fails")
class MaxRetriesMiddleware(AgentMiddleware):
def __init__(self, max_retries=3):
super().__init__()
self.max_retries = max_retries
self.attempts = []
def wrap_model_call(self, request, handler):
last_exception = None
for attempt in range(self.max_retries):
self.attempts.append(attempt + 1)
try:
return handler(request)
except Exception as e:
last_exception = e
continue
# Re-raise the last exception
if last_exception:
raise last_exception
retry_middleware = MaxRetriesMiddleware(max_retries=3)
model = AlwaysFailModel(messages=iter([]))
agent = create_agent(model=model, middleware=[retry_middleware])
with pytest.raises(ValueError, match="Always fails"):
agent.invoke({"messages": [HumanMessage("Test")]})
assert retry_middleware.attempts == [1, 2, 3]
def test_no_retry_propagates_error(self) -> None:
"""Test that error is propagated when middleware doesn't retry."""
class FailingModel(BaseChatModel):
"""Model that always fails."""
def _generate(self, messages, **kwargs):
raise ValueError("Model error")
@property
def _llm_type(self):
return "failing"
class NoRetryMiddleware(AgentMiddleware):
def wrap_model_call(self, request, handler):
return handler(request)
agent = create_agent(model=FailingModel(), middleware=[NoRetryMiddleware()])
with pytest.raises(ValueError, match="Model error"):
agent.invoke({"messages": [HumanMessage("Test")]})
def test_max_attempts_limit(self) -> None:
"""Test that middleware controls termination via retry limits."""
class AlwaysFailingModel(BaseChatModel):
"""Model that always fails."""
def _generate(self, messages, **kwargs):
raise ValueError("Always fails")
@property
def _llm_type(self):
return "always_failing"
class LimitedRetryMiddleware(AgentMiddleware):
"""Middleware that limits its own retries."""
def __init__(self, max_retries: int = 10):
super().__init__()
self.max_retries = max_retries
self.attempt_count = 0
def wrap_model_call(self, request, handler):
last_exception = None
for attempt in range(self.max_retries):
self.attempt_count += 1
try:
return handler(request)
except Exception as e:
last_exception = e
# Continue to retry
# All retries exhausted, re-raise the last error
if last_exception:
raise last_exception
model = AlwaysFailingModel()
middleware = LimitedRetryMiddleware(max_retries=10)
agent = create_agent(model=model, middleware=[middleware])
# Should fail with the model's error after middleware stops retrying
with pytest.raises(ValueError, match="Always fails"):
agent.invoke({"messages": [HumanMessage("Test")]})
# Should have attempted exactly 10 times as configured
assert middleware.attempt_count == 10
| TestRetryLogic |
python | getsentry__sentry | tests/acceptance/test_issue_details.py | {
"start": 410,
"end": 8869
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
patcher = patch("django.utils.timezone.now", return_value=now)
patcher.start()
self.addCleanup(patcher.stop)
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
self.page = IssueDetailsPage(self.browser, self.client)
self.dismiss_assistant()
def create_sample_event(
self,
platform: str,
default: str | None = None,
sample_name: str | None = None,
time: datetime | None = None,
tags: list[list[str]] | None = None,
) -> Event:
event_data = load_data(platform, default=default, sample_name=sample_name)
event_data["event_id"] = "d964fdbd649a4cf8bfc35d18082b6b0e"
# Only set these properties if we were given a time.
# event processing will mark old time values as processing errors.
if time:
event_data["received"] = time.isoformat()
if tags:
event_data["tags"] = tags
# We need a fallback datetime for the event
if time is None:
time = now - timedelta(days=2)
time = time.replace(hour=0, minute=0, second=0, microsecond=0)
event_data["timestamp"] = time.isoformat()
event = self.store_event(
data=event_data, project_id=self.project.id, assert_no_errors=False
)
event.group.update(
first_seen=datetime(2015, 8, 13, 3, 8, 25, tzinfo=timezone.utc), last_seen=time
)
return event
def test_python_event(self) -> None:
tags = [
["server_name", "web02.example.org"],
["environment", "staging"],
]
self.create_sample_event(platform="python", tags=tags)
event = self.create_sample_event(platform="python")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
# Wait for tag bars to load
self.browser.wait_until_test_id("loaded-device-name")
def test_python_rawbody_event(self) -> None:
event = self.create_sample_event(platform="python-rawbody")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
self.browser.move_to('[data-test-id="rich-http-content-body-section-pre"]')
def test_python_formdata_event(self) -> None:
event = self.create_sample_event(platform="python-formdata")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_pii_tooltips(self) -> None:
event = self.create_sample_event(platform="pii-tooltips")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_cocoa_event(self) -> None:
event = self.create_sample_event(platform="cocoa")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_cocoa_event_frame_line_hover(self) -> None:
event = self.create_sample_event(platform="cocoa")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
self.browser.wait_until_not(".loading")
self.browser.move_to(".traceback li:nth-child(2)")
def test_unity_event(self) -> None:
event = self.create_sample_event(default="unity", platform="csharp")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_android_event(self) -> None:
event = self.create_sample_event(platform="android")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_android_ndk_event(self) -> None:
event = self.create_sample_event(default="android-ndk", platform="android-ndk")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_aspnetcore_event(self) -> None:
event = self.create_sample_event(default="aspnetcore", platform="csharp")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_javascript_specific_event(self) -> None:
event = self.create_sample_event(platform="javascript")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
self.browser.click('label[data-test-id="curl"]')
def test_rust_event(self) -> None:
# TODO: This should become its own "rust" platform type
event = self.create_sample_event(platform="native", sample_name="Rust")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_cordova_event(self) -> None:
event = self.create_sample_event(platform="cordova")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_stripped_event(self) -> None:
event = self.create_sample_event(platform="pii")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_empty_exception(self) -> None:
event = self.create_sample_event(platform="empty-exception")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_empty_stacktrace(self) -> None:
event = self.create_sample_event(platform="empty-stacktrace")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_activity_page(self) -> None:
event = self.create_sample_event(platform="python")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
self.page.go_to_subtab("activity")
self.browser.wait_until_test_id("activity-item")
def test_resolved(self) -> None:
event = self.create_sample_event(platform="python")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
self.page.resolve_issue()
def test_archived(self) -> None:
event = self.create_sample_event(platform="python")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
self.page.archive_issue()
def test_exception_and_no_threads_event(self) -> None:
event = self.create_sample_event(platform="exceptions-and-no-threads")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_exception_with_stack_trace_and_crashed_thread_without_stack_trace_event(self) -> None:
event = self.create_sample_event(
platform="exception-with-stack-trace-and-crashed-thread-without-stack-trace"
)
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_exception_without_stack_trace_and_crashed_thread_with_stack_trace_event(self) -> None:
event = self.create_sample_event(
platform="exception-without-stack-trace-and-crashed-thread-with-stack-trace"
)
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_exception_with_stack_trace_and_crashed_thread_with_stack_trace_event(self) -> None:
event = self.create_sample_event(
platform="exception-with-stack-trace-and-crashed-thread-with-stack-trace"
)
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_python_invalid_json_error(self) -> None:
event = self.create_sample_event(default="python-invalid-json-error", platform="native")
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
def test_exception_with_address_instruction(self) -> None:
event = self.create_sample_event(
default="exception-with-address-instruction", platform="cocoa"
)
assert event.group is not None
self.page.visit_issue(self.org.slug, event.group.id)
| IssueDetailsTest |
python | sympy__sympy | sympy/functions/special/error_functions.py | {
"start": 59306,
"end": 61621
} | class ____(TrigonometricIntegral):
r"""
Sinh integral.
Explanation
===========
This function is defined by
.. math:: \operatorname{Shi}(z) = \int_0^z \frac{\sinh{t}}{t} \mathrm{d}t.
It is an entire function.
Examples
========
>>> from sympy import Shi
>>> from sympy.abc import z
The Sinh integral is a primitive of $\sinh(z)/z$:
>>> Shi(z).diff(z)
sinh(z)/z
It is unbranched:
>>> from sympy import exp_polar, I, pi
>>> Shi(z*exp_polar(2*I*pi))
Shi(z)
The $\sinh$ integral behaves much like ordinary $\sinh$ under
multiplication by $i$:
>>> Shi(I*z)
I*Si(z)
>>> Shi(-z)
-Shi(z)
It can also be expressed in terms of exponential integrals, but beware
that the latter is branched:
>>> from sympy import expint
>>> Shi(z).rewrite(expint)
expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
See Also
========
Si: Sine integral.
Ci: Cosine integral.
Chi: Hyperbolic cosine integral.
Ei: Exponential integral.
expint: Generalised exponential integral.
E1: Special case of the generalised exponential integral.
li: Logarithmic integral.
Li: Offset logarithmic integral.
References
==========
.. [1] https://en.wikipedia.org/wiki/Trigonometric_integral
"""
_trigfunc = sinh
_atzero = S.Zero
@classmethod
def _atinf(cls):
return S.Infinity
@classmethod
def _atneginf(cls):
return S.NegativeInfinity
@classmethod
def _minusfactor(cls, z):
return -Shi(z)
@classmethod
def _Ifactor(cls, z, sign):
return I*Si(z)*sign
def _eval_rewrite_as_expint(self, z, **kwargs):
# XXX should we polarify z?
return (E1(z) - E1(exp_polar(I*pi)*z))/2 - I*pi/2
def _eval_is_zero(self):
z = self.args[0]
if z.is_zero:
return True
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0].as_leading_term(x)
arg0 = arg.subs(x, 0)
if arg0 is S.NaN:
arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
if arg0.is_zero:
return arg
elif not arg0.is_infinite:
return self.func(arg0)
else:
return self
| Shi |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass10.py | {
"start": 261,
"end": 410
} | class ____(EnumMeta):
def __getitem__(cls: type[_EnumMemberT], name: str) -> _EnumMemberT:
return EnumMeta.__getitem__(cls, name)
| EnumMeta2 |
python | getsentry__sentry | tests/sentry/dashboards/endpoints/test_organization_dashboard_details.py | {
"start": 4265,
"end": 30407
} | class ____(OrganizationDashboardDetailsTestCase):
def test_get(self) -> None:
response = self.do_request("get", self.url(self.dashboard.id))
assert response.status_code == 200, response.content
self.assert_serialized_dashboard(response.data, self.dashboard)
assert len(response.data["widgets"]) == 2
widgets = response.data["widgets"]
assert "layout" in widgets[0]
assert "layout" in widgets[1]
self.assert_serialized_widget(widgets[0], self.widget_1)
self.assert_serialized_widget(widgets[1], self.widget_2)
widget_queries = widgets[0]["queries"]
assert len(widget_queries) == 2
self.assert_serialized_widget_query(widget_queries[0], self.widget_1_data_1)
self.assert_serialized_widget_query(widget_queries[1], self.widget_1_data_2)
assert len(widgets[1]["queries"]) == 1
self.assert_serialized_widget_query(widgets[1]["queries"][0], self.widget_2_data_1)
def test_dashboard_does_not_exist(self) -> None:
response = self.do_request("get", self.url(1234567890))
assert response.status_code == 404
assert response.data == {"detail": "The requested resource does not exist"}
def test_get_prebuilt_dashboard(self) -> None:
# Pre-built dashboards should be accessible
response = self.do_request("get", self.url("default-overview"))
assert response.status_code == 200
assert response.data["id"] == "default-overview"
def test_prebuilt_dashboard_with_discover_split_feature_flag(self) -> None:
response = self.do_request("get", self.url("default-overview"))
assert response.status_code == 200, response.data
for widget in response.data["widgets"]:
assert widget["widgetType"] in {"issue", "transaction-like", "error-events"}
def test_get_prebuilt_dashboard_tombstoned(self) -> None:
DashboardTombstone.objects.create(organization=self.organization, slug="default-overview")
# Pre-built dashboards should be accessible even when tombstoned
# This is to preserve behavior around bookmarks
response = self.do_request("get", self.url("default-overview"))
assert response.status_code == 200
assert response.data["id"] == "default-overview"
def test_features_required(self) -> None:
with self.feature({"organizations:dashboards-basic": False}):
response = self.do_request("get", self.url("default-overview"))
assert response.status_code == 404
def test_dashboard_widget_returns_limit(self) -> None:
response = self.do_request("get", self.url(self.dashboard.id))
assert response.status_code == 200, response.content
assert response.data["widgets"][0]["limit"] is None
assert response.data["widgets"][1]["limit"] == 5
def test_dashboard_widget_query_returns_field_aliases(self) -> None:
response = self.do_request("get", self.url(self.dashboard.id))
assert response.status_code == 200, response.content
assert response.data["widgets"][0]["queries"][0]["fieldAliases"][0] == "Count Alias"
assert response.data["widgets"][1]["queries"][0]["fieldAliases"] == []
def test_filters_is_empty_dict_in_response_if_not_applicable(self) -> None:
filters = {"environment": ["alpha"]}
dashboard = Dashboard.objects.create(
title="Dashboard With Filters",
created_by_id=self.user.id,
organization=self.organization,
filters=filters,
)
response = self.do_request("get", self.url(dashboard.id))
assert response.data["projects"] == []
assert response.data["environment"] == filters["environment"]
assert response.data["filters"] == {}
assert "period" not in response.data
def test_dashboard_filters_are_returned_in_response(self) -> None:
filters = {"environment": ["alpha"], "period": "24hr", "release": ["test-release"]}
dashboard = Dashboard.objects.create(
title="Dashboard With Filters",
created_by_id=self.user.id,
organization=self.organization,
filters=filters,
)
dashboard.projects.set([Project.objects.create(organization=self.organization)])
response = self.do_request("get", self.url(dashboard.id))
assert response.data["projects"] == list(dashboard.projects.values_list("id", flat=True))
assert response.data["environment"] == filters["environment"]
assert response.data["period"] == filters["period"]
assert response.data["filters"]["release"] == filters["release"]
def test_start_and_end_filters_are_returned_in_response(self) -> None:
start = (datetime.now() - timedelta(seconds=10)).isoformat()
end = datetime.now().isoformat()
filters = {"start": start, "end": end, "utc": False}
dashboard = Dashboard.objects.create(
title="Dashboard With Filters",
created_by_id=self.user.id,
organization=self.organization,
filters=filters,
)
dashboard.projects.set([Project.objects.create(organization=self.organization)])
response = self.do_request("get", self.url(dashboard.id))
assert response.data["start"].replace(tzinfo=None).isoformat() == start
assert response.data["end"].replace(tzinfo=None).isoformat() == end
assert not response.data["utc"]
def test_response_truncates_with_retention(self) -> None:
start = before_now(days=3)
end = before_now(days=2)
expected_adjusted_retention_start = before_now(days=1)
filters = {"start": start, "end": end}
dashboard = Dashboard.objects.create(
title="Dashboard With Filters",
created_by_id=self.user.id,
organization=self.organization,
filters=filters,
)
with self.options({"system.event-retention-days": 1}):
response = self.do_request("get", self.url(dashboard.id))
assert response.data["expired"]
assert (
response.data["start"].replace(second=0, microsecond=0).isoformat()
== expected_adjusted_retention_start.replace(second=0, microsecond=0).isoformat()
)
def test_dashboard_widget_type_returns_split_decision(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Split Widgets",
created_by_id=self.user.id,
organization=self.organization,
)
DashboardWidget.objects.create(
dashboard=dashboard,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
discover_widget_split=DashboardWidgetTypes.ERROR_EVENTS,
)
DashboardWidget.objects.create(
dashboard=dashboard,
title="transaction widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,
)
DashboardWidget.objects.create(
dashboard=dashboard,
title="no split",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
response = self.do_request(
"get",
self.url(dashboard.id),
)
assert response.status_code == 200, response.content
assert response.data["widgets"][0]["widgetType"] == "error-events"
assert response.data["widgets"][1]["widgetType"] == "transaction-like"
assert response.data["widgets"][2]["widgetType"] == "discover"
def test_dashboard_widget_returns_dataset_source(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Dataset Source",
created_by_id=self.user.id,
organization=self.organization,
)
DashboardWidget.objects.create(
dashboard=dashboard,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
dataset_source=DatasetSourcesTypes.INFERRED.value,
)
response = self.do_request("get", self.url(dashboard.id))
assert response.status_code == 200, response.content
assert response.data["widgets"][0]["datasetSource"] == "inferred"
def test_dashboard_widget_default_dataset_source_is_unknown(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard Without",
created_by_id=self.user.id,
organization=self.organization,
)
DashboardWidget.objects.create(
dashboard=dashboard,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
response = self.do_request("get", self.url(dashboard.id))
assert response.status_code == 200, response.content
assert response.data["widgets"][0]["datasetSource"] == "unknown"
def test_dashboard_widget_query_returns_selected_aggregate(self) -> None:
widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Big Number Widget",
display_type=DashboardWidgetDisplayTypes.BIG_NUMBER,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
)
DashboardWidgetQuery.objects.create(
widget=widget,
fields=["count_unique(issue)", "count()"],
columns=[],
aggregates=["count_unique(issue)", "count()"],
selected_aggregate=1,
order=0,
)
response = self.do_request(
"get",
self.url(self.dashboard.id),
)
assert response.status_code == 200, response.content
assert response.data["widgets"][0]["queries"][0]["selectedAggregate"] is None
assert response.data["widgets"][2]["queries"][0]["selectedAggregate"] == 1
def test_dashboard_details_data_returns_permissions(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Dataset Source",
created_by_id=self.user.id,
organization=self.organization,
)
DashboardPermissions.objects.create(dashboard=dashboard, is_editable_by_everyone=False)
response = self.do_request("get", self.url(dashboard.id))
assert response.status_code == 200, response.content
assert "permissions" in response.data
assert not response.data["permissions"]["isEditableByEveryone"]
def test_dashboard_details_data_returns_Null_permissions(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Dataset Source",
created_by_id=self.user.id,
organization=self.organization,
)
response = self.do_request("get", self.url(dashboard.id))
assert response.status_code == 200, response.content
assert "permissions" in response.data
assert not response.data["permissions"]
def test_dashboard_viewable_with_no_edit_permissions(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Dataset Source",
created_by_id=1142,
organization=self.organization,
)
DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=dashboard)
user = self.create_user(id=1289)
self.create_member(user=user, organization=self.organization)
self.login_as(user)
response = self.do_request("get", self.url(dashboard.id))
assert response.status_code == 200, response.content
def test_dashboard_details_data_returns_permissions_with_teams(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard With Dataset Source",
created_by_id=self.user.id,
organization=self.organization,
)
team1 = self.create_team(organization=self.organization)
team2 = self.create_team(organization=self.organization)
permissions = DashboardPermissions.objects.create(
dashboard=dashboard, is_editable_by_everyone=False
)
permissions.teams_with_edit_access.set([team1, team2])
response = self.do_request("get", self.url(dashboard.id))
assert response.status_code == 200, response.content
assert "permissions" in response.data
assert not response.data["permissions"]["isEditableByEveryone"]
assert "teamsWithEditAccess" in response.data["permissions"]
assert response.data["permissions"]["teamsWithEditAccess"] == [team1.id, team2.id]
def test_get_favorited_user_status(self) -> None:
self.user_1 = self.create_user(email="user1@example.com")
self.user_2 = self.create_user(email="user2@example.com")
self.create_member(user=self.user_1, organization=self.organization)
self.create_member(user=self.user_2, organization=self.organization)
self.dashboard.favorited_by = [self.user_1.id, self.user_2.id]
self.login_as(user=self.user_1)
response = self.do_request("get", self.url(self.dashboard.id))
assert response.status_code == 200
assert response.data["isFavorited"] is True
def test_get_not_favorited_user_status(self) -> None:
self.user_1 = self.create_user(email="user1@example.com")
self.create_member(user=self.user_1, organization=self.organization)
self.dashboard.favorited_by = [self.user_1.id, self.user.id]
user_3 = self.create_user()
self.create_member(user=user_3, organization=self.organization)
self.login_as(user=user_3)
response = self.do_request("get", self.url(self.dashboard.id))
assert response.status_code == 200
assert response.data["isFavorited"] is False
def test_get_favorite_status_no_dashboard_edit_access(self) -> None:
self.user_1 = self.create_user(email="user1@example.com")
self.user_2 = self.create_user(email="user2@example.com")
self.create_member(user=self.user_1, organization=self.organization)
self.create_member(user=self.user_2, organization=self.organization)
self.dashboard.favorited_by = [self.user_1.id, self.user_2.id, self.user.id]
DashboardPermissions.objects.create(is_editable_by_everyone=False, dashboard=self.dashboard)
self.login_as(user=self.user_2)
dashboard_detail_put_url = reverse(
"sentry-api-0-organization-dashboard-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"dashboard_id": self.dashboard.id,
},
)
response = self.do_request(
"put", dashboard_detail_put_url, data={"title": "New Dashboard 9"}
)
# assert user cannot edit dashboard
assert response.status_code == 403
# assert user can see if they favorited the dashboard
response = self.do_request("get", self.url(self.dashboard.id))
assert response.status_code == 200
assert response.data["isFavorited"] is True
def test_explore_url_for_transaction_widget(self) -> None:
with self.feature("organizations:transaction-widget-deprecation-explore-view"):
dashboard_deprecation = Dashboard.objects.create(
title="Dashboard With Transaction Widget",
created_by_id=self.user.id,
organization=self.organization,
)
widget_deprecation = DashboardWidget.objects.create(
dashboard=dashboard_deprecation,
title="transaction widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
DashboardWidgetQuery.objects.create(
widget=widget_deprecation,
fields=["count()", "transaction"],
columns=["transaction"],
aggregates=["count()"],
conditions="count():>50",
orderby="-count",
order=0,
)
response = self.do_request("get", self.url(dashboard_deprecation.id))
assert response.status_code == 200
explore_url = response.data["widgets"][0]["exploreUrls"][0]
assert "http://testserver/explore/traces/" in explore_url
params = dict(parse_qs(urlsplit(response.data["widgets"][0]["exploreUrls"][0]).query))
assert params["query"] == ["(count(span.duration):>50) AND is_transaction:1"]
assert params["sort"] == ["-count(span.duration)"]
assert params["mode"] == ["aggregate"]
assert params["aggregateField"] == [
'{"groupBy":"transaction"}',
'{"yAxes":["count(span.duration)"],"chartType":1}',
]
def test_explore_url_for_table_widget(self) -> None:
with self.feature("organizations:transaction-widget-deprecation-explore-view"):
dashboard_deprecation = Dashboard.objects.create(
title="Dashboard With Transaction Widget",
created_by_id=self.user.id,
organization=self.organization,
)
widget_deprecation = DashboardWidget.objects.create(
dashboard=dashboard_deprecation,
title="table widget",
display_type=DashboardWidgetDisplayTypes.TABLE,
widget_type=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
DashboardWidgetQuery.objects.create(
widget=widget_deprecation,
fields=["id", "title"],
columns=["id", "title"],
aggregates=[],
order=0,
)
response = self.do_request("get", self.url(dashboard_deprecation.id))
assert response.status_code == 200
explore_url = response.data["widgets"][0]["exploreUrls"][0]
assert "http://testserver/explore/traces/" in explore_url
params = dict(parse_qs(urlsplit(response.data["widgets"][0]["exploreUrls"][0]).query))
assert params["query"] == ["is_transaction:1"]
assert "sort" not in params
assert params["mode"] == ["samples"]
# need to sort because fields order is not guaranteed
assert params["field"].sort() == ["id", "transaction"].sort()
assert "aggregateField" not in params
def test_explore_url_for_widget_with_discover_split_param(self) -> None:
with self.feature("organizations:transaction-widget-deprecation-explore-view"):
dashboard_deprecation = Dashboard.objects.create(
title="Dashboard With Transaction Widget",
created_by_id=self.user.id,
organization=self.organization,
filters={
"release": ["1.0.0", "2.0.0"],
},
)
widget_deprecation = DashboardWidget.objects.create(
dashboard=dashboard_deprecation,
title="transaction widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
DashboardWidgetQuery.objects.create(
widget=widget_deprecation,
fields=["count()", "transaction"],
columns=["transaction"],
aggregates=["count()"],
conditions="count():>50",
orderby="-count",
order=0,
)
response = self.do_request("get", self.url(dashboard_deprecation.id))
assert response.status_code == 200
explore_url = response.data["widgets"][0]["exploreUrls"][0]
assert "http://testserver/explore/traces/" in explore_url
params = dict(parse_qs(urlsplit(response.data["widgets"][0]["exploreUrls"][0]).query))
assert params["query"] == [
"(count(span.duration):>50) AND is_transaction:1 AND release:1.0.0,2.0.0"
]
assert params["sort"] == ["-count(span.duration)"]
assert params["mode"] == ["aggregate"]
assert params["aggregateField"] == [
'{"groupBy":"transaction"}',
'{"yAxes":["count(span.duration)"],"chartType":1}',
]
def test_explore_url_for_deformed_widget(self) -> None:
with self.feature("organizations:transaction-widget-deprecation-explore-view"):
dashboard_deprecation = Dashboard.objects.create(
title="Dashboard With Transaction Widget",
created_by_id=self.user.id,
organization=self.organization,
)
widget_deprecation = DashboardWidget.objects.create(
dashboard=dashboard_deprecation,
title="line widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
DashboardWidgetQuery.objects.create(
widget=widget_deprecation,
fields=["query.dataset"],
columns=["query.dataset"],
aggregates=["p95(transaction.duration)"],
orderby="-p95(transaction.duration)",
conditions="transaction:/api/0/organizations/{organization_id_or_slug}/events/",
order=0,
)
response = self.do_request("get", self.url(dashboard_deprecation.id))
assert response.status_code == 200
explore_url = response.data["widgets"][0]["exploreUrls"][0]
assert "http://testserver/explore/traces/" in explore_url
params = dict(parse_qs(urlsplit(response.data["widgets"][0]["exploreUrls"][0]).query))
assert params["query"] == [
"(transaction:/api/0/organizations/{organization_id_or_slug}/events/) AND is_transaction:1"
]
assert params["sort"] == ["-p95(span.duration)"]
assert params["mode"] == ["aggregate"]
assert params["field"].sort() == ["query.dataset", "span.duration"].sort()
assert params["aggregateField"] == [
'{"groupBy":"query.dataset"}',
'{"yAxes":["p95(span.duration)"],"chartType":1}',
]
def test_changed_reason_response(self) -> None:
response = self.do_request("get", self.url(self.dashboard.id))
assert response.status_code == 200
widget = response.data["widgets"][0]
assert widget["changedReason"] is None
def test_changed_reason_response_with_data(self) -> None:
dashboard_deprecation = Dashboard.objects.create(
title="Dashboard With Transaction Widget",
created_by_id=self.user.id,
organization=self.organization,
)
widget_deprecation = DashboardWidget.objects.create(
dashboard=dashboard_deprecation,
title="line widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
changed_reason=[
{
"orderby": [
{"orderby": "total.count", "reason": "fields were dropped: total.count"}
],
"equations": [],
"columns": ["total.count"],
}
],
)
DashboardWidgetQuery.objects.create(
widget=widget_deprecation,
fields=["query.dataset"],
columns=["query.dataset"],
aggregates=["p95(transaction.duration)"],
orderby="-p95(transaction.duration)",
conditions="transaction:/api/0/organizations/{organization_id_or_slug}/events/",
order=0,
)
response = self.do_request("get", self.url(dashboard_deprecation.id))
assert response.status_code == 200
widget = response.data["widgets"][0]
assert widget["changedReason"] is not None
assert isinstance(widget["changedReason"], list)
assert len(widget["changedReason"]) == 1
assert widget["changedReason"][0]["orderby"] == [
{"orderby": "total.count", "reason": "fields were dropped: total.count"}
]
assert widget["changedReason"][0]["equations"] == []
assert widget["changedReason"][0]["columns"] == ["total.count"]
| OrganizationDashboardDetailsGetTest |
python | huggingface__transformers | src/transformers/hyperparameter_search.py | {
"start": 2066,
"end": 2476
} | class ____(HyperParamSearchBackendBase):
name = "ray"
pip_package = "'ray[tune]'"
@staticmethod
def is_available():
return is_ray_tune_available()
def run(self, trainer, n_trials: int, direction: str, **kwargs):
return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
def default_hp_space(self, trial):
return default_hp_space_ray(trial)
| RayTuneBackend |
python | django__django | tests/forms_tests/field_tests/test_multivaluefield.py | {
"start": 1841,
"end": 7406
} | class ____(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.field = ComplexField(widget=ComplexMultiWidget())
super().setUpClass()
def test_clean(self):
self.assertEqual(
self.field.clean(["some text", ["J", "P"], ["2007-04-25", "6:24:00"]]),
"some text,JP,2007-04-25 06:24:00",
)
def test_clean_disabled_multivalue(self):
class ComplexFieldForm(Form):
f = ComplexField(disabled=True, widget=ComplexMultiWidget)
inputs = (
"some text,JP,2007-04-25 06:24:00",
["some text", ["J", "P"], ["2007-04-25", "6:24:00"]],
)
for data in inputs:
with self.subTest(data=data):
form = ComplexFieldForm({}, initial={"f": data})
form.full_clean()
self.assertEqual(form.errors, {})
self.assertEqual(form.cleaned_data, {"f": inputs[0]})
def test_bad_choice(self):
msg = "'Select a valid choice. X is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
self.field.clean(["some text", ["X"], ["2007-04-25", "6:24:00"]])
def test_no_value(self):
"""
If insufficient data is provided, None is substituted.
"""
msg = "'This field is required.'"
with self.assertRaisesMessage(ValidationError, msg):
self.field.clean(["some text", ["JP"]])
def test_has_changed_no_initial(self):
self.assertTrue(
self.field.has_changed(
None, ["some text", ["J", "P"], ["2007-04-25", "6:24:00"]]
)
)
def test_has_changed_same(self):
self.assertFalse(
self.field.has_changed(
"some text,JP,2007-04-25 06:24:00",
["some text", ["J", "P"], ["2007-04-25", "6:24:00"]],
)
)
def test_has_changed_first_widget(self):
"""
Test when the first widget's data has changed.
"""
self.assertTrue(
self.field.has_changed(
"some text,JP,2007-04-25 06:24:00",
["other text", ["J", "P"], ["2007-04-25", "6:24:00"]],
)
)
def test_has_changed_last_widget(self):
"""
Test when the last widget's data has changed. This ensures that it is
not short circuiting while testing the widgets.
"""
self.assertTrue(
self.field.has_changed(
"some text,JP,2007-04-25 06:24:00",
["some text", ["J", "P"], ["2009-04-25", "11:44:00"]],
)
)
def test_disabled_has_changed(self):
f = MultiValueField(fields=(CharField(), CharField()), disabled=True)
self.assertIs(f.has_changed(["x", "x"], ["y", "y"]), False)
def test_form_as_table(self):
form = ComplexFieldForm()
self.assertHTMLEqual(
form.as_table(),
"""
<tr><th><label>Field1:</label></th>
<td><input type="text" name="field1_0" id="id_field1_0" required>
<select multiple name="field1_1" id="id_field1_1" required>
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" required>
<input type="text" name="field1_2_1" id="id_field1_2_1" required></td></tr>
""",
)
def test_form_as_table_data(self):
form = ComplexFieldForm(
{
"field1_0": "some text",
"field1_1": ["J", "P"],
"field1_2_0": "2007-04-25",
"field1_2_1": "06:24:00",
}
)
self.assertHTMLEqual(
form.as_table(),
"""
<tr><th><label>Field1:</label></th>
<td><input type="text" name="field1_0" value="some text" id="id_field1_0"
required>
<select multiple name="field1_1" id="id_field1_1" required>
<option value="J" selected>John</option>
<option value="P" selected>Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0"
required>
<input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1"
required></td></tr>
""",
)
def test_form_cleaned_data(self):
form = ComplexFieldForm(
{
"field1_0": "some text",
"field1_1": ["J", "P"],
"field1_2_0": "2007-04-25",
"field1_2_1": "06:24:00",
}
)
form.is_valid()
self.assertEqual(
form.cleaned_data["field1"], "some text,JP,2007-04-25 06:24:00"
)
def test_render_required_attributes(self):
form = PartiallyRequiredForm({"f_0": "Hello", "f_1": ""})
self.assertTrue(form.is_valid())
self.assertInHTML(
'<input type="text" name="f_0" value="Hello" required id="id_f_0">',
form.as_p(),
)
self.assertInHTML('<input type="text" name="f_1" id="id_f_1">', form.as_p())
form = PartiallyRequiredForm({"f_0": "", "f_1": ""})
self.assertFalse(form.is_valid())
| MultiValueFieldTest |
python | lxml__lxml | src/lxml/html/tests/test_html5parser.py | {
"start": 13440,
"end": 13577
} | class ____:
def __init__(self, namespaceHTMLElements=True):
self.namespaceHTMLElements = namespaceHTMLElements
| DummyTreeBuilder |
python | huggingface__transformers | src/transformers/models/mra/modeling_mra.py | {
"start": 30004,
"end": 30795
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Mra
| MraPredictionHeadTransform |
python | celery__celery | t/smoke/conftest.py | {
"start": 663,
"end": 1010
} | class ____(CeleryTestSetup):
def ready(self, *args, **kwargs) -> bool:
# Force false, false, true
return super().ready(
ping=False,
control=False,
docker=True,
)
@pytest.fixture
def celery_setup_cls() -> type[CeleryTestSetup]: # type: ignore
return SmokeTestSetup
| SmokeTestSetup |
python | kamyu104__LeetCode-Solutions | Python/all-ancestors-of-a-node-in-a-directed-acyclic-graph.py | {
"start": 51,
"end": 911
} | class ____(object):
def getAncestors(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[List[int]]
"""
def iter_dfs(adj, i, result):
lookup = [False]*len(adj)
stk = [i]
while stk:
u = stk.pop()
for v in reversed(adj[u]):
if lookup[v]:
continue
lookup[v] = True
stk.append(v)
result[v].append(i)
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[u].append(v)
result = [[] for _ in xrange(n)]
for u in xrange(n):
iter_dfs(adj, u, result)
return result
# Time: O(|V| * |E| * log(|V| * |E|))
# Space: O(|V| + |E|)
# bfs
| Solution |
python | doocs__leetcode | solution/0300-0399/0371.Sum of Two Integers/Solution.py | {
"start": 0,
"end": 262
} | class ____:
def getSum(self, a: int, b: int) -> int:
a, b = a & 0xFFFFFFFF, b & 0xFFFFFFFF
while b:
carry = ((a & b) << 1) & 0xFFFFFFFF
a, b = a ^ b, carry
return a if a < 0x80000000 else ~(a ^ 0xFFFFFFFF)
| Solution |
python | numpy__numpy | numpy/f2py/tests/test_semicolon_split.py | {
"start": 1056,
"end": 1627
} | class ____(util.F2PyTest):
suffix = ".pyf"
module_name = "callstatement"
code = f"""
python module {module_name}
usercode '''
void foo(int* x) {{
}}
'''
interface
subroutine foo(x)
intent(c) foo
integer intent(out) :: x
callprotoargument int*
callstatement {{ &
; &
x = 42; &
}}
end subroutine foo
end interface
end python module {module_name}
"""
def test_callstatement(self):
assert self.module.foo() == 42
| TestCallstatement |
python | PyCQA__pylint | tests/functional/s/super/super_init_not_called.py | {
"start": 1691,
"end": 1846
} | class ____(Base):
def __init__(self, param: int, param_two: str) -> None:
self.param = param + 1
self.param_two = param_two[::-1]
| Derived |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1493746,
"end": 1495230
} | class ____(sgqlc.types.Type, Node):
"""A record that is promoted on a GitHub Sponsors profile."""
__schema__ = github_schema
__field_names__ = ("created_at", "description", "featureable", "position", "sponsors_listing", "updated_at")
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
description = sgqlc.types.Field(String, graphql_name="description")
"""Will either be a description from the sponsorable maintainer about
why they featured this item, or the item's description itself,
such as a user's bio from their GitHub profile page.
"""
featureable = sgqlc.types.Field(sgqlc.types.non_null("SponsorsListingFeatureableItem"), graphql_name="featureable")
"""The record that is featured on the GitHub Sponsors profile."""
position = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="position")
"""The position of this featured item on the GitHub Sponsors profile
with a lower position indicating higher precedence. Starts at 1.
"""
sponsors_listing = sgqlc.types.Field(sgqlc.types.non_null(SponsorsListing), graphql_name="sponsorsListing")
"""The GitHub Sponsors profile that features this record."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| SponsorsListingFeaturedItem |
python | pyca__cryptography | tests/x509/test_x509.py | {
"start": 238496,
"end": 242035
} | class ____:
def test_init_empty(self):
with pytest.raises(ValueError):
x509.RelativeDistinguishedName([])
def test_init_not_nameattribute(self):
with pytest.raises(TypeError):
x509.RelativeDistinguishedName(
["not-a-NameAttribute"] # type:ignore[list-item]
)
def test_init_duplicate_attribute(self):
with pytest.raises(ValueError):
x509.RelativeDistinguishedName(
[
x509.NameAttribute(
x509.ObjectIdentifier("2.999.1"), "val1"
),
x509.NameAttribute(
x509.ObjectIdentifier("2.999.1"), "val1"
),
]
)
def test_hash(self):
rdn1 = x509.RelativeDistinguishedName(
[
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.2"), "value2"),
]
)
rdn2 = x509.RelativeDistinguishedName(
[
x509.NameAttribute(x509.ObjectIdentifier("2.999.2"), "value2"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
]
)
rdn3 = x509.RelativeDistinguishedName(
[
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.2"), "value3"),
]
)
assert hash(rdn1) == hash(rdn2)
assert hash(rdn1) != hash(rdn3)
def test_eq(self):
rdn1 = x509.RelativeDistinguishedName(
[
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.2"), "value2"),
]
)
rdn2 = x509.RelativeDistinguishedName(
[
x509.NameAttribute(x509.ObjectIdentifier("2.999.2"), "value2"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
]
)
assert rdn1 == rdn2
def test_ne(self):
rdn1 = x509.RelativeDistinguishedName(
[
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.2"), "value2"),
]
)
rdn2 = x509.RelativeDistinguishedName(
[
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.2"), "value3"),
]
)
assert rdn1 != rdn2
assert rdn1 != object()
def test_iter_input(self):
# Order must be preserved too
attrs = [
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value1"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value2"),
x509.NameAttribute(x509.ObjectIdentifier("2.999.1"), "value3"),
]
rdn = x509.RelativeDistinguishedName(iter(attrs))
assert list(rdn) == attrs
assert list(rdn) == attrs
def test_get_attributes_for_oid(self):
oid = x509.ObjectIdentifier("2.999.1")
attr = x509.NameAttribute(oid, "value1")
rdn = x509.RelativeDistinguishedName([attr])
assert rdn.get_attributes_for_oid(oid) == [attr]
assert rdn.get_attributes_for_oid(x509.ObjectIdentifier("1.2.3")) == []
| TestRelativeDistinguishedName |
python | django__django | django/db/models/lookups.py | {
"start": 16530,
"end": 16904
} | class ____:
"""
Allow floats to work as query values for IntegerField. Without this, the
decimal portion of the float would always be discarded.
"""
def get_prep_lookup(self):
if isinstance(self.rhs, float):
self.rhs = math.ceil(self.rhs)
return super().get_prep_lookup()
@IntegerField.register_lookup
| IntegerFieldFloatRounding |
python | pytorch__pytorch | test/onnx/test_onnx_opset.py | {
"start": 2159,
"end": 21713
} | class ____(pytorch_test_common.ExportTestCase):
def test_opset_fallback(self):
class MyModule(Module):
def forward(self, x):
return torch.isnan(x)
ops = [{"op_name": "IsNaN"}]
ops = {9: ops, 10: ops}
x = torch.tensor([1.0, float("nan"), 2.0])
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_topk(self):
class MyModule(Module):
def forward(self, x):
return torch.topk(x, 3)
ops_9 = [
{
"op_name": "TopK",
"attributes": [
{"name": "axis", "i": -1, "type": 2},
{"name": "k", "i": 3, "type": 2},
],
}
]
ops_10 = [
{"op_name": "Constant"},
{"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]},
]
ops = {9: ops_9, 10: ops_10}
x = torch.arange(1.0, 6.0, requires_grad=True)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
# test with dynamic k
class MyModuleDynamic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, k):
return torch.topk(input, k)
ops_10 = [
{"op_name": "Constant", "attributes": [{"name": "value", "type": 4}]},
{"op_name": "Reshape"},
{"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]},
]
ops = {10: ops_10}
x = torch.arange(1.0, 6.0, requires_grad=True)
k = torch.tensor(3)
module = MyModuleDynamic()
check_onnx_opsets_operator(module, (x, k), ops, opset_versions=[10])
def test_maxpool(self):
module = torch.nn.MaxPool1d(2, stride=1)
ops_9 = [
{
"op_name": "MaxPool",
"attributes": [
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7},
],
}
]
ops_10 = [
{
"op_name": "MaxPool",
"attributes": [
{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "dilations", "ints": [1], "type": 7},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7},
],
}
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[9, 10])
# add test with dilations
module = torch.nn.MaxPool1d(2, stride=1, dilation=2)
ops_10 = [
{
"op_name": "MaxPool",
"attributes": [
{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "dilations", "ints": [2], "type": 7},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7},
],
}
]
ops = {10: ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[10])
def test_upsample(self):
class MyModule(Module):
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
size = [int(i) for i in size]
return torch.nn.functional.interpolate(x, size=size, mode="nearest")
module = MyModule()
ops8 = [
{
"op_name": "Upsample",
"attributes": [
{"name": "mode", "s": (b"nearest"), "type": 3},
{"name": "scales", "floats": [1.0, 1.0, 2.0, 2.0], "type": 6},
],
}
]
ops9 = [
{"op_name": "Constant"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops = {8: ops8, 9: ops9}
x = torch.randn(2, 2, 2, 2)
check_onnx_opsets_operator(module, x, ops, opset_versions=[8, 9])
def test_cast_constant(self):
class MyModule(Module):
def forward(self, x):
return x - 1
module = MyModule()
ops_8 = [
{"op_name": "Constant"},
{"op_name": "Cast", "attributes": [{"name": "to", "i": 7, "type": 2}]},
{"op_name": "Sub"},
]
ops_9 = [{"op_name": "Constant"}, {"op_name": "Sub"}]
ops = {8: ops_8, 9: ops_9}
x = torch.ones(5, 6, dtype=torch.long)
check_onnx_opsets_operator(module, x, ops, opset_versions=[8, 9])
def test_slice(self):
class MyModule(Module):
def forward(self, x):
return x[0:1]
ops_9 = [
{
"op_name": "Slice",
"attributes": [
{"name": "axes", "ints": [0], "type": 7},
{"name": "ends", "ints": [1], "type": 7},
{"name": "starts", "ints": [0], "type": 7},
],
}
]
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(3)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
class DynamicSliceModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x[1 : x.size(0)]
module = DynamicSliceModel()
x = torch.rand(1, 2)
ops_10 = [
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather", "attributes": [{"name": "axis", "i": 0, "type": 2}]},
{"op_name": "Constant"},
{"op_name": "Constant"},
{
"op_name": "Unsqueeze",
"attributes": [{"name": "axes", "i": 0, "type": 7}],
},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {10: ops_10}
check_onnx_opsets_operator(
module,
x,
ops,
opset_versions=[10],
input_names=["x"],
dynamic_axes={"x": [0, 1]},
)
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {10: ops_10}
check_onnx_opsets_operator(module, x, ops, opset_versions=[10])
def test_flip(self):
class MyModule(Module):
def forward(self, x):
return torch.flip(x, dims=[0])
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice", "attributes": []},
]
ops = {10: ops_10}
import numpy
x = torch.tensor(numpy.arange(6.0).reshape(2, 3))
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[10])
def test_dropout(self):
class MyModule(Module):
def __init__(self) -> None:
super().__init__()
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
return self.dropout(x)
x = torch.randn(1, 2, 3)
# we should only export the onnx Dropout op in training mode; test both modes
# test training mode
ops = [
{
"op_name": "Dropout",
"attributes": [{"name": "ratio", "f": 0.5, "type": 1}],
}
]
ops = {9: ops, 10: ops}
check_onnx_opsets_operator(
MyModule(),
x,
ops,
opset_versions=[9, 10],
training=torch.onnx.TrainingMode.TRAINING,
)
# test eval mode
ops = [{"op_name": "Identity"}]
ops = {9: ops, 10: ops}
check_onnx_opsets_operator(
MyModule(),
x,
ops,
opset_versions=[9, 10],
training=torch.onnx.TrainingMode.EVAL,
)
def test_full(self):
class MyModule(Module):
def forward(self, x):
return torch.full((3, 4), x)
ops = [
{"op_name": "Constant"},
{"op_name": "ConstantOfShape"},
{"op_name": "Add"},
]
ops = {9: ops, 10: ops}
x = torch.tensor(12.0)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_interpolate(self):
class MyModel(torch.nn.Module):
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
return torch.nn.functional.interpolate(x, size=size, mode="nearest")
ops_9 = [
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Unsqueeze"},
{"op_name": "Unsqueeze"},
{"op_name": "Concat"},
{"op_name": "Cast"},
{"op_name": "Shape"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops_10 = [
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Gather"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Constant"},
{"op_name": "Mul"},
{"op_name": "Unsqueeze"},
{"op_name": "Unsqueeze"},
{"op_name": "Concat"},
{"op_name": "Cast"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{
"op_name": "Resize",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(1, 2, 3, 4, requires_grad=True)
check_onnx_opsets_operator(
MyModel(),
x,
ops,
opset_versions=[9, 10],
input_names=["x"],
dynamic_axes={"x": [0, 1, 2, 3]},
)
ops_9 = [
{"op_name": "Constant"},
{"op_name": "Shape"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops_10 = [
{"op_name": "Constant"},
{"op_name": "Shape"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Constant"},
{"op_name": "Slice"},
{"op_name": "Cast"},
{"op_name": "Div"},
{"op_name": "Constant"},
{"op_name": "Concat"},
{"op_name": "Resize"},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(1, 2, 3, 4, requires_grad=True)
check_onnx_opsets_operator(MyModel(), x, ops, opset_versions=[9, 10])
class MyDynamicModel(torch.nn.Module):
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
# work around for now: turn the dynamic sizes into constant
size = [int(i) for i in size]
return torch.nn.functional.interpolate(x, size=size, mode="nearest")
ops_9 = [
{"op_name": "Constant"},
{
"op_name": "Upsample",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops_10 = [
{"op_name": "Constant"},
{
"op_name": "Resize",
"attributes": [{"name": "mode", "s": (b"nearest"), "type": 3}],
},
]
ops = {9: ops_9, 10: ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(MyDynamicModel(), x, ops, opset_versions=[9, 10])
def test_affine_grid(self):
class MyModule(Module):
def __init__(self, align_corners):
super().__init__()
self.align_corners = align_corners
def forward(self, theta, size):
return torch.nn.functional.affine_grid(
theta, size, align_corners=self.align_corners
)
opset_version = 20
ops_2d = {
opset_version: [
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Concat"},
{"op_name": "AffineGrid"},
]
}
ops_3d = {
opset_version: [
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Constant"},
{"op_name": "Unsqueeze"},
{"op_name": "Concat"},
{"op_name": "AffineGrid"},
]
}
# 2D affine
theta_2d = torch.empty(1, 2, 3, dtype=torch.double)
size_2d = torch.Size([1, 1, 2, 2])
# 3D affine
theta_3d = torch.empty(1, 3, 4, dtype=torch.double)
size_3d = torch.Size([1, 1, 2, 2, 2])
for inputs, align_corners in itertools.product(
((theta_2d, size_2d, ops_2d), (theta_3d, size_3d, ops_3d)),
(True, False),
):
theta, size, ops = inputs
args = (
theta,
size,
)
check_onnx_opsets_operator(
MyModule(align_corners=align_corners),
args,
ops,
opset_versions=[opset_version],
training=torch.onnx.TrainingMode.TRAINING,
)
check_onnx_opsets_operator(
MyModule(align_corners=align_corners),
args,
ops,
opset_versions=[opset_version],
training=torch.onnx.TrainingMode.EVAL,
)
def test_grid_sample(self):
class MyModule(torch.nn.Module):
def __init__(self, mode, padding_mode, align_corners):
super().__init__()
self.mode = mode
self.padding_mode = padding_mode
self.align_corners = align_corners
def forward(self, x, grid):
return torch.nn.functional.grid_sample(
x,
grid,
mode=self.mode,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
for mode, padding_mode, align_corners, opset_version in itertools.product(
("bilinear", "nearest", "bicubic"),
("zeros", "border", "reflection"),
(True, False),
(16, 20),
):
def test_eval_and_training(
ops, opset_version, mode, padding_mode, align_corners, x_shape, grid
):
args = (
torch.randn(*x_shape), # x
torch.randn(grid), # grid,
)
check_onnx_opsets_operator(
MyModule(
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
),
args,
ops,
opset_versions=[opset_version],
training=torch.onnx.TrainingMode.TRAINING,
)
check_onnx_opsets_operator(
MyModule(
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
),
args,
ops,
opset_versions=[opset_version],
training=torch.onnx.TrainingMode.EVAL,
)
ops = {opset_version: [{"op_name": "GridSample"}]}
# mode = convert_grid_sample_mode(mode) if opset_version == 20 else mode
n, c, d_in, h_in, w_in, d_out, h_out, w_out = 1, 1, 2, 3, 2, 3, 2, 4
test_eval_and_training(
ops,
opset_version,
mode,
padding_mode,
align_corners,
(n, c, h_in, w_in),
(n, h_out, w_out, 2),
)
if opset_version == 20 and mode != "bicubic":
test_eval_and_training(
ops,
opset_version,
mode,
padding_mode,
align_corners,
(n, c, d_in, h_in, w_in),
(n, d_out, h_out, w_out, 3),
)
def test_flatten(self):
class MyModule(Module):
def forward(self, x):
return torch.flatten(x)
module = MyModule()
ops_0d = [{"op_name": "Constant"}, {"op_name": "Reshape"}]
ops_1d = [{"op_name": "Identity"}]
for shape in ([], [3]):
x = torch.randn(shape)
for opset_version in [9, 10]:
ops = {opset_version: (ops_0d if len(shape) == 0 else ops_1d)}
check_onnx_opsets_operator(
module, x, ops, opset_versions=[opset_version]
)
if __name__ == "__main__":
common_utils.run_tests()
| TestONNXOpset |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 234159,
"end": 238025
} | class ____(sgqlc.types.Input):
"""A description of a set of changes to a file tree to be made as
part of a git commit, modeled as zero or more file `additions` and
zero or more file `deletions`. Both fields are optional; omitting
both will produce a commit with no file changes. `deletions` and
`additions` describe changes to files identified by their path in
the git tree using unix-style path separators, i.e. `/`. The root
of a git tree is an empty string, so paths are not slash-prefixed.
`path` values must be unique across all `additions` and
`deletions` provided. Any duplication will result in a validation
error. ### Encoding File contents must be provided in full for
each `FileAddition`. The `contents` of a `FileAddition` must be
encoded using RFC 4648 compliant base64, i.e. correct padding is
required and no characters outside the standard alphabet may be
used. Invalid base64 encoding will be rejected with a validation
error. The encoded contents may be binary. For text files, no
assumptions are made about the character encoding of the file
contents (after base64 decoding). No charset transcoding or line-
ending normalization will be performed; it is the client's
responsibility to manage the character encoding of files they
provide. However, for maximum compatibility we recommend using
UTF-8 encoding and ensuring that all files in a repository use a
consistent line-ending convention (`\n` or `\r\n`), and that all
files end with a newline. ### Modeling file changes Each of the
the five types of conceptual changes that can be made in a git
commit can be described using the `FileChanges` type as follows:
1. New file addition: create file `hello world\n` at path
`docs/README.txt`: { "additions" [ {
"path": "docs/README.txt", "contents":
base64encode("hello world\n") } ] } 2.
Existing file modification: change existing `docs/README.txt` to
have new content `new content here\n`: {
"additions" [ { "path": "docs/README.txt",
"contents": base64encode("new content here\n") }
] } 3. Existing file deletion: remove existing file
`docs/README.txt`. Note that the path is required to exist --
specifying a path that does not exist on the given branch will
abort the commit and return an error. {
"deletions" [ { "path": "docs/README.txt"
} ] } 4. File rename with no changes: rename
`docs/README.txt` with previous content `hello world\n` to the
same content at `newdocs/README.txt`: {
"deletions" [ { "path": "docs/README.txt",
} ], "additions" [ {
"path": "newdocs/README.txt", "contents":
base64encode("hello world\n") } ] }
5. File rename with changes: rename `docs/README.txt` with
previous content `hello world\n` to a file at path
`newdocs/README.txt` with content `new contents\n`: {
"deletions" [ { "path": "docs/README.txt",
} ], "additions" [ {
"path": "newdocs/README.txt", "contents":
base64encode("new contents\n") } ] }
"""
__schema__ = github_schema
__field_names__ = ("deletions", "additions")
deletions = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null("FileDeletion")), graphql_name="deletions")
"""Files to delete."""
additions = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(FileAddition)), graphql_name="additions")
"""File to add or change."""
| FileChanges |
python | falconry__falcon | examples/look/look/images.py | {
"start": 88,
"end": 700
} | class ____:
def __init__(self, image_store):
self._image_store = image_store
def on_get(self, req, resp):
max_size = req.get_param_as_int('maxsize', min_value=1, default=-1)
images = self._image_store.list(max_size)
doc = {'images': [{'href': '/images/' + image} for image in images]}
resp.text = json.dumps(doc, ensure_ascii=False)
resp.status = falcon.HTTP_200
def on_post(self, req, resp):
name = self._image_store.save(req.stream, req.content_type)
resp.status = falcon.HTTP_201
resp.location = '/images/' + name
| Collection |
python | Textualize__textual | src/textual/widgets/_data_table.py | {
"start": 7771,
"end": 7924
} | class ____:
"""Metadata for a row in the DataTable."""
key: RowKey
height: int
label: Text | None = None
auto_height: bool = False
| Row |
python | getsentry__sentry | tests/sentry/issues/test_run.py | {
"start": 1421,
"end": 5937
} | class ____(TestCase, OccurrenceTestMixin):
def build_mock_message(
self, data: MutableMapping[str, Any] | None, topic: ArroyoTopic | None = None
) -> mock.Mock:
message = mock.Mock()
message.value.return_value = json.dumps(data)
if topic:
message.topic.return_value = topic
return message
@with_feature("organizations:profile-file-io-main-thread-ingest")
@mock.patch("sentry.issues.occurrence_consumer.save_issue_occurrence")
def test_saves_issue_occurrence(self, mock_save_issue_occurrence: mock.MagicMock) -> None:
topic = ArroyoTopic(get_topic_definition(Topic.INGEST_OCCURRENCES)["real_topic_name"])
partition_1 = Partition(topic, 0)
partition_2 = Partition(topic, 1)
mock_commit = mock.Mock()
strategy = OccurrenceStrategyFactory(
num_processes=2,
input_block_size=1,
max_batch_size=2,
max_batch_time=1,
output_block_size=1,
).create_with_partitions(
commit=mock_commit,
partitions={},
)
occurrence = self.build_occurrence(project_id=self.project.id)
payload_data = _prepare_occurrence_message(
occurrence,
{
"project_id": self.project.id,
"event_id": occurrence.event_id,
"platform": "python",
"tags": {"my_tag": "2"},
"timestamp": before_now(minutes=1).isoformat(),
"received": before_now(minutes=1).isoformat(),
},
)
message = self.build_mock_message(payload_data, topic)
strategy.submit(
Message(
BrokerValue(
KafkaPayload(b"key", message.value().encode("utf-8"), []),
partition_1,
1,
datetime.now(),
)
)
)
strategy.submit(
Message(
BrokerValue(
KafkaPayload(b"key", message.value().encode("utf-8"), []),
partition_2,
1,
datetime.now(),
)
)
)
calls = [
mock.call({partition_1: 2}),
mock.call({partition_2: 2}),
]
mock_commit.assert_has_calls(calls=calls, any_order=True)
strategy.poll()
strategy.join(1)
strategy.terminate()
assert mock_save_issue_occurrence.call_count == 2
occurrence_data = occurrence.to_dict()
# need to modify some fields because they get mutated
occurrence_data["priority"] = PriorityLevel.LOW
occurrence_data["fingerprint"] = ["cdfb5fbc0959e8e2f27a6e6027c6335b"]
mock_save_issue_occurrence.assert_called_with(occurrence_data, mock.ANY)
@with_feature("organizations:profile-file-io-main-thread-ingest")
@mock.patch("sentry.issues.run.logger")
@mock.patch("sentry.issues.occurrence_consumer.save_issue_occurrence")
def test_malformed_json_payload(
self, mock_save_issue_occurrence: mock.MagicMock, mock_logger: mock.MagicMock
) -> None:
topic = ArroyoTopic(get_topic_definition(Topic.INGEST_OCCURRENCES)["real_topic_name"])
partition = Partition(topic, 0)
mock_commit = mock.Mock()
strategy = OccurrenceStrategyFactory(
num_processes=2,
input_block_size=1,
max_batch_size=1,
max_batch_time=1,
output_block_size=1,
).create_with_partitions(
commit=mock_commit,
partitions={},
)
message = mock.Mock()
message.value.return_value = "malformed json"
message.topic.return_value = topic
strategy.submit(
Message(
BrokerValue(
KafkaPayload(b"key", message.value().encode("utf-8"), []),
partition,
1,
datetime.now(),
)
)
)
strategy.poll()
strategy.join(1)
strategy.terminate()
assert mock_save_issue_occurrence.call_count == 0
mock_logger.exception.assert_called_once_with("failed to process message payload")
# XXX: this is a TransactionTestCase because it creates database objects in a
# background thread which otherwise do not get cleaned up by django's
# transaction-based cleanup
| TestOccurrenceConsumer |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_device_request_allocation_result.py | {
"start": 383,
"end": 17712
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'admin_access': 'bool',
'binding_conditions': 'list[str]',
'binding_failure_conditions': 'list[str]',
'consumed_capacity': 'dict(str, str)',
'device': 'str',
'driver': 'str',
'pool': 'str',
'request': 'str',
'share_id': 'str',
'tolerations': 'list[V1beta1DeviceToleration]'
}
attribute_map = {
'admin_access': 'adminAccess',
'binding_conditions': 'bindingConditions',
'binding_failure_conditions': 'bindingFailureConditions',
'consumed_capacity': 'consumedCapacity',
'device': 'device',
'driver': 'driver',
'pool': 'pool',
'request': 'request',
'share_id': 'shareID',
'tolerations': 'tolerations'
}
def __init__(self, admin_access=None, binding_conditions=None, binding_failure_conditions=None, consumed_capacity=None, device=None, driver=None, pool=None, request=None, share_id=None, tolerations=None, local_vars_configuration=None): # noqa: E501
"""V1beta1DeviceRequestAllocationResult - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._admin_access = None
self._binding_conditions = None
self._binding_failure_conditions = None
self._consumed_capacity = None
self._device = None
self._driver = None
self._pool = None
self._request = None
self._share_id = None
self._tolerations = None
self.discriminator = None
if admin_access is not None:
self.admin_access = admin_access
if binding_conditions is not None:
self.binding_conditions = binding_conditions
if binding_failure_conditions is not None:
self.binding_failure_conditions = binding_failure_conditions
if consumed_capacity is not None:
self.consumed_capacity = consumed_capacity
self.device = device
self.driver = driver
self.pool = pool
self.request = request
if share_id is not None:
self.share_id = share_id
if tolerations is not None:
self.tolerations = tolerations
@property
def admin_access(self):
"""Gets the admin_access of this V1beta1DeviceRequestAllocationResult. # noqa: E501
AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode. This is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled. # noqa: E501
:return: The admin_access of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: bool
"""
return self._admin_access
@admin_access.setter
def admin_access(self, admin_access):
"""Sets the admin_access of this V1beta1DeviceRequestAllocationResult.
AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode. This is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled. # noqa: E501
:param admin_access: The admin_access of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: bool
"""
self._admin_access = admin_access
@property
def binding_conditions(self):
"""Gets the binding_conditions of this V1beta1DeviceRequestAllocationResult. # noqa: E501
BindingConditions contains a copy of the BindingConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates. # noqa: E501
:return: The binding_conditions of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: list[str]
"""
return self._binding_conditions
@binding_conditions.setter
def binding_conditions(self, binding_conditions):
"""Sets the binding_conditions of this V1beta1DeviceRequestAllocationResult.
BindingConditions contains a copy of the BindingConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates. # noqa: E501
:param binding_conditions: The binding_conditions of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: list[str]
"""
self._binding_conditions = binding_conditions
@property
def binding_failure_conditions(self):
"""Gets the binding_failure_conditions of this V1beta1DeviceRequestAllocationResult. # noqa: E501
BindingFailureConditions contains a copy of the BindingFailureConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates. # noqa: E501
:return: The binding_failure_conditions of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: list[str]
"""
return self._binding_failure_conditions
@binding_failure_conditions.setter
def binding_failure_conditions(self, binding_failure_conditions):
"""Sets the binding_failure_conditions of this V1beta1DeviceRequestAllocationResult.
BindingFailureConditions contains a copy of the BindingFailureConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates. # noqa: E501
:param binding_failure_conditions: The binding_failure_conditions of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: list[str]
"""
self._binding_failure_conditions = binding_failure_conditions
@property
def consumed_capacity(self):
"""Gets the consumed_capacity of this V1beta1DeviceRequestAllocationResult. # noqa: E501
ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request. The consumed amount may differ from the requested amount: it is rounded up to the nearest valid value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount). The total consumed capacity for each device must not exceed the DeviceCapacity's Value. This field is populated only for devices that allow multiple allocations. All capacity entries are included, even if the consumed amount is zero. # noqa: E501
:return: The consumed_capacity of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: dict(str, str)
"""
return self._consumed_capacity
@consumed_capacity.setter
def consumed_capacity(self, consumed_capacity):
"""Sets the consumed_capacity of this V1beta1DeviceRequestAllocationResult.
ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request. The consumed amount may differ from the requested amount: it is rounded up to the nearest valid value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount). The total consumed capacity for each device must not exceed the DeviceCapacity's Value. This field is populated only for devices that allow multiple allocations. All capacity entries are included, even if the consumed amount is zero. # noqa: E501
:param consumed_capacity: The consumed_capacity of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: dict(str, str)
"""
self._consumed_capacity = consumed_capacity
@property
def device(self):
"""Gets the device of this V1beta1DeviceRequestAllocationResult. # noqa: E501
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:return: The device of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._device
@device.setter
def device(self, device):
"""Sets the device of this V1beta1DeviceRequestAllocationResult.
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:param device: The device of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and device is None: # noqa: E501
raise ValueError("Invalid value for `device`, must not be `None`") # noqa: E501
self._device = device
@property
def driver(self):
"""Gets the driver of this V1beta1DeviceRequestAllocationResult. # noqa: E501
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:return: The driver of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1beta1DeviceRequestAllocationResult.
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:param driver: The driver of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def pool(self):
"""Gets the pool of this V1beta1DeviceRequestAllocationResult. # noqa: E501
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:return: The pool of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1beta1DeviceRequestAllocationResult.
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:param pool: The pool of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pool is None: # noqa: E501
raise ValueError("Invalid value for `pool`, must not be `None`") # noqa: E501
self._pool = pool
@property
def request(self):
"""Gets the request of this V1beta1DeviceRequestAllocationResult. # noqa: E501
Request is the name of the request in the claim which caused this device to be allocated. If it references a subrequest in the firstAvailable list on a DeviceRequest, this field must include both the name of the main request and the subrequest using the format <main request>/<subrequest>. Multiple devices may have been allocated per request. # noqa: E501
:return: The request of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._request
@request.setter
def request(self, request):
"""Sets the request of this V1beta1DeviceRequestAllocationResult.
Request is the name of the request in the claim which caused this device to be allocated. If it references a subrequest in the firstAvailable list on a DeviceRequest, this field must include both the name of the main request and the subrequest using the format <main request>/<subrequest>. Multiple devices may have been allocated per request. # noqa: E501
:param request: The request of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and request is None: # noqa: E501
raise ValueError("Invalid value for `request`, must not be `None`") # noqa: E501
self._request = request
@property
def share_id(self):
"""Gets the share_id of this V1beta1DeviceRequestAllocationResult. # noqa: E501
ShareID uniquely identifies an individual allocation share of the device, used when the device supports multiple simultaneous allocations. It serves as an additional map key to differentiate concurrent shares of the same device. # noqa: E501
:return: The share_id of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._share_id
@share_id.setter
def share_id(self, share_id):
"""Sets the share_id of this V1beta1DeviceRequestAllocationResult.
ShareID uniquely identifies an individual allocation share of the device, used when the device supports multiple simultaneous allocations. It serves as an additional map key to differentiate concurrent shares of the same device. # noqa: E501
:param share_id: The share_id of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
self._share_id = share_id
@property
def tolerations(self):
"""Gets the tolerations of this V1beta1DeviceRequestAllocationResult. # noqa: E501
A copy of all tolerations specified in the request at the time when the device got allocated. The maximum number of tolerations is 16. This is an alpha field and requires enabling the DRADeviceTaints feature gate. # noqa: E501
:return: The tolerations of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:rtype: list[V1beta1DeviceToleration]
"""
return self._tolerations
@tolerations.setter
def tolerations(self, tolerations):
"""Sets the tolerations of this V1beta1DeviceRequestAllocationResult.
A copy of all tolerations specified in the request at the time when the device got allocated. The maximum number of tolerations is 16. This is an alpha field and requires enabling the DRADeviceTaints feature gate. # noqa: E501
:param tolerations: The tolerations of this V1beta1DeviceRequestAllocationResult. # noqa: E501
:type: list[V1beta1DeviceToleration]
"""
self._tolerations = tolerations
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1DeviceRequestAllocationResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1DeviceRequestAllocationResult):
return True
return self.to_dict() != other.to_dict()
| V1beta1DeviceRequestAllocationResult |
python | aimacode__aima-python | learning.py | {
"start": 14488,
"end": 27263
} | class ____:
"""A leaf of a decision tree holds just a result."""
def __init__(self, result):
self.result = result
def __call__(self, example):
return self.result
def display(self):
print('RESULT =', self.result)
def __repr__(self):
return repr(self.result)
def DecisionTreeLearner(dataset):
"""[Figure 18.5]"""
target, values = dataset.target, dataset.values
def decision_tree_learning(examples, attrs, parent_examples=()):
if len(examples) == 0:
return plurality_value(parent_examples)
if all_same_class(examples):
return DecisionLeaf(examples[0][target])
if len(attrs) == 0:
return plurality_value(examples)
A = choose_attribute(attrs, examples)
tree = DecisionFork(A, dataset.attr_names[A], plurality_value(examples))
for (v_k, exs) in split_by(A, examples):
subtree = decision_tree_learning(exs, remove_all(A, attrs), examples)
tree.add(v_k, subtree)
return tree
def plurality_value(examples):
"""
Return the most popular target value for this set of examples.
(If target is binary, this is the majority; otherwise plurality).
"""
popular = argmax_random_tie(values[target], key=lambda v: count(target, v, examples))
return DecisionLeaf(popular)
def count(attr, val, examples):
"""Count the number of examples that have example[attr] = val."""
return sum(e[attr] == val for e in examples)
def all_same_class(examples):
"""Are all these examples in the same target class?"""
class0 = examples[0][target]
return all(e[target] == class0 for e in examples)
def choose_attribute(attrs, examples):
"""Choose the attribute with the highest information gain."""
return argmax_random_tie(attrs, key=lambda a: information_gain(a, examples))
def information_gain(attr, examples):
"""Return the expected reduction in entropy from splitting by attr."""
def I(examples):
return information_content([count(target, v, examples) for v in values[target]])
n = len(examples)
remainder = sum((len(examples_i) / n) * I(examples_i) for (v, examples_i) in split_by(attr, examples))
return I(examples) - remainder
def split_by(attr, examples):
"""Return a list of (val, examples) pairs for each val of attr."""
return [(v, [e for e in examples if e[attr] == v]) for v in values[attr]]
return decision_tree_learning(dataset.examples, dataset.inputs)
def information_content(values):
"""Number of bits to represent the probability distribution in values."""
probabilities = normalize(remove_all(0, values))
return sum(-p * np.log2(p) for p in probabilities)
def DecisionListLearner(dataset):
"""
[Figure 18.11]
A decision list implemented as a list of (test, value) pairs.
"""
def decision_list_learning(examples):
if not examples:
return [(True, False)]
t, o, examples_t = find_examples(examples)
if not t:
raise Exception
return [(t, o)] + decision_list_learning(examples - examples_t)
def find_examples(examples):
"""
Find a set of examples that all have the same outcome under
some test. Return a tuple of the test, outcome, and examples.
"""
raise NotImplementedError
def passes(example, test):
"""Does the example pass the test?"""
raise NotImplementedError
def predict(example):
"""Predict the outcome for the first passing test."""
for test, outcome in predict.decision_list:
if passes(example, test):
return outcome
predict.decision_list = decision_list_learning(set(dataset.examples))
return predict
def NearestNeighborLearner(dataset, k=1):
"""k-NearestNeighbor: the k nearest neighbors vote."""
def predict(example):
"""Find the k closest items, and have them vote for the best."""
best = heapq.nsmallest(k, ((dataset.distance(e, example), e) for e in dataset.examples))
return mode(e[dataset.target] for (d, e) in best)
return predict
def LinearLearner(dataset, learning_rate=0.01, epochs=100):
"""
[Section 18.6.3]
Linear classifier with hard threshold.
"""
idx_i = dataset.inputs
idx_t = dataset.target
examples = dataset.examples
num_examples = len(examples)
# X transpose
X_col = [dataset.values[i] for i in idx_i] # vertical columns of X
# add dummy
ones = [1 for _ in range(len(examples))]
X_col = [ones] + X_col
# initialize random weights
num_weights = len(idx_i) + 1
w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)
for epoch in range(epochs):
err = []
# pass over all examples
for example in examples:
x = [1] + example
y = np.dot(w, x)
t = example[idx_t]
err.append(t - y)
# update weights
for i in range(len(w)):
w[i] = w[i] + learning_rate * (np.dot(err, X_col[i]) / num_examples)
def predict(example):
x = [1] + example
return np.dot(w, x)
return predict
def LogisticLinearLeaner(dataset, learning_rate=0.01, epochs=100):
"""
[Section 18.6.4]
Linear classifier with logistic regression.
"""
idx_i = dataset.inputs
idx_t = dataset.target
examples = dataset.examples
num_examples = len(examples)
# X transpose
X_col = [dataset.values[i] for i in idx_i] # vertical columns of X
# add dummy
ones = [1 for _ in range(len(examples))]
X_col = [ones] + X_col
# initialize random weights
num_weights = len(idx_i) + 1
w = random_weights(min_value=-0.5, max_value=0.5, num_weights=num_weights)
for epoch in range(epochs):
err = []
h = []
# pass over all examples
for example in examples:
x = [1] + example
y = sigmoid(np.dot(w, x))
h.append(sigmoid_derivative(y))
t = example[idx_t]
err.append(t - y)
# update weights
for i in range(len(w)):
buffer = [x * y for x, y in zip(err, h)]
w[i] = w[i] + learning_rate * (np.dot(buffer, X_col[i]) / num_examples)
def predict(example):
x = [1] + example
return sigmoid(np.dot(w, x))
return predict
def NeuralNetLearner(dataset, hidden_layer_sizes=None, learning_rate=0.01, epochs=100, activation=sigmoid):
"""
Layered feed-forward network.
hidden_layer_sizes: List of number of hidden units per hidden layer
learning_rate: Learning rate of gradient descent
epochs: Number of passes over the dataset
"""
if hidden_layer_sizes is None:
hidden_layer_sizes = [3]
i_units = len(dataset.inputs)
o_units = len(dataset.values[dataset.target])
# construct a network
raw_net = network(i_units, hidden_layer_sizes, o_units, activation)
learned_net = BackPropagationLearner(dataset, raw_net, learning_rate, epochs, activation)
def predict(example):
# input nodes
i_nodes = learned_net[0]
# activate input layer
for v, n in zip(example, i_nodes):
n.value = v
# forward pass
for layer in learned_net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dot_product(inc, node.weights)
node.value = node.activation(in_val)
# hypothesis
o_nodes = learned_net[-1]
prediction = find_max_node(o_nodes)
return prediction
return predict
def BackPropagationLearner(dataset, net, learning_rate, epochs, activation=sigmoid):
"""
[Figure 18.23]
The back-propagation algorithm for multilayer networks.
"""
# initialise weights
for layer in net:
for node in layer:
node.weights = random_weights(min_value=-0.5, max_value=0.5, num_weights=len(node.weights))
examples = dataset.examples
# As of now dataset.target gives an int instead of list,
# Changing dataset class will have effect on all the learners.
# Will be taken care of later.
o_nodes = net[-1]
i_nodes = net[0]
o_units = len(o_nodes)
idx_t = dataset.target
idx_i = dataset.inputs
n_layers = len(net)
inputs, targets = init_examples(examples, idx_i, idx_t, o_units)
for epoch in range(epochs):
# iterate over each example
for e in range(len(examples)):
i_val = inputs[e]
t_val = targets[e]
# activate input layer
for v, n in zip(i_val, i_nodes):
n.value = v
# forward pass
for layer in net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dot_product(inc, node.weights)
node.value = node.activation(in_val)
# initialize delta
delta = [[] for _ in range(n_layers)]
# compute outer layer delta
# error for the MSE cost function
err = [t_val[i] - o_nodes[i].value for i in range(o_units)]
# calculate delta at output
if node.activation == sigmoid:
delta[-1] = [sigmoid_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
elif node.activation == relu:
delta[-1] = [relu_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
elif node.activation == tanh:
delta[-1] = [tanh_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
elif node.activation == elu:
delta[-1] = [elu_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
elif node.activation == leaky_relu:
delta[-1] = [leaky_relu_derivative(o_nodes[i].value) * err[i] for i in range(o_units)]
else:
return ValueError("Activation function unknown.")
# backward pass
h_layers = n_layers - 2
for i in range(h_layers, 0, -1):
layer = net[i]
h_units = len(layer)
nx_layer = net[i + 1]
# weights from each ith layer node to each i + 1th layer node
w = [[node.weights[k] for node in nx_layer] for k in range(h_units)]
if activation == sigmoid:
delta[i] = [sigmoid_derivative(layer[j].value) * dot_product(w[j], delta[i + 1])
for j in range(h_units)]
elif activation == relu:
delta[i] = [relu_derivative(layer[j].value) * dot_product(w[j], delta[i + 1])
for j in range(h_units)]
elif activation == tanh:
delta[i] = [tanh_derivative(layer[j].value) * dot_product(w[j], delta[i + 1])
for j in range(h_units)]
elif activation == elu:
delta[i] = [elu_derivative(layer[j].value) * dot_product(w[j], delta[i + 1])
for j in range(h_units)]
elif activation == leaky_relu:
delta[i] = [leaky_relu_derivative(layer[j].value) * dot_product(w[j], delta[i + 1])
for j in range(h_units)]
else:
return ValueError("Activation function unknown.")
# update weights
for i in range(1, n_layers):
layer = net[i]
inc = [node.value for node in net[i - 1]]
units = len(layer)
for j in range(units):
layer[j].weights = vector_add(layer[j].weights,
scalar_vector_product(learning_rate * delta[i][j], inc))
return net
def PerceptronLearner(dataset, learning_rate=0.01, epochs=100):
"""Logistic Regression, NO hidden layer"""
i_units = len(dataset.inputs)
o_units = len(dataset.values[dataset.target])
hidden_layer_sizes = []
raw_net = network(i_units, hidden_layer_sizes, o_units)
learned_net = BackPropagationLearner(dataset, raw_net, learning_rate, epochs)
def predict(example):
o_nodes = learned_net[1]
# forward pass
for node in o_nodes:
in_val = dot_product(example, node.weights)
node.value = node.activation(in_val)
# hypothesis
return find_max_node(o_nodes)
return predict
| DecisionLeaf |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 23222,
"end": 24435
} | class ____(HtmlRenderer):
def __init__(
self,
connected=True,
config=None,
auto_play=False,
post_script=None,
animation_opts=None,
):
super(SphinxGalleryHtmlRenderer, self).__init__(
connected=connected,
full_html=False,
global_init=False,
config=config,
auto_play=auto_play,
post_script=post_script,
animation_opts=animation_opts,
)
def to_mimebundle(self, fig_dict):
from plotly.io import to_html
if self.connected:
include_plotlyjs = "cdn"
include_mathjax = "cdn"
else:
include_plotlyjs = True
include_mathjax = "cdn"
html = to_html(
fig_dict,
config=self.config,
auto_play=self.auto_play,
include_plotlyjs=include_plotlyjs,
include_mathjax=include_mathjax,
full_html=self.full_html,
animation_opts=self.animation_opts,
default_width="100%",
default_height=525,
validate=False,
)
return {"text/html": html}
| SphinxGalleryHtmlRenderer |
python | catalyst-team__catalyst | examples/detection/models/yolo_x.py | {
"start": 1820,
"end": 2536
} | class ____(nn.Module):
# Standard bottleneck
def __init__(
self,
in_channels,
out_channels,
shortcut=True,
expansion=0.5,
depthwise=False,
act="silu",
):
super().__init__()
hidden_channels = int(out_channels * expansion)
Conv = DWConv if depthwise else BaseConv
self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act)
self.use_add = shortcut and in_channels == out_channels
def forward(self, x):
y = self.conv2(self.conv1(x))
if self.use_add:
y = y + x
return y
| Bottleneck |
python | huggingface__transformers | src/transformers/data/processors/squad.py | {
"start": 17265,
"end": 22937
} | class ____(DataProcessor):
"""
Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and
version 2.0 of SQuAD, respectively.
"""
train_file = None
dev_file = None
def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
if not evaluate:
answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
answers = []
else:
answers = [
{"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
]
answer = None
answer_start = None
return SquadExample(
qas_id=tensor_dict["id"].numpy().decode("utf-8"),
question_text=tensor_dict["question"].numpy().decode("utf-8"),
context_text=tensor_dict["context"].numpy().decode("utf-8"),
answer_text=answer,
start_position_character=answer_start,
title=tensor_dict["title"].numpy().decode("utf-8"),
answers=answers,
)
def get_examples_from_dataset(self, dataset, evaluate=False):
"""
Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.
Args:
dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
evaluate: Boolean specifying if in evaluation mode or in training mode
Returns:
List of SquadExample
Examples:
```python
>>> import tensorflow_datasets as tfds
>>> dataset = tfds.load("squad")
>>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
>>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
```"""
if evaluate:
dataset = dataset["validation"]
else:
dataset = dataset["train"]
examples = []
for tensor_dict in tqdm(dataset):
examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
return examples
def get_train_examples(self, data_dir, filename=None):
"""
Returns the training examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the training file has a different name than the original one
which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ""
if self.train_file is None:
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
with open(
os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
) as reader:
input_data = json.load(reader)["data"]
return self._create_examples(input_data, "train")
def get_dev_examples(self, data_dir, filename=None):
"""
Returns the evaluation example from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the evaluation file has a different name than the original one
which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ""
if self.dev_file is None:
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
with open(
os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
) as reader:
input_data = json.load(reader)["data"]
return self._create_examples(input_data, "dev")
def _create_examples(self, input_data, set_type):
is_training = set_type == "train"
examples = []
for entry in tqdm(input_data):
title = entry["title"]
for paragraph in entry["paragraphs"]:
context_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position_character = None
answer_text = None
answers = []
is_impossible = qa.get("is_impossible", False)
if not is_impossible:
if is_training:
answer = qa["answers"][0]
answer_text = answer["text"]
start_position_character = answer["answer_start"]
else:
answers = qa["answers"]
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
context_text=context_text,
answer_text=answer_text,
start_position_character=start_position_character,
title=title,
is_impossible=is_impossible,
answers=answers,
)
examples.append(example)
return examples
| SquadProcessor |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 45215,
"end": 45624
} | class ____(PrefectBaseModel):
"""Filter criteria definition for a work queue."""
tags: Optional[list[str]] = Field(
default=None,
description="Only include flow runs with these tags in the work queue.",
)
deployment_ids: Optional[list[UUID]] = Field(
default=None,
description="Only include flow runs from these deployments in the work queue.",
)
| QueueFilter |
python | PyCQA__pylint | pylint/pyreverse/inspector.py | {
"start": 14205,
"end": 16358
} | class ____(AbstractRelationshipHandler):
"""Handle aggregation relationships where parent receives child objects."""
def handle(
self, node: nodes.AssignAttr | nodes.AssignName, parent: nodes.ClassDef
) -> None:
# If the node is not part of an assignment, pass to next handler
if not isinstance(node.parent, (nodes.AnnAssign, nodes.Assign)):
super().handle(node, parent)
return
value = node.parent.value
# Extract the name to handle both AssignAttr and AssignName nodes
name = node.attrname if isinstance(node, nodes.AssignAttr) else node.name
# Aggregation: direct assignment (self.x = x)
if isinstance(value, nodes.Name):
inferred_types = utils.infer_node(node)
element_types = extract_element_types(inferred_types)
# Resolve nodes to actual class definitions
resolved_types = resolve_to_class_def(element_types)
current = set(parent.aggregations_type[name])
parent.aggregations_type[name] = list(current | resolved_types)
return
# Aggregation: comprehensions without object creation (self.x = [existing_obj for ...])
if isinstance(
value, (nodes.ListComp, nodes.DictComp, nodes.SetComp, nodes.GeneratorExp)
):
if isinstance(value, nodes.DictComp):
element = value.value
else:
element = value.elt
# If the element is a Name, it means it's an existing object, so it's aggregation
if isinstance(element, nodes.Name):
inferred_types = utils.infer_node(node)
element_types = extract_element_types(inferred_types)
# Resolve nodes to actual class definitions
resolved_types = resolve_to_class_def(element_types)
current = set(parent.aggregations_type[name])
parent.aggregations_type[name] = list(current | resolved_types)
return
# Not an aggregation, pass to next handler
super().handle(node, parent)
| AggregationsHandler |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 28126,
"end": 28910
} | class ____(MixinSequenceOfValues):
"""
y-axis tick labels
Parameters
----------
theme_element : element_text
Notes
-----
Use the `margin` to control the gap between the ticks and the
text. e.g.
```python
theme(axis_text_y=element_text(margin={"r": 5, "units": "pt"}))
```
creates a margin of 5 points.
"""
_omit = ["margin", "ha"]
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
if not ax.yaxis.get_tick_params()["labelleft"]:
return
labels = [t.label1 for t in ax.yaxis.get_major_ticks()]
self.set(labels)
def blank_ax(self, ax: Axes):
super().blank_ax(ax)
for t in ax.yaxis.get_major_ticks():
t.label1.set_visible(False)
| axis_text_y |
python | bokeh__bokeh | src/bokeh/core/property/bases.py | {
"start": 14644,
"end": 17486
} | class ____(Property[T]):
""" A base class for Properties that have type parameters, e.g. ``List(String)``.
"""
_type_params: list[Property[Any]]
def __init__(self, *type_params: TypeOrInst[Property[T]], default: Init[T] = Intrinsic, help: str | None = None) -> None:
_type_params = [ self._validate_type_param(param) for param in type_params ]
default = default if default is not Intrinsic else _type_params[0]._raw_default()
self._type_params = _type_params
super().__init__(default=default, help=help)
def __str__(self) -> str:
class_name = self.__class__.__name__
item_types = ", ".join(str(x) for x in self.type_params)
return f"{class_name}({item_types})"
def __call__(self, *, default: Init[T] = Intrinsic, help: str | None = None) -> ParameterizedProperty[T]:
""" Clone this property and allow to override ``default`` and ``help``. """
default = self._default if default is Intrinsic else default
help = self._help if help is None else help
prop = self.__class__(*self.type_params, default=default, help=help)
prop.alternatives = list(self.alternatives)
prop.assertions = list(self.assertions)
return prop
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return super().__eq__(other) and self.type_params == other.type_params
else:
return False
@staticmethod
def _validate_type_param(type_param: TypeOrInst[Property[Any]], *, help_allowed: bool = False) -> Property[Any]:
if isinstance(type_param, type):
if issubclass(type_param, Property):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, Property):
if type_param._help is not None and not help_allowed:
raise ValueError("setting 'help' on type parameters doesn't make sense")
return type_param
raise ValueError(f"expected a Property as type parameter, got {type_param}")
@property
def type_params(self) -> list[Property[Any]]:
return self._type_params
@property
def has_ref(self) -> bool:
return any(type_param.has_ref for type_param in self.type_params)
def _may_have_unstable_default(self) -> bool:
return super()._may_have_unstable_default() or \
any(type_param._may_have_unstable_default() for type_param in self.type_params)
def replace(self, old: type[Property[Any]], new: Property[Any]) -> Property[Any]:
if self.__class__ == old:
return new
else:
params = [ type_param.replace(old, new) for type_param in self.type_params ]
return self.__class__(*params)
| ParameterizedProperty |
python | mlflow__mlflow | mlflow/entities/multipart_upload.py | {
"start": 1273,
"end": 1923
} | class ____:
upload_id: str | None
credentials: list[MultipartUploadCredential]
def to_proto(self):
response = ProtoCreateMultipartUpload.Response()
if self.upload_id:
response.upload_id = self.upload_id
response.credentials.extend([credential.to_proto() for credential in self.credentials])
return response
@classmethod
def from_dict(cls, dict_):
credentials = [MultipartUploadCredential.from_dict(cred) for cred in dict_["credentials"]]
return cls(
upload_id=dict_.get("upload_id"),
credentials=credentials,
)
| CreateMultipartUploadResponse |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 12744,
"end": 12818
} | class ____(_AsyncpgNumericCommon, sqltypes.Numeric):
pass
| AsyncpgNumeric |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/manager.py | {
"start": 34489,
"end": 35809
} | class ____(ParentRunManager, RetrieverManagerMixin):
"""Callback manager for retriever run."""
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> None:
"""Run when retriever ends running.
Args:
documents: The retrieved documents.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors.
Args:
error: The error.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
handle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
| CallbackManagerForRetrieverRun |
python | pandas-dev__pandas | pandas/tests/window/test_pairwise.py | {
"start": 6830,
"end": 16169
} | class ____:
# GH 7738
@pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame methods (which do not call flex_binary_moment())
result = f(pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.columns)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().cov(pairwise=True),
lambda x: x.expanding().corr(pairwise=True),
lambda x: x.rolling(window=3).cov(pairwise=True),
lambda x: x.rolling(window=3).corr(pairwise=True),
lambda x: x.ewm(com=3).cov(pairwise=True),
lambda x: x.ewm(com=3).corr(pairwise=True),
],
)
def test_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with itself, pairwise=True
# note that we may construct the 1st level of the MI
# in a non-monotonic way, so compare accordingly
result = f(pairwise_frames)
tm.assert_index_equal(
result.index.levels[0], pairwise_frames.index, check_names=False
)
tm.assert_index_equal(
safe_sort(result.index.levels[1]),
safe_sort(pairwise_frames.columns.unique()),
)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().cov(pairwise=False),
lambda x: x.expanding().corr(pairwise=False),
lambda x: x.rolling(window=3).cov(pairwise=False),
lambda x: x.rolling(window=3).corr(pairwise=False),
lambda x: x.ewm(com=3).cov(pairwise=False),
lambda x: x.ewm(com=3).corr(pairwise=False),
],
)
def test_no_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with itself, pairwise=False
result = f(pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y, pairwise=True),
lambda x, y: x.expanding().corr(y, pairwise=True),
lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
# TODO: We're missing a flag somewhere in meson
pytest.param(
lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
marks=pytest.mark.xfail(
not IS64, reason="Precision issues on 32 bit", strict=False
),
),
lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
lambda x, y: x.ewm(com=3).corr(y, pairwise=True),
],
)
def test_pairwise_with_other(
self, pairwise_frames, pairwise_target_frame, pairwise_other_frame, f
):
# DataFrame with another DataFrame, pairwise=True
result = f(pairwise_frames, pairwise_other_frame)
tm.assert_index_equal(
result.index.levels[0], pairwise_frames.index, check_names=False
)
tm.assert_index_equal(
safe_sort(result.index.levels[1]),
safe_sort(pairwise_other_frame.columns.unique()),
)
expected = f(pairwise_target_frame, pairwise_other_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.filterwarnings("ignore:RuntimeWarning")
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y, pairwise=False),
lambda x, y: x.expanding().corr(y, pairwise=False),
lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
lambda x, y: x.ewm(com=3).corr(y, pairwise=False),
],
)
def test_no_pairwise_with_other(self, pairwise_frames, pairwise_other_frame, f):
# DataFrame with another DataFrame, pairwise=False
result = (
f(pairwise_frames, pairwise_other_frame)
if pairwise_frames.columns.is_unique
else None
)
if result is not None:
# we can have int and str columns
expected_index = pairwise_frames.index.union(pairwise_other_frame.index)
expected_columns = pairwise_frames.columns.union(
pairwise_other_frame.columns
)
tm.assert_index_equal(result.index, expected_index)
tm.assert_index_equal(result.columns, expected_columns)
else:
with pytest.raises(ValueError, match="'arg1' columns are not unique"):
f(pairwise_frames, pairwise_other_frame)
with pytest.raises(ValueError, match="'arg2' columns are not unique"):
f(pairwise_other_frame, pairwise_frames)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y),
lambda x, y: x.expanding().corr(y),
lambda x, y: x.rolling(window=3).cov(y),
lambda x, y: x.rolling(window=3).corr(y),
lambda x, y: x.ewm(com=3).cov(y),
lambda x, y: x.ewm(com=3).corr(y),
],
)
def test_pairwise_with_series(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with a Series
result = f(pairwise_frames, Series([1, 1, 3, 8]))
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame, Series([1, 1, 3, 8]))
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
result = f(Series([1, 1, 3, 8]), pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(Series([1, 1, 3, 8]), pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_corr_freq_memory_error(self):
# GH 31789
s = Series(range(5), index=date_range("2020", periods=5))
result = s.rolling("12h").corr(s)
expected = Series([np.nan] * 5, index=date_range("2020", periods=5))
tm.assert_series_equal(result, expected)
def test_cov_mulittindex(self):
# GH 34440
columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
index = range(3)
df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns)
result = df.ewm(alpha=0.1).cov()
index = MultiIndex.from_product([range(3), list("ab"), list("xy"), list("AB")])
columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
expected = DataFrame(
np.vstack(
(
np.full((8, 8), np.nan),
np.full((8, 8), 32.000000),
np.full((8, 8), 63.881919),
)
),
index=index,
columns=columns,
)
tm.assert_frame_equal(result, expected)
def test_multindex_columns_pairwise_func(self):
# GH 21157
columns = MultiIndex.from_arrays([["M", "N"], ["P", "Q"]], names=["a", "b"])
df = DataFrame(np.ones((5, 2)), columns=columns)
result = df.rolling(3).corr()
expected = DataFrame(
np.nan,
index=MultiIndex.from_arrays(
[
np.repeat(np.arange(5, dtype=np.int64), 2),
["M", "N"] * 5,
["P", "Q"] * 5,
],
names=[None, "a", "b"],
),
columns=columns,
)
tm.assert_frame_equal(result, expected)
| TestPairwise |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py | {
"start": 17137,
"end": 18237
} | class ____(Benchmark):
r"""
Cube objective function.
This class defines the Cube global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Cube}}(x) = 100(x_2 - x_1^3)^2 + (1 - x1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-10, 10]`
for :math:`i=1,...,N`.
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x = [1, 1]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: jamil#41 has the wrong solution.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([0, 2], [0, 2])
self.global_optimum = [[1.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return 100.0 * (x[1] - x[0] ** 3.0) ** 2.0 + (1.0 - x[0]) ** 2.0
| Cube |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 294440,
"end": 297133
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(7186715712)
# gh-6227
def test_logpdf(self):
y = stats.rayleigh.logpdf(50)
assert_allclose(y, -1246.0879769945718)
def test_logsf(self):
y = stats.rayleigh.logsf(50)
assert_allclose(y, -1250)
@pytest.mark.parametrize("rvs_loc,rvs_scale", [(0.85373171, 0.86932204),
(0.20558821, 0.61621008)])
def test_fit(self, rvs_loc, rvs_scale):
data = stats.rayleigh.rvs(size=250, loc=rvs_loc,
scale=rvs_scale, random_state=self.rng)
def scale_mle(data, floc):
return (np.sum((data - floc) ** 2) / (2 * len(data))) ** .5
# when `floc` is provided, `scale` is found with an analytical formula
scale_expect = scale_mle(data, rvs_loc)
loc, scale = stats.rayleigh.fit(data, floc=rvs_loc)
assert_equal(loc, rvs_loc)
assert_equal(scale, scale_expect)
# when `fscale` is fixed, superclass fit is used to determine `loc`.
loc, scale = stats.rayleigh.fit(data, fscale=.6)
assert_equal(scale, .6)
# with both parameters free, one dimensional optimization is done
# over a new function that takes into account the dependent relation
# of `scale` to `loc`.
loc, scale = stats.rayleigh.fit(data)
# test that `scale` is defined by its relation to `loc`
assert_equal(scale, scale_mle(data, loc))
@pytest.mark.parametrize("rvs_loc,rvs_scale", [[0.74, 0.01],
[0.08464463, 0.12069025]])
def test_fit_comparison_super_method(self, rvs_loc, rvs_scale):
# test that the objective function result of the analytical MLEs is
# less than or equal to that of the numerically optimized estimate
data = stats.rayleigh.rvs(size=250, loc=rvs_loc,
scale=rvs_scale, random_state=self.rng)
_assert_less_or_close_loglike(stats.rayleigh, data)
def test_fit_warnings(self):
assert_fit_warnings(stats.rayleigh)
def test_fit_gh17088(self):
# `rayleigh.fit` could return a location that was inconsistent with
# the data. See gh-17088.
rng = np.random.default_rng(456)
loc, scale, size = 50, 600, 500
rvs = stats.rayleigh.rvs(loc, scale, size=size, random_state=rng)
loc_fit, _ = stats.rayleigh.fit(rvs)
assert loc_fit < np.min(rvs)
loc_fit, scale_fit = stats.rayleigh.fit(rvs, fscale=scale)
assert loc_fit < np.min(rvs)
assert scale_fit == scale
| TestRayleigh |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 15104,
"end": 15840
} | class ____(AttrSource):
pass
# This source is intended to be used in places where a source is needed but it is expected
# that the symbol will be simplified out later on. Symbols with ephemeral sources are
# prioritized to be simplified out when e.g. compared against a symbol without an ephemeral
# source. Guarding on this source is an error.
#
# Example: During subclass view fake-ification, any close-over ViewFunc state should be
# symbolicized / fake-ified to avoid invalid specialization during view replay. This source
# is useful for symbols utilized in the middle of the view chain that are not expected to be
# present within the final view shape metadata.
@dataclasses.dataclass(frozen=True)
| UnspecializedParamBufferSource |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 4217,
"end": 5120
} | class ____(TestCase):
def test_real(self):
y = np.random.rand(
10,
)
assert_array_equal(y, np.real(y))
y = np.array(1)
out = np.real(y)
assert_array_equal(y, out)
assert_(isinstance(out, np.ndarray))
y = 1
out = np.real(y)
assert_equal(y, out)
# assert_(not isinstance(out, np.ndarray)) # XXX: 0D tensor, not scalar
def test_cmplx(self):
y = np.random.rand(
10,
) + 1j * np.random.rand(
10,
)
assert_array_equal(y.real, np.real(y))
y = np.array(1 + 1j)
out = np.real(y)
assert_array_equal(y.real, out)
assert_(isinstance(out, np.ndarray))
y = 1 + 1j
out = np.real(y)
assert_equal(1.0, out)
# assert_(not isinstance(out, np.ndarray)) # XXX: 0D tensor, not scalar
| TestReal |
python | pdm-project__pdm | src/pdm/cli/commands/venv/backends.py | {
"start": 4885,
"end": 5533
} | class ____(Backend):
def pip_args(self, with_pip: bool) -> Iterable[str]:
if with_pip:
return ()
return ("--no-pip", "--no-setuptools", "--no-wheel")
def perform_create(self, location: Path, args: tuple[str, ...], prompt: str | None = None) -> None:
prompt_option = (f"--prompt={prompt}",) if prompt else ()
cmd = [
sys.executable,
"-m",
"virtualenv",
str(location),
"-p",
str(self._resolved_interpreter.executable),
*prompt_option,
*args,
]
self.subprocess_call(cmd)
| VirtualenvBackend |
python | numba__numba | numba/tests/test_dyn_array.py | {
"start": 31385,
"end": 33526
} | class ____(ConstructorLikeBaseTest, TestCase):
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_like(self):
def func(arr):
return np.full_like(arr, 3.5)
self.check_like(func, np.float64)
# Not supported yet.
@unittest.expectedFailure
def test_like_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
def func(arr):
return np.full_like(arr, 4.5)
self.check_like(func, dtype)
def test_like_dtype(self):
def func(arr):
return np.full_like(arr, 4.5, np.bool_)
self.check_like(func, np.float64)
def test_like_dtype_instance(self):
dtype = np.dtype('bool')
def func(arr):
return np.full_like(arr, 4.5, dtype)
self.check_like(func, np.float64)
def test_like_dtype_kwarg(self):
def func(arr):
return np.full_like(arr, 4.5, dtype=np.bool_)
self.check_like(func, np.float64)
def test_like_dtype_str_kwarg(self):
def func(arr):
return np.full_like(arr, 4.5, 'bool_')
self.check_like(func, np.float64)
def test_like_dtype_str_kwarg_alternative_spelling(self):
def func(arr):
return np.full_like(arr, 4.5, dtype='?')
self.check_like(func, np.float64)
def test_like_dtype_non_const_str_kwarg(self):
@njit
def func(arr, fv, dt):
return np.full_like(arr, fv, dt)
with self.assertRaises(TypingError) as raises:
func(np.ones(3,), 4.5, 'int32')
excstr = str(raises.exception)
msg = ("If np.full_like dtype is a string it must be a "
"string constant.")
self.assertIn(msg, excstr)
def test_like_dtype_invalid_str(self):
@njit
def func(arr, fv):
return np.full_like(arr, fv, "ABCDEF")
with self.assertRaises(TypingError) as raises:
func(np.ones(4), 3.4)
excstr = str(raises.exception)
self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr)
| TestNdFullLike |
python | django-import-export__django-import-export | tests/core/tests/resources.py | {
"start": 1820,
"end": 2459
} | class ____(resources.ModelResource):
class Meta:
model = Author
@classmethod
def widget_from_django_field(cls, f, default=widgets.Widget):
if f.name == "name":
return HarshRussianWidget
result = default
internal_type = (
f.get_internal_type()
if callable(getattr(f, "get_internal_type", None))
else ""
)
if internal_type in cls.WIDGETS_MAP:
result = cls.WIDGETS_MAP[internal_type]
if isinstance(result, str):
result = getattr(cls, result)(f)
return result
| AuthorResourceWithCustomWidget |
python | giampaolo__psutil | tests/test_posix.py | {
"start": 17047,
"end": 17287
} | class ____(PsutilTestCase):
def test_getpagesize(self):
pagesize = psutil._psplatform.cext.getpagesize()
assert pagesize > 0
assert pagesize == resource.getpagesize()
assert pagesize == mmap.PAGESIZE
| TestMisc |
python | scipy__scipy | scipy/stats/_survival.py | {
"start": 7924,
"end": 17400
} | class ____:
""" Result object returned by `scipy.stats.ecdf`
Attributes
----------
cdf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the empirical cumulative distribution function.
sf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the complement of the empirical cumulative
distribution function.
"""
cdf: EmpiricalDistributionFunction
sf: EmpiricalDistributionFunction
def __init__(self, q, cdf, sf, n, d):
self.cdf = EmpiricalDistributionFunction(q, cdf, n, d, "cdf")
self.sf = EmpiricalDistributionFunction(q, sf, n, d, "sf")
def _iv_CensoredData(
sample: "npt.ArrayLike | CensoredData", param_name: str = "sample"
) -> CensoredData:
"""Attempt to convert `sample` to `CensoredData`."""
if not isinstance(sample, CensoredData):
try: # takes care of input standardization/validation
sample = CensoredData(uncensored=sample)
except ValueError as e:
message = str(e).replace('uncensored', param_name)
raise type(e)(message) from e
return sample
@xp_capabilities(np_only=True)
def ecdf(sample: "npt.ArrayLike | CensoredData") -> ECDFResult:
"""Empirical cumulative distribution function of a sample.
The empirical cumulative distribution function (ECDF) is a step function
estimate of the CDF of the distribution underlying a sample. This function
returns objects representing both the empirical distribution function and
its complement, the empirical survival function.
Parameters
----------
sample : 1D array_like or `scipy.stats.CensoredData`
Besides array_like, instances of `scipy.stats.CensoredData` containing
uncensored and right-censored observations are supported. Currently,
other instances of `scipy.stats.CensoredData` will result in a
``NotImplementedError``.
Returns
-------
res : `~scipy.stats._result_classes.ECDFResult`
An object with the following attributes.
cdf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the empirical cumulative distribution
function.
sf : `~scipy.stats._result_classes.EmpiricalDistributionFunction`
An object representing the empirical survival function.
The `cdf` and `sf` attributes themselves have the following attributes.
quantiles : ndarray
The unique values in the sample that defines the empirical CDF/SF.
probabilities : ndarray
The point estimates of the probabilities corresponding with
`quantiles`.
And the following methods:
evaluate(x) :
Evaluate the CDF/SF at the argument.
plot(ax) :
Plot the CDF/SF on the provided axes.
confidence_interval(confidence_level=0.95) :
Compute the confidence interval around the CDF/SF at the values in
`quantiles`.
Notes
-----
When each observation of the sample is a precise measurement, the ECDF
steps up by ``1/len(sample)`` at each of the observations [1]_.
When observations are lower bounds, upper bounds, or both upper and lower
bounds, the data is said to be "censored", and `sample` may be provided as
an instance of `scipy.stats.CensoredData`.
For right-censored data, the ECDF is given by the Kaplan-Meier estimator
[2]_; other forms of censoring are not supported at this time.
Confidence intervals are computed according to the Greenwood formula or the
more recent "Exponential Greenwood" formula as described in [4]_.
References
----------
.. [1] Conover, William Jay. Practical nonparametric statistics. Vol. 350.
John Wiley & Sons, 1999.
.. [2] Kaplan, Edward L., and Paul Meier. "Nonparametric estimation from
incomplete observations." Journal of the American statistical
association 53.282 (1958): 457-481.
.. [3] Goel, Manish Kumar, Pardeep Khanna, and Jugal Kishore.
"Understanding survival analysis: Kaplan-Meier estimate."
International journal of Ayurveda research 1.4 (2010): 274.
.. [4] Sawyer, Stanley. "The Greenwood and Exponential Greenwood Confidence
Intervals in Survival Analysis."
https://www.math.wustl.edu/~sawyer/handouts/greenwood.pdf
Examples
--------
**Uncensored Data**
As in the example from [1]_ page 79, five boys were selected at random from
those in a single high school. Their one-mile run times were recorded as
follows.
>>> sample = [6.23, 5.58, 7.06, 6.42, 5.20] # one-mile run times (minutes)
The empirical distribution function, which approximates the distribution
function of one-mile run times of the population from which the boys were
sampled, is calculated as follows.
>>> from scipy import stats
>>> res = stats.ecdf(sample)
>>> res.cdf.quantiles
array([5.2 , 5.58, 6.23, 6.42, 7.06])
>>> res.cdf.probabilities
array([0.2, 0.4, 0.6, 0.8, 1. ])
To plot the result as a step function:
>>> import matplotlib.pyplot as plt
>>> ax = plt.subplot()
>>> res.cdf.plot(ax)
>>> ax.set_xlabel('One-Mile Run Time (minutes)')
>>> ax.set_ylabel('Empirical CDF')
>>> plt.show()
**Right-censored Data**
As in the example from [1]_ page 91, the lives of ten car fanbelts were
tested. Five tests concluded because the fanbelt being tested broke, but
the remaining tests concluded for other reasons (e.g. the study ran out of
funding, but the fanbelt was still functional). The mileage driven
with the fanbelts were recorded as follows.
>>> broken = [77, 47, 81, 56, 80] # in thousands of miles driven
>>> unbroken = [62, 60, 43, 71, 37]
Precise survival times of the fanbelts that were still functional at the
end of the tests are unknown, but they are known to exceed the values
recorded in ``unbroken``. Therefore, these observations are said to be
"right-censored", and the data is represented using
`scipy.stats.CensoredData`.
>>> sample = stats.CensoredData(uncensored=broken, right=unbroken)
The empirical survival function is calculated as follows.
>>> res = stats.ecdf(sample)
>>> res.sf.quantiles
array([37., 43., 47., 56., 60., 62., 71., 77., 80., 81.])
>>> res.sf.probabilities
array([1. , 1. , 0.875, 0.75 , 0.75 , 0.75 , 0.75 , 0.5 , 0.25 , 0. ])
To plot the result as a step function:
>>> ax = plt.subplot()
>>> res.sf.plot(ax)
>>> ax.set_xlabel('Fanbelt Survival Time (thousands of miles)')
>>> ax.set_ylabel('Empirical SF')
>>> plt.show()
"""
sample = _iv_CensoredData(sample)
if sample.num_censored() == 0:
res = _ecdf_uncensored(sample._uncensor())
elif sample.num_censored() == sample._right.size:
res = _ecdf_right_censored(sample)
else:
# Support additional censoring options in follow-up PRs
message = ("Currently, only uncensored and right-censored data is "
"supported.")
raise NotImplementedError(message)
t, cdf, sf, n, d = res
return ECDFResult(t, cdf, sf, n, d)
def _ecdf_uncensored(sample):
sample = np.sort(sample)
x, counts = np.unique(sample, return_counts=True)
# [1].81 "the fraction of [observations] that are less than or equal to x
events = np.cumsum(counts)
n = sample.size
cdf = events / n
# [1].89 "the relative frequency of the sample that exceeds x in value"
sf = 1 - cdf
at_risk = np.concatenate(([n], n - events[:-1]))
return x, cdf, sf, at_risk, counts
def _ecdf_right_censored(sample):
# It is conventional to discuss right-censored data in terms of
# "survival time", "death", and "loss" (e.g. [2]). We'll use that
# terminology here.
# This implementation was influenced by the references cited and also
# https://www.youtube.com/watch?v=lxoWsVco_iM
# https://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
# In retrospect it is probably most easily compared against [3].
# Ultimately, the data needs to be sorted, so this implementation is
# written to avoid a separate call to `unique` after sorting. In hope of
# better performance on large datasets, it also computes survival
# probabilities at unique times only rather than at each observation.
tod = sample._uncensored # time of "death"
tol = sample._right # time of "loss"
times = np.concatenate((tod, tol))
died = np.asarray([1]*tod.size + [0]*tol.size)
# sort by times
i = np.argsort(times)
times = times[i]
died = died[i]
at_risk = np.arange(times.size, 0, -1)
# logical indices of unique times
j = np.diff(times, prepend=-np.inf, append=np.inf) > 0
j_l = j[:-1] # first instances of unique times
j_r = j[1:] # last instances of unique times
# get number at risk and deaths at each unique time
t = times[j_l] # unique times
n = at_risk[j_l] # number at risk at each unique time
cd = np.cumsum(died)[j_r] # cumulative deaths up to/including unique times
d = np.diff(cd, prepend=0) # deaths at each unique time
# compute survival function
sf = np.cumprod((n - d) / n)
cdf = 1 - sf
return t, cdf, sf, n, d
@dataclass
| ECDFResult |
python | jazzband__django-model-utils | model_utils/fields.py | {
"start": 7100,
"end": 7963
} | class ____:
def __init__(self, instance: models.Model, field_name: str, excerpt_field_name: str):
# instead of storing actual values store a reference to the instance
# along with field names, this makes assignment possible
self.instance = instance
self.field_name = field_name
self.excerpt_field_name = excerpt_field_name
@property
def content(self) -> str:
return self.instance.__dict__[self.field_name]
@content.setter
def content(self, val: str) -> None:
setattr(self.instance, self.field_name, val)
@property
def excerpt(self) -> str:
return getattr(self.instance, self.excerpt_field_name)
@property
def has_more(self) -> bool:
return self.excerpt.strip() != self.content.strip()
def __str__(self) -> str:
return self.content
| SplitText |
python | kamyu104__LeetCode-Solutions | Python/sum-of-floored-pairs.py | {
"start": 97,
"end": 618
} | class ____(object):
def sumOfFlooredPairs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
prefix, counter = [0]*(max(nums)+1), collections.Counter(nums)
for num, cnt in counter.iteritems():
for j in xrange(num, len(prefix), num):
prefix[j] += counter[num]
for i in xrange(len(prefix)-1):
prefix[i+1] += prefix[i]
return reduce(lambda total, num: (total+prefix[num])%MOD, nums, 0)
| Solution |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 12974,
"end": 13114
} | class ____(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.p = 1
@KDTreeTest
| _Test_random_ball_l1 |
python | apache__airflow | providers/apache/pinot/tests/integration/apache/pinot/hooks/test_pinot.py | {
"start": 970,
"end": 1551
} | class ____:
# This test occasionally fail in the CI. Re-run this test if it failed after timeout but only once.
@pytest.mark.flaky(reruns=1, reruns_delay=30)
@mock.patch.dict("os.environ", AIRFLOW_CONN_PINOT_BROKER_DEFAULT="pinot://pinot:8000/")
def test_should_return_records(self):
hook = PinotDbApiHook()
sql = "select playerName from baseballStats ORDER BY playerName limit 5"
records = hook.get_records(sql)
assert records == [["A. Harry"], ["A. Harry"], ["Aaron"], ["Aaron Albert"], ["Aaron Albert"]]
| TestPinotDbApiHookIntegration |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 24386,
"end": 24853
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.ListPayload(fe_type)
members = [
# The meminfo data points to a ListPayload
('meminfo', types.MemInfoPointer(payload_type)),
# This member is only used only for reflected lists
('parent', types.pyobject),
]
super(ListModel, self).__init__(dmm, fe_type, members)
@register_default(types.ListIter)
| ListModel |
python | ray-project__ray | rllib/offline/estimators/weighted_importance_sampling.py | {
"start": 516,
"end": 7047
} | class ____(OffPolicyEstimator):
r"""The step-wise WIS estimator.
Let s_t, a_t, and r_t be the state, action, and reward at timestep t.
For behavior policy \pi_b and evaluation policy \pi_e, define the
cumulative importance ratio at timestep t as:
p_t = \sum_{t'=0}^t (\pi_e(a_{t'} | s_{t'}) / \pi_b(a_{t'} | s_{t'})).
Define the average importance ratio over episodes i in the dataset D as:
w_t = \sum_{i \in D} p^(i)_t / |D|
This estimator computes the expected return for \pi_e for an episode as:
V^{\pi_e}(s_0) = \E[\sum_t \gamma ^ {t} * (p_t / w_t) * r_t]
and returns the mean and standard deviation over episodes.
For more information refer to https://arxiv.org/pdf/1911.06854.pdf"""
@override(OffPolicyEstimator)
def __init__(self, policy: Policy, gamma: float, epsilon_greedy: float = 0.0):
super().__init__(policy, gamma, epsilon_greedy)
# map from time to cummulative propensity values
self.cummulative_ips_values = []
# map from time to number of episodes that reached this time
self.episode_timestep_count = []
# map from eps id to mapping from time to propensity values
self.p = {}
@override(OffPolicyEstimator)
def estimate_on_single_episode(self, episode: SampleBatch) -> Dict[str, Any]:
estimates_per_epsiode = {}
rewards = episode["rewards"]
eps_id = episode[SampleBatch.EPS_ID][0]
if eps_id not in self.p:
raise ValueError(
f"Cannot find target weight for episode {eps_id}. "
f"Did it go though the peek_on_single_episode() function?"
)
# calculate stepwise weighted IS estimate
v_behavior = 0.0
v_target = 0.0
episode_p = self.p[eps_id]
for t in range(episode.count):
v_behavior += rewards[t] * self.gamma**t
w_t = self.cummulative_ips_values[t] / self.episode_timestep_count[t]
v_target += episode_p[t] / w_t * rewards[t] * self.gamma**t
estimates_per_epsiode["v_behavior"] = v_behavior
estimates_per_epsiode["v_target"] = v_target
return estimates_per_epsiode
@override(OffPolicyEstimator)
def estimate_on_single_step_samples(
self, batch: SampleBatch
) -> Dict[str, List[float]]:
estimates_per_epsiode = {}
rewards, old_prob = batch["rewards"], batch["action_prob"]
new_prob = self.compute_action_probs(batch)
weights = new_prob / old_prob
v_behavior = rewards
v_target = weights * rewards / np.mean(weights)
estimates_per_epsiode["v_behavior"] = v_behavior
estimates_per_epsiode["v_target"] = v_target
estimates_per_epsiode["weights"] = weights
estimates_per_epsiode["new_prob"] = new_prob
estimates_per_epsiode["old_prob"] = old_prob
return estimates_per_epsiode
@override(OffPolicyEstimator)
def on_before_split_batch_by_episode(
self, sample_batch: SampleBatch
) -> SampleBatch:
self.cummulative_ips_values = []
self.episode_timestep_count = []
self.p = {}
return sample_batch
@override(OffPolicyEstimator)
def peek_on_single_episode(self, episode: SampleBatch) -> None:
old_prob = episode["action_prob"]
new_prob = self.compute_action_probs(episode)
# calculate importance ratios
episode_p = []
for t in range(episode.count):
if t == 0:
pt_prev = 1.0
else:
pt_prev = episode_p[t - 1]
episode_p.append(pt_prev * new_prob[t] / old_prob[t])
for t, p_t in enumerate(episode_p):
if t >= len(self.cummulative_ips_values):
self.cummulative_ips_values.append(p_t)
self.episode_timestep_count.append(1.0)
else:
self.cummulative_ips_values[t] += p_t
self.episode_timestep_count[t] += 1.0
eps_id = episode[SampleBatch.EPS_ID][0]
if eps_id in self.p:
raise ValueError(
f"eps_id {eps_id} was already passed to the peek function. "
f"Make sure dataset contains only unique episodes with unique ids."
)
self.p[eps_id] = episode_p
@override(OfflineEvaluator)
def estimate_on_dataset(
self, dataset: Dataset, *, n_parallelism: int = ...
) -> Dict[str, Any]:
"""Computes the weighted importance sampling estimate on a dataset.
Note: This estimate works for both continuous and discrete action spaces.
Args:
dataset: Dataset to compute the estimate on. Each record in dataset should
include the following columns: `obs`, `actions`, `action_prob` and
`rewards`. The `obs` on each row shoud be a vector of D dimensions.
n_parallelism: Number of parallel workers to use for the computation.
Returns:
Dictionary with the following keys:
v_target: The weighted importance sampling estimate.
v_behavior: The behavior policy estimate.
v_gain_mean: The mean of the gain of the target policy over the
behavior policy.
v_gain_ste: The standard error of the gain of the target policy over
the behavior policy.
"""
# compute the weights and weighted rewards
batch_size = max(dataset.count() // n_parallelism, 1)
dataset = dataset.map_batches(
remove_time_dim, batch_size=batch_size, batch_format="pandas"
)
updated_ds = dataset.map_batches(
compute_is_weights,
batch_size=batch_size,
batch_format="pandas",
fn_kwargs={
"policy_state": self.policy.get_state(),
"estimator_class": self.__class__,
},
)
v_target = updated_ds.mean("weighted_rewards") / updated_ds.mean("weights")
v_behavior = updated_ds.mean("rewards")
v_gain_mean = v_target / v_behavior
v_gain_ste = (
updated_ds.std("weighted_rewards")
/ updated_ds.mean("weights")
/ v_behavior
/ math.sqrt(dataset.count())
)
return {
"v_target": v_target,
"v_behavior": v_behavior,
"v_gain_mean": v_gain_mean,
"v_gain_ste": v_gain_ste,
}
| WeightedImportanceSampling |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/asset.py | {
"start": 987,
"end": 1264
} | class ____(BaseModel):
"""Asset checks execution status."""
status: Optional[str] # HEALTHY, WARNING, DEGRADED, UNKNOWN, NOT_APPLICABLE
num_failed_checks: Optional[int]
num_warning_checks: Optional[int]
total_num_checks: Optional[int]
| DgApiAssetChecksStatus |
python | getsentry__sentry | src/sentry/replays/lib/eap/snuba_transpiler.py | {
"start": 31561,
"end": 33942
} | class ____(TypedDict):
data: list[dict[str, bool | float | int | str | None]]
meta: QueryResultMeta
def translate_response(
query: Query, settings: Settings, query_result: TraceItemTableResponse
) -> QueryResult:
# We infer the type of each expression in the select statement. The type information is used
# to extract the value from the response object.
type_map = {label(expr): type_infer(expr, settings) for expr in query.select}
def get_value(name: str, result: AttributeValue) -> bool | float | int | str | None:
"""Return the query result's value using type inference."""
if result.is_null:
return None
typ_ = type_map[name]
if typ_ == bool:
return result.val_bool
if typ_ == float:
return result.val_double
if typ_ == int:
return result.val_int
if typ_ == str:
return result.val_str
else:
return None
if len(query_result.column_values) > 0:
data_len = len(query_result.column_values[0].results)
else:
data_len = 0
response: QueryResult = {
"data": [{} for _ in range(data_len)],
"meta": {
"downsampling_mode": {
"can_go_to_higher_accuracy": query_result.meta.downsampled_storage_meta.can_go_to_higher_accuracy_tier,
"estimated_rows": query_result.meta.downsampled_storage_meta.estimated_num_rows,
},
"next_offset": query_result.page_token.offset,
"request_id": query_result.meta.request_id,
},
}
# I'm assuming that all the columns return an identical number of results. As far as I know
# this is a safe assumption.
for c in query_result.column_values:
for i, result in enumerate(c.results):
response["data"][i][c.attribute_name] = get_value(c.attribute_name, result)
return response
def type_infer(
expression: AliasedExpression | Column | CurriedFunction | Function, settings: Settings
) -> type[bool | float | int | str]:
"""Infer the type of the expression."""
if isinstance(expression, Column):
return settings["attribute_types"][expression.name]
elif isinstance(expression, AliasedExpression):
return settings["attribute_types"][expression.exp.name]
else:
return float
| QueryResult |
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 14750,
"end": 18064
} | class ____(OutputGraphGuardsState):
"""
A minimal interface for full graph capture. It is intended to be
the target of any tracer that feeds into backends.
Currently dynamo's OutputGraph is the only known implementation
of this interface, used by (aot) precompile and (strict) export.
Importantly, that implementation also contains many other fields
that are using during tracing but not included in this interface
because they are not used once tracing is complete.
It should be safe to assume that (caching) precompile also uses
this interface.
In the future, we want make_fx, used by (non-strict) export, to
also implement this interface.
The serializable part of this interface is OutputGraphGuardsState.
We do not need to serialize other parts; however it will pay to
be disciplined about what those other parts are, especially since
we want other tracers to be able to meaningfully implement them,
and we should generally try to cut them down when possible.
"""
def __init__(
self,
output_graph_guards_state: OutputGraphGuardsState,
import_sources: Optional[dict[str, str]] = None,
shape_env: Optional[ShapeEnv] = None,
export_metadata: Optional[ExportMetaData] = None,
tracked_fakes_id_to_source: Optional[dict[int, list[Source]]] = None,
):
super().__init__(
output_graph_guards_state.local_scope,
output_graph_guards_state.global_scope,
output_graph_guards_state.torch_function_mode_stack,
output_graph_guards_state.guard_on_key_order,
output_graph_guards_state.input_source_to_sizes_strides,
output_graph_guards_state.dual_level,
output_graph_guards_state.functorch_layers,
output_graph_guards_state.current_device,
output_graph_guards_state.global_state_guard,
output_graph_guards_state._guards,
output_graph_guards_state._aotautograd_guards,
output_graph_guards_state.export,
output_graph_guards_state.skip_guards_check,
output_graph_guards_state.export_constraints,
output_graph_guards_state.name_of_builtins_dict_key_in_fglobals,
)
self.import_sources = import_sources or {}
# The following fields are currently known to be used by clients.
# In particular, we need:
# - shape_env, for building guards
# - export_metadata, for un/flattening inputs and outputs
# - tracked_fakes_id_to_source, for processing tensor dim constraints
self._shape_env = shape_env or ShapeEnv() # private for inheritance
self.export_metadata = export_metadata or ExportMetaData()
self.tracked_fakes_id_to_source: dict[int, list[Source]] = (
tracked_fakes_id_to_source or {}
)
@property
def shape_env(self) -> ShapeEnv:
return self._shape_env
def bypass_package(self, reason: str = "", **kwargs: Any) -> None:
# NOTE: currently there are no tests for this but it is reachable
# when building guards, so technically necessary to include here.
# It is unclear whether we should include packaging altogether.
raise NotImplementedError
| OutputGraphCommon |
python | kamyu104__LeetCode-Solutions | Python/pour-water-between-buckets-to-make-water-levels-equal.py | {
"start": 49,
"end": 697
} | class ____(object):
def equalizeWater(self, buckets, loss):
"""
:type buckets: List[int]
:type loss: int
:rtype: float
"""
def check(buckets, rate, x):
return sum(b-x for b in buckets if b-x > 0)*rate >= sum(x-b for b in buckets if x-b > 0)
EPS = 1e-5
rate = (100-loss)/100.0
left, right = float(min(buckets)), float(sum(buckets))/len(buckets)
while right-left > EPS:
mid = left + (right-left)/2
if not check(buckets, rate, mid):
right = mid
else:
left = mid
return left
| Solution |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 15198,
"end": 15450
} | class ____(ProjectError):
"""
Raised when trying to install an unknown version of a package.
"""
def __init__(self, version: str, name: str):
super().__init__(f"Unknown version '{version}' for repo '{name}'.")
| UnknownVersionError |
python | eth-brownie__brownie | brownie/test/managers/runner.py | {
"start": 3765,
"end": 4693
} | class ____:
"""
Custom printer for test execution.
Produces more-readable output when stdout capture is disabled.
"""
_builtins_print = builtins.print
def start(self):
self.first_line = True
builtins.print = self
def __call__(self, *values, sep=" ", end="\n", file=sys.stdout, flush=False):
if file != sys.stdout:
self._builtins_print(*values, sep=sep, end=end, file=file, flush=flush)
return
if self.first_line:
self.first_line = False
sys.stdout.write(f"{yellow}RUNNING{color}\n")
text = f"{sep.join(str(i) for i in values)}{end}"
sys.stdout.write(text)
if flush:
sys.stdout.flush()
def finish(self, nodeid):
if not self.first_line:
sys.stdout.write(f"{nodeid} ")
sys.stdout.flush()
builtins.print = self._builtins_print
| PytestPrinter |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 16730,
"end": 17455
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "DuplicateDynamicPartitionError"
partitions_def_name = graphene.NonNull(graphene.String)
partition_name = graphene.NonNull(graphene.String)
def __init__(self, partitions_def_name, partition_name):
super().__init__()
self.partitions_def_name = check.str_param(partitions_def_name, "partitions_def_name")
self.partition_name = check.str_param(partition_name, "partition_name")
self.message = (
f"Partition {self.partition_name} already exists in dynamic partitions definition"
f" {self.partitions_def_name}."
)
| GrapheneDuplicateDynamicPartitionError |
python | PyCQA__pylint | tests/functional/u/unused/unused_private_member.py | {
"start": 8587,
"end": 8886
} | class ____:
__ham = 1
def method(self):
print(self.__class__.__ham)
# https://github.com/pylint-dev/pylint/issues/4756
# Check for false positives emitted when private functions are not referenced in the class body
# with standard calls but passed as arguments to other functions.
| Foo |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pgvector/destination_pgvector/common/sql/sql_processor.py | {
"start": 3707,
"end": 34182
} | class ____(RecordProcessorBase):
"""A base class to be used for SQL Caches."""
type_converter_class: type[SQLTypeConverter] = SQLTypeConverter
"""The type converter class to use for converting JSON schema types to SQL types."""
normalizer = LowerCaseNormalizer
"""The name normalizer to user for table and column name normalization."""
file_writer_class: type[FileWriterBase]
"""The file writer class to use for writing files to the cache."""
supports_merge_insert = False
"""True if the database supports the MERGE INTO syntax."""
# Constructor:
def __init__(
self,
*,
sql_config: SqlConfig,
catalog_provider: CatalogProvider,
state_writer: StateWriterBase | None = None,
file_writer: FileWriterBase | None = None,
temp_dir: Path | None = None,
temp_file_cleanup: bool,
) -> None:
if not temp_dir and not file_writer:
raise exc.PyAirbyteInternalError(
message="Either `temp_dir` or `file_writer` must be provided.",
)
state_writer = state_writer or StdOutStateWriter()
self._sql_config: SqlConfig = sql_config
super().__init__(
state_writer=state_writer,
catalog_provider=catalog_provider,
)
self.file_writer = file_writer or self.file_writer_class(
cache_dir=cast(Path, temp_dir),
cleanup=temp_file_cleanup,
)
self.type_converter = self.type_converter_class()
self._cached_table_definitions: dict[str, sqlalchemy.Table] = {}
self._ensure_schema_exists()
# Public interface:
@property
def sql_config(self) -> SqlConfig:
return self._sql_config
def get_sql_alchemy_url(self) -> SecretString:
"""Return the SQLAlchemy URL to use."""
return self.sql_config.get_sql_alchemy_url()
@final
@cached_property
def database_name(self) -> str:
"""Return the name of the database."""
return self.sql_config.get_database_name()
@final
def get_sql_engine(self) -> Engine:
"""Return a new SQL engine to use."""
return self.sql_config.get_sql_engine()
@contextmanager
def get_sql_connection(self) -> Generator[sqlalchemy.engine.Connection, None, None]:
"""A context manager which returns a new SQL connection for running queries.
If the connection needs to close, it will be closed automatically.
"""
with self.get_sql_engine().begin() as connection:
self._init_connection_settings(connection)
yield connection
connection.close()
del connection
def get_sql_table_name(
self,
stream_name: str,
) -> str:
"""Return the name of the SQL table for the given stream."""
table_prefix = self.sql_config.table_prefix
# TODO: Add default prefix based on the source name.
return self.normalizer.normalize(
f"{table_prefix}{stream_name}",
)
@final
def get_sql_table(
self,
stream_name: str,
) -> sqlalchemy.Table:
"""Return the main table object for the stream."""
return self._get_table_by_name(
self.get_sql_table_name(stream_name),
)
# Record processing:
def process_record_message(
self,
record_msg: AirbyteRecordMessage,
stream_schema: dict,
) -> None:
"""Write a record to the cache.
This method is called for each record message, before the batch is written.
In most cases, the SQL processor will not perform any action, but will pass this along to to
the file processor.
"""
self.file_writer.process_record_message(
record_msg,
stream_schema=stream_schema,
)
# Protected members (non-public interface):
def _init_connection_settings(self, connection: Connection) -> None:
"""This is called automatically whenever a new connection is created.
By default this is a no-op. Subclasses can use this to set connection settings, such as
timezone, case-sensitivity settings, and other session-level variables.
"""
pass
def _invalidate_table_cache(
self,
table_name: str,
) -> None:
"""Invalidate the the named table cache.
This should be called whenever the table schema is known to have changed.
"""
if table_name in self._cached_table_definitions:
del self._cached_table_definitions[table_name]
def _get_table_by_name(
self,
table_name: str,
*,
force_refresh: bool = False,
shallow_okay: bool = False,
) -> sqlalchemy.Table:
"""Return a table object from a table name.
If 'shallow_okay' is True, the table will be returned without requiring properties to
be read from the database.
To prevent unnecessary round-trips to the database, the table is cached after the first
query. To ignore the cache and force a refresh, set 'force_refresh' to True.
"""
if force_refresh and shallow_okay:
raise exc.PyAirbyteInternalError(
message="Cannot force refresh and use shallow query at the same time."
)
if force_refresh and table_name in self._cached_table_definitions:
self._invalidate_table_cache(table_name)
if table_name not in self._cached_table_definitions:
if shallow_okay:
# Return a shallow instance, without column declarations. Do not cache
# the table definition in this case.
return sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(schema=self.sql_config.schema_name),
)
self._cached_table_definitions[table_name] = sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(schema=self.sql_config.schema_name),
autoload_with=self.get_sql_engine(),
)
return self._cached_table_definitions[table_name]
def _ensure_schema_exists(
self,
) -> None:
"""Return a new (unique) temporary table name."""
schema_name = self.sql_config.schema_name
if schema_name in self._get_schemas_list():
return
sql = f"CREATE SCHEMA IF NOT EXISTS {schema_name}"
try:
self._execute_sql(sql)
except Exception as ex:
# Ignore schema exists errors.
if "already exists" not in str(ex):
raise
if DEBUG_MODE:
found_schemas = self._get_schemas_list()
assert schema_name in found_schemas, (
f"Schema {schema_name} was not created. Found: {found_schemas}"
)
def _quote_identifier(self, identifier: str) -> str:
"""Return the given identifier, quoted."""
return f'"{identifier}"'
@final
def _get_temp_table_name(
self,
stream_name: str,
batch_id: str | None = None, # ULID of the batch
) -> str:
"""Return a new (unique) temporary table name."""
batch_id = batch_id or str(ulid.ULID())
return self.normalizer.normalize(f"{stream_name}_{batch_id}")
def _fully_qualified(
self,
table_name: str,
) -> str:
"""Return the fully qualified name of the given table."""
return f"{self.sql_config.schema_name}.{self._quote_identifier(table_name)}"
@final
def _create_table_for_loading(
self,
/,
stream_name: str,
batch_id: str,
) -> str:
"""Create a new table for loading data."""
temp_table_name = self._get_temp_table_name(stream_name, batch_id)
column_definition_str = ",\n ".join(
f"{self._quote_identifier(column_name)} {sql_type}"
for column_name, sql_type in self._get_sql_column_definitions(stream_name).items()
)
self._create_table(temp_table_name, column_definition_str)
return temp_table_name
def _get_tables_list(
self,
) -> list[str]:
"""Return a list of all tables in the database."""
with self.get_sql_connection() as conn:
inspector: Inspector = sqlalchemy.inspect(conn)
return inspector.get_table_names(schema=self.sql_config.schema_name)
def _get_schemas_list(
self,
database_name: str | None = None,
) -> list[str]:
"""Return a list of all tables in the database."""
inspector: Inspector = sqlalchemy.inspect(self.get_sql_engine())
database_name = database_name or self.database_name
found_schemas = inspector.get_schema_names()
return [
found_schema.split(".")[-1].strip('"')
for found_schema in found_schemas
if "." not in found_schema
or (found_schema.split(".")[0].lower().strip('"') == database_name.lower())
]
def _ensure_final_table_exists(
self,
stream_name: str,
*,
create_if_missing: bool = True,
) -> str:
"""Create the final table if it doesn't already exist.
Return the table name.
"""
table_name = self.get_sql_table_name(stream_name)
did_exist = self._table_exists(table_name)
if not did_exist and create_if_missing:
column_definition_str = ",\n ".join(
f"{self._quote_identifier(column_name)} {sql_type}"
for column_name, sql_type in self._get_sql_column_definitions(
stream_name,
).items()
)
self._create_table(table_name, column_definition_str)
return table_name
def _ensure_compatible_table_schema(
self,
stream_name: str,
table_name: str,
) -> None:
"""Return true if the given table is compatible with the stream's schema.
Raises an exception if the table schema is not compatible with the schema of the
input stream.
TODO:
- Expand this to check for column types and sizes.
"""
self._add_missing_columns_to_table(
stream_name=stream_name,
table_name=table_name,
)
@final
def _create_table(
self,
table_name: str,
column_definition_str: str,
primary_keys: list[str] | None = None,
) -> None:
if primary_keys:
pk_str = ", ".join(primary_keys)
column_definition_str += f",\n PRIMARY KEY ({pk_str})"
cmd = f"""
CREATE TABLE {self._fully_qualified(table_name)} (
{column_definition_str}
)
"""
_ = self._execute_sql(cmd)
def _get_sql_column_definitions(
self,
stream_name: str,
) -> dict[str, sqlalchemy.types.TypeEngine]:
"""Return the column definitions for the given stream."""
columns: dict[str, sqlalchemy.types.TypeEngine] = {}
properties = self.catalog_provider.get_stream_properties(stream_name)
for property_name, json_schema_property_def in properties.items():
clean_prop_name = self.normalizer.normalize(property_name)
columns[clean_prop_name] = self.type_converter.to_sql_type(
json_schema_property_def,
)
columns[AB_RAW_ID_COLUMN] = self.type_converter_class.get_string_type()
columns[AB_EXTRACTED_AT_COLUMN] = sqlalchemy.TIMESTAMP()
columns[AB_META_COLUMN] = self.type_converter_class.get_json_type()
return columns
@final
def write_stream_data(
self,
stream_name: str,
write_strategy: WriteStrategy,
) -> list[BatchHandle]:
"""Finalize all uncommitted batches.
This is a generic 'final' SQL implementation, which should not be overridden.
Returns a mapping of batch IDs to batch handles, for those processed batches.
TODO: Add a dedupe step here to remove duplicates from the temp table.
Some sources will send us duplicate records within the same stream,
although this is a fairly rare edge case we can ignore in V1.
"""
# Flush any pending writes
self.file_writer.flush_active_batches()
with self.finalizing_batches(stream_name) as batches_to_finalize:
# Make sure the target schema and target table exist.
self._ensure_schema_exists()
final_table_name = self._ensure_final_table_exists(
stream_name,
create_if_missing=True,
)
if not batches_to_finalize:
# If there are no batches to finalize, return after ensuring the table exists.
return []
files: list[Path] = []
# Get a list of all files to finalize from all pending batches.
for batch_handle in batches_to_finalize:
files += batch_handle.files
# Use the max batch ID as the batch ID for table names.
max_batch_id = max(batch.batch_id for batch in batches_to_finalize)
temp_table_name = self._write_files_to_new_table(
files=files,
stream_name=stream_name,
batch_id=max_batch_id,
)
try:
self._write_temp_table_to_final_table(
stream_name=stream_name,
temp_table_name=temp_table_name,
final_table_name=final_table_name,
write_strategy=write_strategy,
)
finally:
self._drop_temp_table(temp_table_name, if_exists=True)
progress.log_stream_finalized(stream_name)
# Return the batch handles as measure of work completed.
return batches_to_finalize
@final
def cleanup_all(self) -> None:
"""Clean resources."""
self.file_writer.cleanup_all()
# Finalizing context manager
@final
@contextlib.contextmanager
def finalizing_batches(
self,
stream_name: str,
) -> Generator[list[BatchHandle], str, None]:
"""Context manager to use for finalizing batches, if applicable.
Returns a mapping of batch IDs to batch handles, for those processed batches.
"""
batches_to_finalize: list[BatchHandle] = self.file_writer.get_pending_batches(stream_name)
state_messages_to_finalize: list[AirbyteStateMessage] = self._pending_state_messages[
stream_name
].copy()
self._pending_state_messages[stream_name].clear()
progress.log_batches_finalizing(stream_name, len(batches_to_finalize))
yield batches_to_finalize
self._finalize_state_messages(state_messages_to_finalize)
progress.log_batches_finalized(stream_name, len(batches_to_finalize))
for batch_handle in batches_to_finalize:
batch_handle.finalized = True
self._finalized_state_messages[stream_name] += state_messages_to_finalize
def _execute_sql(self, sql: str | TextClause | Executable) -> CursorResult:
"""Execute the given SQL statement."""
if isinstance(sql, str):
sql = text(sql)
if isinstance(sql, TextClause):
sql = sql.execution_options(
autocommit=True,
)
with self.get_sql_connection() as conn:
try:
result = conn.execute(sql)
except (
sqlalchemy.exc.ProgrammingError,
sqlalchemy.exc.SQLAlchemyError,
) as ex:
msg = f"Error when executing SQL:\n{sql}\n{type(ex).__name__}{ex!s}"
raise SQLRuntimeError(msg) from None # from ex
return result
def _drop_temp_table(
self,
table_name: str,
*,
if_exists: bool = True,
) -> None:
"""Drop the given table."""
exists_str = "IF EXISTS" if if_exists else ""
self._execute_sql(f"DROP TABLE {exists_str} {self._fully_qualified(table_name)}")
def _write_files_to_new_table(
self,
files: list[Path],
stream_name: str,
batch_id: str,
) -> str:
"""Write a file(s) to a new table.
This is a generic implementation, which can be overridden by subclasses
to improve performance.
"""
temp_table_name = self._create_table_for_loading(stream_name, batch_id)
for file_path in files:
dataframe = pd.read_json(file_path, lines=True)
sql_column_definitions: dict[str, TypeEngine] = self._get_sql_column_definitions(
stream_name
)
# Remove fields that are not in the schema
for col_name in dataframe.columns:
if col_name not in sql_column_definitions:
dataframe = dataframe.drop(columns=col_name)
# Pandas will auto-create the table if it doesn't exist, which we don't want.
if not self._table_exists(temp_table_name):
raise exc.PyAirbyteInternalError(
message="Table does not exist after creation.",
context={
"temp_table_name": temp_table_name,
},
)
# Normalize all column names to lower case.
dataframe.columns = Index([self.normalizer.normalize(col) for col in dataframe.columns])
# Write the data to the table.
dataframe.to_sql(
temp_table_name,
self.get_sql_alchemy_url(),
schema=self.sql_config.schema_name,
if_exists="append",
index=False,
dtype=sql_column_definitions,
)
return temp_table_name
def _add_column_to_table(
self,
table: Table,
column_name: str,
column_type: sqlalchemy.types.TypeEngine,
) -> None:
"""Add a column to the given table."""
self._execute_sql(
text(
f"ALTER TABLE {self._fully_qualified(table.name)} "
f"ADD COLUMN {column_name} {column_type}"
),
)
def _add_missing_columns_to_table(
self,
stream_name: str,
table_name: str,
) -> None:
"""Add missing columns to the table.
This is a no-op if all columns are already present.
"""
columns = self._get_sql_column_definitions(stream_name)
# First check without forcing a refresh of the cache (faster). If nothing is missing,
# then we're done.
table = self._get_table_by_name(
table_name,
force_refresh=False,
)
missing_columns: bool = any(column_name not in table.columns for column_name in columns)
if missing_columns:
# If we found missing columns, refresh the cache and then take action on anything
# that's still confirmed missing.
columns_added = False
table = self._get_table_by_name(
table_name,
force_refresh=True,
)
for column_name, column_type in columns.items():
if column_name not in table.columns:
self._add_column_to_table(table, column_name, column_type)
columns_added = True
if columns_added:
# We've added columns, so invalidate the cache.
self._invalidate_table_cache(table_name)
@final
def _write_temp_table_to_final_table(
self,
stream_name: str,
temp_table_name: str,
final_table_name: str,
write_strategy: WriteStrategy,
) -> None:
"""Write the temp table into the final table using the provided write strategy."""
has_pks: bool = bool(self._get_primary_keys(stream_name))
has_incremental_key: bool = bool(self._get_incremental_key(stream_name))
if write_strategy == WriteStrategy.MERGE and not has_pks:
raise exc.PyAirbyteInputError(
message="Cannot use merge strategy on a stream with no primary keys.",
context={
"stream_name": stream_name,
},
)
if write_strategy == WriteStrategy.AUTO:
configured_destination_sync_mode: DestinationSyncMode = (
self.catalog_provider.get_destination_sync_mode(stream_name)
)
if configured_destination_sync_mode == DestinationSyncMode.overwrite:
write_strategy = WriteStrategy.REPLACE
elif configured_destination_sync_mode == DestinationSyncMode.append:
write_strategy = WriteStrategy.APPEND
elif configured_destination_sync_mode == DestinationSyncMode.append_dedup:
write_strategy = WriteStrategy.MERGE
# TODO: Consider removing the rest of these cases if they are dead code.
elif has_pks:
write_strategy = WriteStrategy.MERGE
elif has_incremental_key:
write_strategy = WriteStrategy.APPEND
else:
write_strategy = WriteStrategy.REPLACE
if write_strategy == WriteStrategy.REPLACE:
# Note: No need to check for schema compatibility
# here, because we are fully replacing the table.
self._swap_temp_table_with_final_table(
stream_name=stream_name,
temp_table_name=temp_table_name,
final_table_name=final_table_name,
)
return
if write_strategy == WriteStrategy.APPEND:
self._ensure_compatible_table_schema(
stream_name=stream_name,
table_name=final_table_name,
)
self._append_temp_table_to_final_table(
stream_name=stream_name,
temp_table_name=temp_table_name,
final_table_name=final_table_name,
)
return
if write_strategy == WriteStrategy.MERGE:
self._ensure_compatible_table_schema(
stream_name=stream_name,
table_name=final_table_name,
)
if not self.supports_merge_insert:
# Fallback to emulated merge if the database does not support merge natively.
self._emulated_merge_temp_table_to_final_table(
stream_name=stream_name,
temp_table_name=temp_table_name,
final_table_name=final_table_name,
)
return
self._merge_temp_table_to_final_table(
stream_name=stream_name,
temp_table_name=temp_table_name,
final_table_name=final_table_name,
)
return
raise exc.PyAirbyteInternalError(
message="Write strategy is not supported.",
context={
"write_strategy": write_strategy,
},
)
def _append_temp_table_to_final_table(
self,
temp_table_name: str,
final_table_name: str,
stream_name: str,
) -> None:
nl = "\n"
columns = [self._quote_identifier(c) for c in self._get_sql_column_definitions(stream_name)]
self._execute_sql(
f"""
INSERT INTO {self._fully_qualified(final_table_name)} (
{f",{nl} ".join(columns)}
)
SELECT
{f",{nl} ".join(columns)}
FROM {self._fully_qualified(temp_table_name)}
""",
)
def _get_primary_keys(
self,
stream_name: str,
) -> list[str]:
pks = self.catalog_provider.get_configured_stream_info(stream_name).primary_key
if not pks:
return []
joined_pks = [".".join(pk) for pk in pks]
for pk in joined_pks:
if "." in pk:
msg = f"Nested primary keys are not yet supported. Found: {pk}"
raise NotImplementedError(msg)
return joined_pks
def _get_incremental_key(
self,
stream_name: str,
) -> str | None:
return self.catalog_provider.get_configured_stream_info(stream_name).cursor_field
def _swap_temp_table_with_final_table(
self,
stream_name: str,
temp_table_name: str,
final_table_name: str,
) -> None:
"""Merge the temp table into the main one.
This implementation requires MERGE support in the SQL DB.
Databases that do not support this syntax can override this method.
"""
if final_table_name is None:
raise exc.PyAirbyteInternalError(message="Arg 'final_table_name' cannot be None.")
if temp_table_name is None:
raise exc.PyAirbyteInternalError(message="Arg 'temp_table_name' cannot be None.")
_ = stream_name
deletion_name = f"{final_table_name}_deleteme"
commands = "\n".join([
f"ALTER TABLE {self._fully_qualified(final_table_name)} RENAME TO {deletion_name};",
f"ALTER TABLE {self._fully_qualified(temp_table_name)} RENAME TO {final_table_name};",
f"DROP TABLE {self._fully_qualified(deletion_name)};",
])
self._execute_sql(commands)
def _merge_temp_table_to_final_table(
self,
stream_name: str,
temp_table_name: str,
final_table_name: str,
) -> None:
"""Merge the temp table into the main one.
This implementation requires MERGE support in the SQL DB.
Databases that do not support this syntax can override this method.
"""
nl = "\n"
columns = {self._quote_identifier(c) for c in self._get_sql_column_definitions(stream_name)}
pk_columns = {self._quote_identifier(c) for c in self._get_primary_keys(stream_name)}
non_pk_columns = columns - pk_columns
join_clause = f"{nl} AND ".join(f"tmp.{pk_col} = final.{pk_col}" for pk_col in pk_columns)
set_clause = f"{nl} , ".join(f"{col} = tmp.{col}" for col in non_pk_columns)
self._execute_sql(
f"""
MERGE INTO {self._fully_qualified(final_table_name)} final
USING (
SELECT *
FROM {self._fully_qualified(temp_table_name)}
) AS tmp
ON {join_clause}
WHEN MATCHED THEN UPDATE
SET
{set_clause}
WHEN NOT MATCHED THEN INSERT
(
{f",{nl} ".join(columns)}
)
VALUES (
tmp.{f",{nl} tmp.".join(columns)}
);
""",
)
def _get_column_by_name(self, table: str | Table, column_name: str) -> Column:
"""Return the column object for the given column name.
This method is case-insensitive.
"""
if isinstance(table, str):
table = self._get_table_by_name(table)
try:
# Try to get the column in a case-insensitive manner
return next(col for col in table.c if col.name.lower() == column_name.lower())
except StopIteration:
raise exc.PyAirbyteInternalError(
message="Could not find matching column.",
context={
"table": table,
"column_name": column_name,
},
) from None
def _emulated_merge_temp_table_to_final_table(
self,
stream_name: str,
temp_table_name: str,
final_table_name: str,
) -> None:
"""Emulate the merge operation using a series of SQL commands.
This is a fallback implementation for databases that do not support MERGE.
"""
final_table = self._get_table_by_name(final_table_name)
temp_table = self._get_table_by_name(temp_table_name)
pk_columns = self._get_primary_keys(stream_name)
columns_to_update: set[str] = self._get_sql_column_definitions(
stream_name=stream_name
).keys() - set(pk_columns)
# Create a dictionary mapping columns in users_final to users_stage for updating
update_values = {
self._get_column_by_name(final_table, column): (
self._get_column_by_name(temp_table, column)
)
for column in columns_to_update
}
# Craft the WHERE clause for composite primary keys
join_conditions = [
self._get_column_by_name(final_table, pk_column)
== self._get_column_by_name(temp_table, pk_column)
for pk_column in pk_columns
]
join_clause = and_(*join_conditions)
# Craft the UPDATE statement
update_stmt = update(final_table).values(update_values).where(join_clause)
# Define a join between temp_table and final_table
joined_table = temp_table.outerjoin(final_table, join_clause)
# Define a condition that checks for records in temp_table that do not have a corresponding
# record in final_table
where_not_exists_clause = self._get_column_by_name(final_table, pk_columns[0]) == null()
# Select records from temp_table that are not in final_table
select_new_records_stmt = (
select([temp_table]).select_from(joined_table).where(where_not_exists_clause)
)
# Craft the INSERT statement using the select statement
insert_new_records_stmt = insert(final_table).from_select(
names=[column.name for column in temp_table.columns], select=select_new_records_stmt
)
if DEBUG_MODE:
print(str(update_stmt))
print(str(insert_new_records_stmt))
with self.get_sql_connection() as conn:
conn.execute(update_stmt)
conn.execute(insert_new_records_stmt)
def _table_exists(
self,
table_name: str,
) -> bool:
"""Return true if the given table exists.
Subclasses may override this method to provide a more efficient implementation.
"""
return table_name in self._get_tables_list()
| SqlProcessorBase |
python | Lightning-AI__lightning | src/lightning/fabric/plugins/precision/precision.py | {
"start": 1359,
"end": 5592
} | class ____:
"""Base class for all plugins handling the precision-specific parts of the training.
The class attribute precision must be overwritten in child classes. The default value reflects fp32 training.
"""
precision: _PRECISION_INPUT_STR = "32-true"
def convert_module(self, module: Module) -> Module:
"""Convert the module parameters to the precision type this plugin handles.
This is optional and depends on the precision limitations during optimization.
"""
return module
def tensor_init_context(self) -> AbstractContextManager:
"""Controls how tensors get created (device, dtype)."""
return nullcontext()
def module_init_context(self) -> AbstractContextManager:
"""Instantiate module parameters or tensors in the precision type this plugin handles.
This is optional and depends on the precision limitations during optimization.
"""
return nullcontext()
def forward_context(self) -> AbstractContextManager:
"""A contextmanager for managing model forward/training_step/evaluation_step/predict_step."""
return nullcontext()
def convert_input(self, data: Any) -> Any:
"""Convert model inputs (forward) to the floating point precision type of this plugin.
This is a no-op in the base precision plugin, since we assume the data already has the desired type (default is
torch.float32).
"""
return data
def convert_output(self, data: Any) -> Any:
"""Convert outputs to the floating point precision type expected after model's forward.
This is a no-op in the base precision plugin, since we assume the data already has the desired type (default is
torch.float32).
"""
return data
def pre_backward(self, tensor: Tensor, module: Optional[Module]) -> Any:
"""Runs before precision plugin executes backward.
Args:
tensor: The tensor that will be used for backpropagation
module: The module that was involved in producing the tensor and whose parameters need the gradients
"""
def backward(self, tensor: Tensor, model: Optional[Module], *args: Any, **kwargs: Any) -> None:
"""Performs the actual backpropagation.
Args:
tensor: The tensor that will be used for backpropagation
model: The module that was involved in producing the tensor and whose parameters need the gradients
"""
tensor.backward(*args, **kwargs)
def post_backward(self, tensor: Tensor, module: Optional[Module]) -> Any:
"""Runs after precision plugin executes backward.
Args:
tensor: The tensor that will be used for backpropagation
module: The module that was involved in producing the tensor and whose parameters need the gradients
"""
def optimizer_step(
self,
optimizer: Optimizable,
**kwargs: Any,
) -> Any:
"""Hook to run the optimizer step."""
return optimizer.step(**kwargs)
def main_params(self, optimizer: Optimizer) -> _PARAMETERS:
"""The main params of the model.
Returns the plain model params here. Maybe different in other precision plugins.
"""
for group in optimizer.param_groups:
yield from group["params"]
def unscale_gradients(self, optimizer: Optimizer) -> None:
return
def state_dict(self) -> dict[str, Any]:
"""Called when saving a checkpoint, implement to generate precision plugin state_dict.
Returns:
A dictionary containing precision plugin state.
"""
return {}
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
"""Called when loading a checkpoint, implement to reload precision plugin state given precision plugin
state_dict.
Args:
state_dict: the precision plugin state returned by ``state_dict``.
"""
pass
def teardown(self) -> None:
"""This method is called to teardown the training process.
It is the right place to release memory and free other resources.
"""
| Precision |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 18557,
"end": 30697
} | class ____:
"""Abstract class for the code generators."""
printer = None # will be set to an instance of a CodePrinter subclass
def _indent_code(self, codelines):
return self.printer.indent_code(codelines)
def _printer_method_with_settings(self, method, settings=None, *args, **kwargs):
settings = settings or {}
ori = {k: self.printer._settings[k] for k in settings}
for k, v in settings.items():
self.printer._settings[k] = v
result = getattr(self.printer, method)(*args, **kwargs)
for k, v in ori.items():
self.printer._settings[k] = v
return result
def _get_symbol(self, s):
"""Returns the symbol as fcode prints it."""
if self.printer._settings['human']:
expr_str = self.printer.doprint(s)
else:
constants, not_supported, expr_str = self.printer.doprint(s)
if constants or not_supported:
raise ValueError("Failed to print %s" % str(s))
return expr_str.strip()
def __init__(self, project="project", cse=False):
"""Initialize a code generator.
Derived classes will offer more options that affect the generated
code.
"""
self.project = project
self.cse = cse
def routine(self, name, expr, argument_sequence=None, global_vars=None):
"""Creates an Routine object that is appropriate for this language.
This implementation is appropriate for at least C/Fortran. Subclasses
can override this if necessary.
Here, we assume at most one return value (the l-value) which must be
scalar. Additional outputs are OutputArguments (e.g., pointers on
right-hand-side or pass-by-reference). Matrices are always returned
via OutputArguments. If ``argument_sequence`` is None, arguments will
be ordered alphabetically, but with all InputArguments first, and then
OutputArgument and InOutArguments.
"""
if self.cse:
from sympy.simplify.cse_main import cse
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
for e in expr:
if not e.is_Equality:
raise CodeGenError("Lists of expressions must all be Equalities. {} is not.".format(e))
# create a list of right hand sides and simplify them
rhs = [e.rhs for e in expr]
common, simplified = cse(rhs)
# pack the simplified expressions back up with their left hand sides
expr = [Equality(e.lhs, rhs) for e, rhs in zip(expr, simplified)]
else:
if isinstance(expr, Equality):
common, simplified = cse(expr.rhs) #, ignore=in_out_args)
expr = Equality(expr.lhs, simplified[0])
else:
common, simplified = cse(expr)
expr = simplified
local_vars = [Result(b,a) for a,b in common]
local_symbols = {a for a,_ in common}
local_expressions = Tuple(*[b for _,b in common])
else:
local_expressions = Tuple()
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
if self.cse:
if {i.label for i in expressions.atoms(Idx)} != set():
raise CodeGenError("CSE and Indexed expressions do not play well together yet")
else:
# local variables for indexed expressions
local_vars = {i.label for i in expressions.atoms(Idx)}
local_symbols = local_vars
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
symbols = (expressions.free_symbols | local_expressions.free_symbols) - local_symbols - global_vars
new_symbols = set()
new_symbols.update(symbols)
for symbol in symbols:
if isinstance(symbol, Idx):
new_symbols.remove(symbol)
new_symbols.update(symbol.args[1].free_symbols)
if isinstance(symbol, Indexed):
new_symbols.remove(symbol)
symbols = new_symbols
# Decide whether to use output argument or return value
return_val = []
output_args = []
for expr in expressions:
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
if isinstance(out_arg, Indexed):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg.base.label
elif isinstance(out_arg, Symbol):
dims = []
symbol = out_arg
elif isinstance(out_arg, MatrixSymbol):
dims = tuple([ (S.Zero, dim - 1) for dim in out_arg.shape])
symbol = out_arg
else:
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
if expr.has(symbol):
output_args.append(
InOutArgument(symbol, out_arg, expr, dimensions=dims))
else:
output_args.append(
OutputArgument(symbol, out_arg, expr, dimensions=dims))
# remove duplicate arguments when they are not local variables
if symbol not in local_vars:
# avoid duplicate arguments
symbols.remove(symbol)
elif isinstance(expr, (ImmutableMatrix, MatrixSlice)):
# Create a "dummy" MatrixSymbol to use as the Output arg
out_arg = MatrixSymbol('out_%s' % abs(hash(expr)), *expr.shape)
dims = tuple([(S.Zero, dim - 1) for dim in out_arg.shape])
output_args.append(
OutputArgument(out_arg, out_arg, expr, dimensions=dims))
else:
return_val.append(Result(expr))
arg_list = []
# setup input argument list
# helper to get dimensions for data for array-like args
def dimensions(s):
return [(S.Zero, dim - 1) for dim in s.shape]
array_symbols = {}
for array in expressions.atoms(Indexed) | local_expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol) | local_expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
if symbol in array_symbols:
array = array_symbols[symbol]
metadata = {'dimensions': dimensions(array)}
else:
metadata = {}
arg_list.append(InputArgument(symbol, **metadata))
output_args.sort(key=lambda x: str(x.name))
arg_list.extend(output_args)
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
if isinstance(symbol, (IndexedBase, MatrixSymbol)):
metadata = {'dimensions': dimensions(symbol)}
else:
metadata = {}
new_args.append(InputArgument(symbol, **metadata))
arg_list = new_args
return Routine(name, arg_list, return_val, local_vars, global_vars)
def write(self, routines, prefix, to_files=False, header=True, empty=True):
"""Writes all the source code files for the given routines.
The generated source is returned as a list of (filename, contents)
tuples, or is written to files (see below). Each filename consists
of the given prefix, appended with an appropriate extension.
Parameters
==========
routines : list
A list of Routine instances to be written
prefix : string
The prefix for the output files
to_files : bool, optional
When True, the output is written to files. Otherwise, a list
of (filename, contents) tuples is returned. [default: False]
header : bool, optional
When True, a header comment is included on top of each source
file. [default: True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default: True]
"""
if to_files:
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
with open(filename, "w") as f:
dump_fn(self, routines, f, prefix, header, empty)
else:
result = []
for dump_fn in self.dump_fns:
filename = "%s.%s" % (prefix, dump_fn.extension)
contents = StringIO()
dump_fn(self, routines, contents, prefix, header, empty)
result.append((filename, contents.getvalue()))
return result
def dump_code(self, routines, f, prefix, header=True, empty=True):
"""Write the code by calling language specific methods.
The generated file contains all the definitions of the routines in
low-level code and refers to the header file if appropriate.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
code_lines = self._preprocessor_statements(prefix)
for routine in routines:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_globals(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if header:
code_lines = ''.join(self._get_header() + [code_lines])
if code_lines:
f.write(code_lines)
| CodeGen |
python | matplotlib__matplotlib | lib/matplotlib/backends/_backend_gtk.py | {
"start": 2623,
"end": 3707
} | class ____(TimerBase):
"""Subclass of `.TimerBase` using GTK timer events."""
def __init__(self, *args, **kwargs):
self._timer = None
super().__init__(*args, **kwargs)
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GLib.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GLib.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started.
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
super()._on_timer()
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if self.callbacks and not self._single:
return True
else:
self._timer = None
return False
| TimerGTK |
python | sympy__sympy | sympy/functions/special/polynomials.py | {
"start": 28821,
"end": 32927
} | class ____(DefinedFunction):
r"""
``assoc_legendre(n, m, x)`` gives $P_n^m(x)$, where $n$ and $m$ are
the degree and order or an expression which is related to the nth
order Legendre polynomial, $P_n(x)$ in the following manner:
.. math::
P_n^m(x) = (-1)^m (1 - x^2)^{\frac{m}{2}}
\frac{\mathrm{d}^m P_n(x)}{\mathrm{d} x^m}
Explanation
===========
Associated Legendre polynomials are orthogonal on $[-1, 1]$ with:
- weight $= 1$ for the same $m$ and different $n$.
- weight $= \frac{1}{1-x^2}$ for the same $n$ and different $m$.
Examples
========
>>> from sympy import assoc_legendre
>>> from sympy.abc import x, m, n
>>> assoc_legendre(0,0, x)
1
>>> assoc_legendre(1,0, x)
x
>>> assoc_legendre(1,1, x)
-sqrt(1 - x**2)
>>> assoc_legendre(n,m,x)
assoc_legendre(n, m, x)
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root,
legendre,
hermite, hermite_prob,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.hermite_prob_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Associated_Legendre_polynomials
.. [2] https://mathworld.wolfram.com/LegendrePolynomial.html
.. [3] https://functions.wolfram.com/Polynomials/LegendreP/
.. [4] https://functions.wolfram.com/Polynomials/LegendreP2/
"""
@classmethod
def _eval_at_order(cls, n, m):
P = legendre_poly(n, _x, polys=True).diff((_x, m))
return S.NegativeOne**m * (1 - _x**2)**Rational(m, 2) * P.as_expr()
@classmethod
def eval(cls, n, m, x):
if m.could_extract_minus_sign():
# P^{-m}_n ---> F * P^m_n
return S.NegativeOne**(-m) * (factorial(m + n)/factorial(n - m)) * assoc_legendre(n, -m, x)
if m == 0:
# P^0_n ---> L_n
return legendre(n, x)
if x == 0:
return 2**m*sqrt(S.Pi) / (gamma((1 - m - n)/2)*gamma(1 - (m - n)/2))
if n.is_Number and m.is_Number and n.is_integer and m.is_integer:
if n.is_negative:
raise ValueError("%s : 1st index must be nonnegative integer (got %r)" % (cls, n))
if abs(m) > n:
raise ValueError("%s : abs('2nd index') must be <= '1st index' (got %r, %r)" % (cls, n, m))
return cls._eval_at_order(int(n), abs(int(m))).subs(_x, x)
def fdiff(self, argindex=3):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt m
raise ArgumentIndexError(self, argindex)
elif argindex == 3:
# Diff wrt x
# Find better formula, this is unsuitable for x = 1
n, m, x = self.args
return 1/(x**2 - 1)*(x*n*assoc_legendre(n, m, x) - (m + n)*assoc_legendre(n - 1, m, x))
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_Sum(self, n, m, x, **kwargs):
from sympy.concrete.summations import Sum
k = Dummy("k")
kern = factorial(2*n - 2*k)/(2**n*factorial(n - k)*factorial(
k)*factorial(n - 2*k - m))*S.NegativeOne**k*x**(n - m - 2*k)
return (1 - x**2)**(m/2) * Sum(kern, (k, 0, floor((n - m)*S.Half)))
def _eval_rewrite_as_polynomial(self, n, m, x, **kwargs):
# This function is just kept for backwards compatibility
# but should not be used
return self._eval_rewrite_as_Sum(n, m, x, **kwargs)
def _eval_conjugate(self):
n, m, x = self.args
return self.func(n, m.conjugate(), x.conjugate())
#----------------------------------------------------------------------------
# Hermite polynomials
#
| assoc_legendre |
python | google__pytype | pytype/pytd/parse/node.py | {
"start": 663,
"end": 8252
} | class ____(
_Struct,
frozen=True,
tag=True,
tag_field="_struct_type",
kw_only=True,
omit_defaults=True,
cache_hash=True,
):
"""Base Node class."""
# We pretend that `name` is a ClassVar so that msgspec treats it as a struct
# field only when it is defined in a subclass.
name: ClassVar[str] = ""
def __iter__(self):
for name in self.__struct_fields__:
yield getattr(self, name)
def _ToTuple(self):
"""Returns a tuple of the stringified fields of self as a sort key."""
return tuple((x.__class__.__name__, str(x)) for x in self)
def __lt__(self, other):
"""Smaller than other node? Define so we can have deterministic ordering."""
if self is other:
return False
elif self.__class__ is other.__class__:
return tuple.__lt__(self._ToTuple(), other._ToTuple())
else:
return self.__class__.__name__ < other.__class__.__name__
def __gt__(self, other):
"""Larger than other node? Define so we can have deterministic ordering."""
if self is other:
return False
elif self.__class__ is other.__class__:
return tuple.__gt__(self._ToTuple(), other._ToTuple())
else:
return self.__class__.__name__ > other.__class__.__name__
def __le__(self, other):
return self == other or self < other
def __ge__(self, other):
return self == other or self > other
def IterChildren(self):
for name in self.__struct_fields__:
yield name, getattr(self, name)
def Visit(self, visitor, *args, **kwargs):
"""Visitor interface for transforming a tree of nodes to a new tree.
You can pass a visitor, and callback functions on that visitor will be
called for all nodes in the tree. Note that nodes are also allowed to
be stored in lists and as the values of dictionaries, as long as these
lists/dictionaries are stored in the named fields of the Node class.
It's possible to overload the Visit function on Nodes, to do your own
processing.
Arguments:
visitor: An instance of a visitor for this tree. For every node type you
want to transform, this visitor implements a "Visit<Classname>"
function named after the class of the node this function should
target. Note that <Classname> is the *actual* class of the node, so
if you subclass a Node class, visitors for the superclasses will *not*
be triggered anymore. Also, visitor callbacks are only triggered
for subclasses of Node.
*args: Passed to the visitor callback.
**kwargs: Passed to the visitor callback.
Returns:
Transformed version of this node.
"""
return _Visit(self, visitor, *args, **kwargs)
def Replace(self, **kwargs):
return msgspec.structs.replace(self, **kwargs)
# The set of visitor names currently being processed.
_visiting = set()
def _Visit(node, visitor, *args, **kwargs):
"""Visit the node."""
name = type(visitor).__name__
recursive = name in _visiting
_visiting.add(name)
start = metrics.get_cpu_clock()
try:
return _VisitNode(node, visitor, *args, **kwargs)
finally:
if not recursive:
_visiting.remove(name)
elapsed = metrics.get_cpu_clock() - start
metrics.get_metric("visit_" + name, metrics.Distribution).add(elapsed)
if _visiting:
metrics.get_metric(
"visit_nested_" + name, metrics.Distribution).add(elapsed)
def _VisitNode(node, visitor, *args, **kwargs):
"""Transform a node and all its children using a visitor.
This will iterate over all children of this node, and also process certain
things that are not nodes. The latter are either tuples, which will have their
elements visited, or primitive types, which will be returned as-is.
Args:
node: The node to transform. Either an actual instance of Node, or a
tuple found while scanning a node tree, or any other type (which will
be returned unmodified).
visitor: The visitor to apply. If this visitor has a "Visit<Name>" method,
with <Name> the name of the Node class, a callback will be triggered,
and the transformed version of this node will be whatever the callback
returned. Before calling the Visit callback, the following
attribute(s) on the Visitor class will be populated:
visitor.old_node: The node before the child nodes were visited.
Additionally, if the visitor has a "Enter<Name>" method, that method
will be called on the original node before descending into it. If
"Enter<Name>" returns False, the visitor will not visit children of
this node. If "Enter<name>" returns a set of field names, those field
names will not be visited. Otherwise, "Enter<Name>" should return
None, to indicate that nodes will be visited normally.
"Enter<Name>" is called pre-order; "Visit<Name> and "Leave<Name>" are
called post-order. A counterpart to "Enter<Name>" is "Leave<Name>",
which is intended for any clean-up that "Enter<Name>" needs (other
than that, it's redundant, and could be combined with "Visit<Name>").
*args: Passed to visitor callbacks.
**kwargs: Passed to visitor callbacks.
Returns:
The transformed Node (which *may* be the original node but could be a new
node, even if the contents are the same).
"""
node_class = node.__class__
if node_class is tuple:
changed = False
new_children = []
for child in node:
new_child = _VisitNode(child, visitor, *args, **kwargs)
if new_child is not child:
changed = True
new_children.append(new_child)
if changed:
# Since some of our children changed, instantiate a new node.
return node_class(new_children)
else:
# Optimization: if we didn't change any of the children, keep the entire
# object the same.
return node
elif not isinstance(node, Node):
return node
# At this point, assume node is a Node.
node_class_name = node_class.__name__
if node_class_name not in visitor.visit_class_names:
return node
skip_children = set()
if node_class_name in visitor.enter_functions:
# The visitor wants to be informed that we're descending into this part
# of the tree.
status = visitor.Enter(node, *args, **kwargs)
if status is False: # pylint: disable=g-bool-id-comparison
# Don't descend if Enter<Node> explicitly returns False, but not None,
# since None is the default return of Python functions.
return node
elif isinstance(status, set):
# If we are given a set of field names, do not visit those fields
skip_children = status
else:
# Any other value returned from Enter is ignored, so check:
assert status is None, repr((node_class_name, status))
changed = False
new_children = []
for name, child in node.IterChildren():
if name in skip_children:
new_child = child
else:
new_child = _VisitNode(child, visitor, *args, **kwargs)
if new_child is not child:
changed = True
new_children.append(new_child)
if changed:
new_node = node_class(*new_children)
else:
new_node = node
visitor.old_node = node
# Now call the user supplied callback(s), if they exist.
if (visitor.visits_all_node_types or
node_class_name in visitor.visit_functions):
new_node = visitor.Visit(new_node, *args, **kwargs)
if node_class_name in visitor.leave_functions:
visitor.Leave(node, *args, **kwargs)
del visitor.old_node
return new_node
| Node |
python | rushter__MLAlgorithms | mla/base/base.py | {
"start": 36,
"end": 1826
} | class ____:
y_required = True
fit_required = True
def _setup_input(self, X, y=None):
"""Ensure inputs to an estimator are in the expected format.
Ensures X and y are stored as numpy ndarrays by converting from an
array-like object if necessary. Enables estimators to define whether
they require a set of y target values or not with y_required, e.g.
kmeans clustering requires no target labels and is fit against only X.
Parameters
----------
X : array-like
Feature dataset.
y : array-like
Target values. By default is required, but if y_required = false
then may be omitted.
"""
if not isinstance(X, np.ndarray):
X = np.array(X)
if X.size == 0:
raise ValueError("Got an empty matrix.")
if X.ndim == 1:
self.n_samples, self.n_features = 1, X.shape
else:
self.n_samples, self.n_features = X.shape[0], np.prod(X.shape[1:])
self.X = X
if self.y_required:
if y is None:
raise ValueError("Missed required argument y")
if not isinstance(y, np.ndarray):
y = np.array(y)
if y.size == 0:
raise ValueError("The targets array must be no-empty.")
self.y = y
def fit(self, X, y=None):
self._setup_input(X, y)
def predict(self, X=None):
if not isinstance(X, np.ndarray):
X = np.array(X)
if self.X is not None or not self.fit_required:
return self._predict(X)
else:
raise ValueError("You must call `fit` before `predict`")
def _predict(self, X=None):
raise NotImplementedError()
| BaseEstimator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum6.py | {
"start": 940,
"end": 986
} | class ____(EnumWithValue):
z: int
| EnumSubclass |
python | kamyu104__LeetCode-Solutions | Python/find-the-safest-path-in-a-grid.py | {
"start": 2662,
"end": 4425
} | class ____(object):
def maximumSafenessFactor(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
def bfs():
dist = [[0 if grid[r][c] == 1 else -1 for c in xrange(len(grid[0]))] for r in xrange(len(grid))]
q = [(r, c) for r in xrange(len(grid)) for c in xrange(len(grid[0])) if grid[r][c]]
d = 0
while q:
new_q = []
for r, c in q:
for dr, dc in DIRECTIONS:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(dist) and 0 <= nc < len(dist[0]) and dist[nr][nc] == -1):
continue
dist[nr][nc] = d+1
new_q.append((nr, nc))
q = new_q
d += 1
return dist
def dijkstra(start, target):
max_heap = [(-dist[start[0]][start[1]], start)]
dist[start[0]][start[1]] = -1
while max_heap:
curr, u = heapq.heappop(max_heap)
curr = -curr
if u == target:
return curr
for dr, dc in DIRECTIONS:
nr, nc = u[0]+dr, u[1]+dc
if not (0 <= nr < len(dist) and 0 <= nc < len(dist[0]) and dist[nr][nc] != -1):
continue
heapq.heappush(max_heap, (-min(curr, dist[nr][nc]), (nr, nc)))
dist[nr][nc] = -1
return -1
dist = bfs()
return dijkstra(dist, (0, 0), (len(grid)-1, len(grid[0])-1))
# Time: O(n^2 * logn)
# Space: O(n^2)
import heapq
# bfs, binary search
| Solution2 |
python | django__django | tests/mail/tests.py | {
"start": 124598,
"end": 125427
} | class ____(SMTPBackendTestsBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.backend = smtp.EmailBackend(username="", password="")
cls.smtp_controller.stop()
@classmethod
def stop_smtp(cls):
# SMTP controller is stopped in setUpClass().
pass
def test_server_stopped(self):
"""
Closing the backend while the SMTP server is stopped doesn't raise an
exception.
"""
self.backend.close()
def test_fail_silently_on_connection_error(self):
"""
A socket connection error is silenced with fail_silently=True.
"""
with self.assertRaises(ConnectionError):
self.backend.open()
self.backend.fail_silently = True
self.backend.open()
| SMTPBackendStoppedServerTests |
python | tensorflow__tensorflow | tensorflow/python/training/training.py | {
"start": 15028,
"end": 15653
} | class ____(typing.NamedTuple):
context: Dict[str, Feature]
feature_lists: FeatureLists
```
This proto implements the `List[Feature]` portion.
"""
FeatureLists.__doc__ = """\
Mainly used as part of a `tf.train.SequenceExample`.
Contains a list of `tf.train.Feature`s.
The `tf.train.SequenceExample` proto can be thought of as a
proto implementation of the following python type:
```
# tf.train.Feature
Feature = Union[List[bytes],
List[int64],
List[float]]
# tf.train.FeatureList
FeatureList = List[Feature]
# tf.train.FeatureLists
FeatureLists = Dict[str, FeatureList]
| SequenceExample |
python | plotly__plotly.py | plotly/graph_objs/_carpet.py | {
"start": 215,
"end": 39833
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "carpet"
_valid_props = {
"a",
"a0",
"aaxis",
"asrc",
"b",
"b0",
"baxis",
"bsrc",
"carpet",
"cheaterslope",
"color",
"customdata",
"customdatasrc",
"da",
"db",
"font",
"ids",
"idssrc",
"legend",
"legendgrouptitle",
"legendrank",
"legendwidth",
"meta",
"metasrc",
"name",
"opacity",
"stream",
"type",
"uid",
"uirevision",
"visible",
"x",
"xaxis",
"xsrc",
"y",
"yaxis",
"ysrc",
"zorder",
}
@property
def a(self):
"""
An array containing values of the first parameter value
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["a"]
@a.setter
def a(self, val):
self["a"] = val
@property
def a0(self):
"""
Alternate to `a`. Builds a linear space of a coordinates. Use
with `da` where `a0` is the starting coordinate and `da` the
step.
The 'a0' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["a0"]
@a0.setter
def a0(self, val):
self["a0"] = val
@property
def aaxis(self):
"""
The 'aaxis' property is an instance of Aaxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Aaxis`
- A dict of string/value properties that will be passed
to the Aaxis constructor
Returns
-------
plotly.graph_objs.carpet.Aaxis
"""
return self["aaxis"]
@aaxis.setter
def aaxis(self, val):
self["aaxis"] = val
@property
def asrc(self):
"""
Sets the source reference on Chart Studio Cloud for `a`.
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["asrc"]
@asrc.setter
def asrc(self, val):
self["asrc"] = val
@property
def b(self):
"""
A two dimensional array of y coordinates at each carpet point.
The 'b' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["b"]
@b.setter
def b(self, val):
self["b"] = val
@property
def b0(self):
"""
Alternate to `b`. Builds a linear space of a coordinates. Use
with `db` where `b0` is the starting coordinate and `db` the
step.
The 'b0' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["b0"]
@b0.setter
def b0(self, val):
self["b0"] = val
@property
def baxis(self):
"""
The 'baxis' property is an instance of Baxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Baxis`
- A dict of string/value properties that will be passed
to the Baxis constructor
Returns
-------
plotly.graph_objs.carpet.Baxis
"""
return self["baxis"]
@baxis.setter
def baxis(self, val):
self["baxis"] = val
@property
def bsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `b`.
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bsrc"]
@bsrc.setter
def bsrc(self, val):
self["bsrc"] = val
@property
def carpet(self):
"""
An identifier for this carpet, so that `scattercarpet` and
`contourcarpet` traces can specify a carpet plot on which they
lie
The 'carpet' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["carpet"]
@carpet.setter
def carpet(self, val):
self["carpet"] = val
@property
def cheaterslope(self):
"""
The shift applied to each successive row of data in creating a
cheater plot. Only used if `x` is been omitted.
The 'cheaterslope' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cheaterslope"]
@cheaterslope.setter
def cheaterslope(self, val):
self["cheaterslope"] = val
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def da(self):
"""
Sets the a coordinate step. See `a0` for more info.
The 'da' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["da"]
@da.setter
def da(self, val):
self["da"] = val
@property
def db(self):
"""
Sets the b coordinate step. See `b0` for more info.
The 'db' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["db"]
@db.setter
def db(self, val):
self["db"] = val
@property
def font(self):
"""
The default font used for axis & tick labels on this carpet
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.carpet.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.carpet.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.carpet.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
    """
    Controls persistence of some user-driven changes to the trace:
    `constraintrange` in `parcoords` traces, as well as some
    `editable: true` modifications such as `name` and `colorbar.title`.
    Defaults to `layout.uirevision`.

    Other user-driven trace changes are governed by `layout`
    attributes instead: `trace.visible` by `layout.legend.uirevision`,
    `selectedpoints` by `layout.selectionrevision`, and
    `colorbar.(x|y)` (accessible with `config: {editable: true}`) by
    `layout.editrevision`. Trace changes are tracked by `uid`, which
    falls back on trace index only when no `uid` is provided — so if
    your app can add/remove traces before the end of the `data` array
    (changing a trace's index), giving each trace a stable `uid` still
    preserves user-driven changes.

    The 'uirevision' property accepts values of any type.

    Returns
    -------
    Any
    """
    return self["uirevision"]

@uirevision.setter
def uirevision(self, value):
    self["uirevision"] = value
@property
def visible(self):
    """
    Whether this trace is drawn. With "legendonly" the trace itself is
    not drawn but can still appear as a legend item (provided that the
    legend itself is visible).

    One of the enumeration values: True, False, 'legendonly'.

    Returns
    -------
    Any
    """
    return self["visible"]

@visible.setter
def visible(self, value):
    self["visible"] = value
@property
def x(self):
    """
    Two-dimensional array of x coordinates at each carpet point. When
    omitted, the plot is a cheater plot and the x axis is hidden by
    default.

    May be specified as a tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["x"]

@x.setter
def x(self, value):
    self["x"] = value
@property
def xaxis(self):
    """
    Reference linking this trace's x coordinates to a 2D cartesian
    x axis: "x" (the default value) refers to `layout.xaxis`, "x2" to
    `layout.xaxis2`, and so on.

    The value is an identifier of a particular subplot, of type 'x':
    the string 'x' optionally followed by an integer >= 1
    (e.g. 'x', 'x1', 'x2', 'x3', etc.).

    Returns
    -------
    str
    """
    return self["xaxis"]

@xaxis.setter
def xaxis(self, value):
    self["xaxis"] = value
@property
def xsrc(self):
    """
    Source reference on Chart Studio Cloud for `x`.

    Must be specified as a string or as a plotly.grid_objs.Column
    object.

    Returns
    -------
    str
    """
    return self["xsrc"]

@xsrc.setter
def xsrc(self, value):
    self["xsrc"] = value
@property
def y(self):
    """
    Two-dimensional array of y coordinates at each carpet point.

    May be specified as a tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["y"]

@y.setter
def y(self, value):
    self["y"] = value
@property
def yaxis(self):
    """
    Reference linking this trace's y coordinates to a 2D cartesian
    y axis: "y" (the default value) refers to `layout.yaxis`, "y2" to
    `layout.yaxis2`, and so on.

    The value is an identifier of a particular subplot, of type 'y':
    the string 'y' optionally followed by an integer >= 1
    (e.g. 'y', 'y1', 'y2', 'y3', etc.).

    Returns
    -------
    str
    """
    return self["yaxis"]

@yaxis.setter
def yaxis(self, value):
    self["yaxis"] = value
@property
def ysrc(self):
    """
    Source reference on Chart Studio Cloud for `y`.

    Must be specified as a string or as a plotly.grid_objs.Column
    object.

    Returns
    -------
    str
    """
    return self["ysrc"]

@ysrc.setter
def ysrc(self, value):
    self["ysrc"] = value
@property
def zorder(self):
    """
    Layer on which this trace is displayed, relative to other SVG
    traces on the same subplot; traces with higher `zorder` appear in
    front of those with lower `zorder`.

    An int (a float will be cast to an int).

    Returns
    -------
    int
    """
    return self["zorder"]

@zorder.setter
def zorder(self, value):
    self["zorder"] = value
@property
def type(self):
    """Trace type identifier (always "carpet"); fixed at construction, read-only."""
    return self._props["type"]
@property
def _prop_descriptions(self):
    # NOTE: the returned text is consumed verbatim by plotly's generated
    # help/error messages, so its content must not be edited by hand.
    return """\
a
An array containing values of the first parameter value
a0
Alternate to `a`. Builds a linear space of a
coordinates. Use with `da` where `a0` is the starting
coordinate and `da` the step.
aaxis
:class:`plotly.graph_objects.carpet.Aaxis` instance or
dict with compatible properties
asrc
Sets the source reference on Chart Studio Cloud for
`a`.
b
A two dimensional array of y coordinates at each carpet
point.
b0
Alternate to `b`. Builds a linear space of a
coordinates. Use with `db` where `b0` is the starting
coordinate and `db` the step.
baxis
:class:`plotly.graph_objects.carpet.Baxis` instance or
dict with compatible properties
bsrc
Sets the source reference on Chart Studio Cloud for
`b`.
carpet
An identifier for this carpet, so that `scattercarpet`
and `contourcarpet` traces can specify a carpet plot on
which they lie
cheaterslope
The shift applied to each successive row of data in
creating a cheater plot. Only used if `x` is been
omitted.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
da
Sets the a coordinate step. See `a0` for more info.
db
Sets the b coordinate step. See `b0` for more info.
font
The default font used for axis & tick labels on this
carpet
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgrouptitle
:class:`plotly.graph_objects.carpet.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
stream
:class:`plotly.graph_objects.carpet.Stream` instance or
dict with compatible properties
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
A two dimensional array of x coordinates at each carpet
point. If omitted, the plot is a cheater plot and the
xaxis is hidden by default.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
A two dimensional array of y coordinates at each carpet
point.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
"""
def __init__(
    self,
    arg=None,
    a=None,
    a0=None,
    aaxis=None,
    asrc=None,
    b=None,
    b0=None,
    baxis=None,
    bsrc=None,
    carpet=None,
    cheaterslope=None,
    color=None,
    customdata=None,
    customdatasrc=None,
    da=None,
    db=None,
    font=None,
    ids=None,
    idssrc=None,
    legend=None,
    legendgrouptitle=None,
    legendrank=None,
    legendwidth=None,
    meta=None,
    metasrc=None,
    name=None,
    opacity=None,
    stream=None,
    uid=None,
    uirevision=None,
    visible=None,
    x=None,
    xaxis=None,
    xsrc=None,
    y=None,
    yaxis=None,
    ysrc=None,
    zorder=None,
    **kwargs,
):
    """
    Construct a new Carpet object.

    The data describing carpet axis layout is set in `y` and
    (optionally) also `x`. If only `y` is present, the plot is
    interpreted as a cheater plot and `x` is filled in using the `y`
    values. `x` and `y` may either be 2D arrays with each dimension
    matching that of `a` and `b`, or 1D arrays with total length
    equal to that of `a` and `b`.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or an
        instance of :class:`plotly.graph_objs.Carpet`.

    All remaining keyword arguments mirror the trace properties of
    the same name; see the corresponding property docstrings on this
    class (or ``_prop_descriptions``) for details on each one.

    Returns
    -------
    Carpet
    """
    super().__init__("carpet")

    # Internal construction path: adopt an existing parent and skip
    # all property processing.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict we may safely pop from.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.Carpet
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Carpet`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Apply every constructor argument in declaration order; an
    # explicit keyword value takes precedence over a value in `arg`.
    for prop_name, prop_value in (
        ("a", a),
        ("a0", a0),
        ("aaxis", aaxis),
        ("asrc", asrc),
        ("b", b),
        ("b0", b0),
        ("baxis", baxis),
        ("bsrc", bsrc),
        ("carpet", carpet),
        ("cheaterslope", cheaterslope),
        ("color", color),
        ("customdata", customdata),
        ("customdatasrc", customdatasrc),
        ("da", da),
        ("db", db),
        ("font", font),
        ("ids", ids),
        ("idssrc", idssrc),
        ("legend", legend),
        ("legendgrouptitle", legendgrouptitle),
        ("legendrank", legendrank),
        ("legendwidth", legendwidth),
        ("meta", meta),
        ("metasrc", metasrc),
        ("name", name),
        ("opacity", opacity),
        ("stream", stream),
        ("uid", uid),
        ("uirevision", uirevision),
        ("visible", visible),
        ("x", x),
        ("xaxis", xaxis),
        ("xsrc", xsrc),
        ("y", y),
        ("yaxis", yaxis),
        ("ysrc", ysrc),
        ("zorder", zorder),
    ):
        self._set_property(prop_name, arg, prop_value)

    # The trace type is fixed; never let `arg` override it.
    self._props["type"] = "carpet"
    arg.pop("type", None)

    # Remaining entries in `arg` plus any extra kwargs go through the
    # generic property machinery (kwargs take precedence over `arg`).
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
| Carpet |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 12493,
"end": 13334
} | class ____(BaseActionTranslator):
@property
def action_type(self) -> ActionType:
return ActionType.OPSGENIE
field_mappings = {
"priority": FieldMapping(
source_field="priority", default_value=str(OPSGENIE_DEFAULT_PRIORITY)
)
}
@property
def required_fields(self) -> list[str]:
return [
ACTION_FIELD_MAPPINGS[ActionType.OPSGENIE][
ActionFieldMappingKeys.INTEGRATION_ID_KEY.value
],
ACTION_FIELD_MAPPINGS[ActionType.OPSGENIE][
ActionFieldMappingKeys.TARGET_IDENTIFIER_KEY.value
],
]
@property
def target_type(self) -> int:
return ActionTarget.SPECIFIC.value
@property
def blob_type(self) -> type[DataBlob]:
return OnCallDataBlob
| OpsgenieActionTranslator |
python | doocs__leetcode | solution/0300-0399/0349.Intersection of Two Arrays/Solution.py | {
"start": 0,
"end": 138
} | class ____:
def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
return list(set(nums1) & set(nums2))
| Solution |
python | pola-rs__polars | py-polars/src/polars/interchange/protocol.py | {
"start": 7215,
"end": 7346
} | class ____(RuntimeError):
"""Exception raised when a copy is required, but `allow_copy` is set to `False`."""
| CopyNotAllowedError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.