language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/base_streams.py
|
{
"start": 30841,
"end": 41594
}
|
class ____(IncrementalShopifyStream):
    """
    Incremental Shopify stream that reads data through the GraphQL BULK Operations API.
    A `ShopifyBulkManager` instance drives the BULK job lifecycle (create, track, fetch
    results). Subclasses must provide a `bulk_query`, and may declare a
    `parent_stream_class` so the parent stream's state is tracked and emitted alongside
    this stream's own cursor.
    """
    filter_field = "updated_at"
    cursor_field = "updated_at"
    data_field = "graphql"
    # optional parent-stream dependency; when set, parent state is tracked and emitted too
    parent_stream_class: Optional[Union[ShopifyStream, IncrementalShopifyStream]] = None
    def __init__(self, config: Dict) -> None:
        super().__init__(config)
        # define BULK Manager instance
        self.job_manager: ShopifyBulkManager = ShopifyBulkManager(
            http_client=self.bulk_http_client,
            base_url=f"{self.url_base}{self.path()}",
            query=self.bulk_query(config, self.parent_stream_query_cursor_alias),
            # how long (seconds) to wait before the job is considered stuck and terminated
            job_termination_threshold=float(config.get("job_termination_threshold", 3600)),
            # override the default job slice size, if provided (it's auto-adjusted, later on)
            job_size=config.get("bulk_window_in_days", 30.0),
            # provide the job checkpoint interval value, default value is 200k lines collected
            job_checkpoint_interval=config.get("job_checkpoint_interval", 200_000),
            parent_stream_name=self.parent_stream_name,
            parent_stream_cursor=self.parent_stream_cursor,
        )
    @property
    def filter_by_state_checkpoint(self) -> bool:
        # state-based filtering follows whether the BULK job manager supports checkpointing
        return self.job_manager._supports_checkpointing
    @property
    def bulk_http_client(self) -> HttpClient:
        """
        Returns the instance of the `HttpClient`, with the stream info.
        """
        # re-uses the underlying session of the stream's default HTTP client
        return HttpClient(self.name, self.logger, ShopifyErrorHandler(), session=self._http_client._session)
    @cached_property
    def parent_stream(self) -> Union[ShopifyStream, IncrementalShopifyStream]:
        """
        Returns the instance of parent stream, if the substream has a `parent_stream_class` dependency.
        """
        return self.parent_stream_class(self.config) if self.parent_stream_class else None
    @cached_property
    def parent_stream_name(self) -> Optional[str]:
        """
        Returns the parent stream name, if the substream has a `parent_stream_class` dependency.
        """
        return self.parent_stream.name if self.parent_stream_class else None
    @cached_property
    def parent_stream_cursor(self) -> Optional[str]:
        """
        Returns the parent stream cursor, if the substream has a `parent_stream_class` dependency.
        """
        return self.parent_stream.cursor_field if self.parent_stream_class else None
    @cached_property
    def parent_stream_query_cursor_alias(self) -> Optional[str]:
        """
        Returns the `<parent_name>_<parent_cursor>` alias used inside the BULK query,
        or implicitly None when there is no parent stream dependency.
        """
        if self.parent_stream_name and self.parent_stream_cursor:
            return f"{self.parent_stream_name}_{self.parent_stream_cursor}"
    @property
    @abstractmethod
    def bulk_query(self) -> ShopifyBulkQuery:
        """
        This method property should be defined in the stream class instance,
        and should be instantiated from the `ShopifyBulkQuery` class.
        """
    def add_shop_url_field(self, records: Iterable[MutableMapping[str, Any]] = []) -> Iterable[MutableMapping[str, Any]]:
        # ! Mandatory, add shop_url to the record to make querying easy
        # more info: https://github.com/airbytehq/airbyte/issues/25110
        # NOTE(review): mutable default `[]` is harmless here — the argument is only iterated, never mutated
        for record in records:
            if record:
                record["shop_url"] = self.config["shop"]
            yield record
    @property
    def default_state_comparison_value(self) -> Union[int, str]:
        # certain streams are using `id` field as `cursor_field`, which requires to use `int` type,
        # but many other use `str` values for this, we determine what to use based on `cursor_field` value
        return 0 if self.cursor_field == "id" else self.config.get("start_date")
    # CDK OVERRIDES
    @property
    def availability_strategy(self) -> None:
        """NOT USED FOR BULK OPERATIONS TO SAVE THE RATE LIMITS AND TIME FOR THE SYNC."""
        return None
    def get_updated_state(
        self,
        current_stream_state: MutableMapping[str, Any],
        latest_record: Mapping[str, Any],
    ) -> MutableMapping[str, Any]:
        """UPDATING THE STATE OBJECT:
        Stream: CustomerAddress
        Parent Stream: Customers
        Returns:
            {
                "customer_address": {
                    "id": 12345,
                    "customers": {
                        "updated_at": "2022-03-03T03:47:46-08:00"
                    }
                }
            }
        """
        updated_state = super().get_updated_state(current_stream_state, latest_record)
        if self.parent_stream_class:
            # the default way of getting the parent stream state is to use the value from the RecordProducer,
            # since the parent record could be present but no substream's records are present to emit,
            # the parent state is tracked for each parent record processed, thus updated regardless having substream records.
            tracked_parent_state = self.job_manager.record_producer.get_parent_stream_state()
            # fallback to the record level to search for the parent cursor or use the stream cursor value
            parent_state = tracked_parent_state if tracked_parent_state else self._get_parent_state_from_record(latest_record)
            # add parent_stream_state to `updated_state`
            updated_state[self.parent_stream_name] = parent_state
        return updated_state
    def _get_parent_state_from_record(self, latest_record: Mapping[str, Any]) -> MutableMapping[str, Any]:
        """
        Derives the parent stream state from `latest_record`: prefers the cursor inside the
        nested parent object, falling back to the record's top-level parent-cursor value.
        NOTE(review): writes the cursor back into the nested parent dict, which mutates
        `latest_record` in place — presumably intentional; verify against callers.
        """
        parent_state = latest_record.get(self.parent_stream_name, {})
        parent_state_value = parent_state.get(self.parent_stream_cursor) if parent_state else latest_record.get(self.parent_stream_cursor)
        parent_state[self.parent_stream_cursor] = parent_state_value
        return parent_state
    def _get_stream_cursor_value(self, stream_state: Optional[Mapping[str, Any]] = None) -> Optional[str]:
        """Returns this stream's own cursor from `stream_state`, or `start_date` from config when no state exists."""
        if stream_state:
            return stream_state.get(self.cursor_field, self.default_state_comparison_value)
        else:
            return self.config.get("start_date")
    def _get_stream_state_value(self, stream_state: Optional[Mapping[str, Any]] = None) -> Optional[str]:
        """
        Resolves the effective state value: the parent stream's cursor when a parent
        dependency exists and parent state is present, otherwise this stream's own cursor.
        """
        if stream_state:
            if self.parent_stream_class:
                # get parent stream state from the stream_state object.
                parent_state = stream_state.get(self.parent_stream_name, {})
                if parent_state:
                    return parent_state.get(self.parent_stream_cursor, self.default_state_comparison_value)
                else:
                    # use the streams cursor value, if no parent state available
                    return self._get_stream_cursor_value(stream_state)
            else:
                # get the stream state, if no `parent_stream_class` was assigned.
                return self._get_stream_cursor_value(stream_state)
        else:
            return self.config.get("start_date")
    def _get_state_value(self, stream_state: Optional[Mapping[str, Any]] = None) -> Optional[Union[str, int]]:
        """Entry point for slice generation: resolved state value when state exists, else the configured `start_date`."""
        if stream_state:
            return self._get_stream_state_value(stream_state)
        else:
            # for majority of cases we fallback to start_date, otherwise.
            return self.config.get("start_date")
    def emit_slice_message(self, slice_start: datetime, slice_end: datetime) -> None:
        """Logs the BULK job window being requested, plus the current (auto-adjusted) slice size and checkpoint info."""
        slice_size_message = f"Slice size: `P{round(self.job_manager._job_size, 1)}D`"
        slice_message = f"Stream: `{self.name}` requesting BULK Job for period: {slice_start} -- {slice_end}. {slice_size_message}."
        if self.job_manager._supports_checkpointing:
            checkpointing_message = f" The BULK checkpoint after `{self.job_manager.job_checkpoint_interval}` lines."
        else:
            checkpointing_message = f" The BULK checkpointing is not supported."
        self.logger.info(slice_message + checkpointing_message)
    def emit_checkpoint_message(self) -> None:
        """Logs the checkpoint cursor when the job manager adjusted the current slice from a checkpoint."""
        if self.job_manager._job_adjust_slice_from_checkpoint:
            self.logger.info(f"Stream {self.name}, continue from checkpoint: `{self._checkpoint_cursor}`.")
    @stream_state_cache.cache_stream_state
    def stream_slices(self, stream_state: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
        """
        Yields `{"start": ..., "end": ...}` RFC3339 windows from the resolved state value
        up to now; streams without a `filter_field` get a single empty slice.
        """
        if self.filter_field:
            state = self._get_state_value(stream_state)
            start = pdm.parse(state)
            end = pdm.now()
            while start < end:
                self.job_manager.job_size_normalize(start, end)
                slice_end = self.job_manager.get_adjusted_job_start(start)
                self.emit_slice_message(start, slice_end)
                yield {"start": start.to_rfc3339_string(), "end": slice_end.to_rfc3339_string()}
                # increment the end of the slice or reduce the next slice
                start = self.job_manager.get_adjusted_job_end(start, slice_end, self._checkpoint_cursor, self._filter_checkpointed_cursor)
        else:
            # for the streams that don't support filtering
            yield {}
    def sort_output_asc(self, non_sorted_records: Iterable[Mapping[str, Any]] = None) -> Iterable[Mapping[str, Any]]:
        """
        Apply sorting for collected records, to guarantee the `ASC` output.
        This handles the STATE and CHECKPOINTING correctly, for the `incremental` streams.
        """
        if non_sorted_records:
            if not self.cursor_field:
                yield from non_sorted_records
            else:
                # records with a falsy/missing cursor sort using the default comparison value
                yield from sorted(
                    non_sorted_records,
                    key=lambda x: x.get(self.cursor_field) if x.get(self.cursor_field) else self.default_state_comparison_value,
                )
        else:
            # always return an empty iterable, if no records
            return []
    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: Optional[List[str]] = None,
        stream_slice: Optional[Mapping[str, Any]] = None,
        stream_state: Optional[Mapping[str, Any]] = None,
    ) -> Iterable[StreamData]:
        """
        Creates a BULK job for the given slice, then yields its results — sorted ASC and
        filtered against the cached stream state — finishing with a checkpoint log line.
        """
        self.job_manager.create_job(stream_slice, self.filter_field)
        stream_state = stream_state_cache.cached_state.get(self.name, {self.cursor_field: self.default_state_comparison_value})
        # add `shop_url` field to each record produced
        records = self.add_shop_url_field(
            # produce records from saved bulk job result
            self.job_manager.job_get_results()
        )
        # emit records in ASC order
        yield from self.filter_records_newer_than_state(stream_state, self.sort_output_asc(records))
        # add log message about the checkpoint value
        self.emit_checkpoint_message()
|
IncrementalShopifyGraphQlBulkStream
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/utils/config.py
|
{
"start": 33434,
"end": 35362
}
|
class ____(str, enum.Enum):
    """
    The policy to use for acquiring new CUDA streams.

    * ``CUDAStreamPolicy.DEFAULT`` : Use the default CUDA stream.
    * ``CUDAStreamPolicy.NEW`` : Create a new CUDA stream.

    Members mix in :class:`str`, so they compare equal to their lowercase
    string values (e.g. ``CUDAStreamPolicy.DEFAULT == "default"``), which is
    how user-supplied strings are converted into members.
    """
    DEFAULT = "default"
    NEW = "new"
def _convert_cuda_stream_policy(
    user_cuda_stream_policy: dict | str,
) -> CUDAStreamPolicy | CUDAStreamPoolConfig:
    """
    Normalize a user-supplied stream policy into a policy/config object.

    Accepted inputs:
    * ``"default"`` / ``"new"`` — simple policies, returned as ``CUDAStreamPolicy``.
    * ``"pool"`` — a ``CUDAStreamPoolConfig`` with default settings.
    * a ``dict`` — taken verbatim as ``CUDAStreamPoolConfig`` keyword arguments.
    * any other string — treated as a JSON-encoded ``CUDAStreamPoolConfig``.

    Raises
    ------
    ValueError
        If a string input is neither a known policy name nor a valid
        JSON-encoded pool configuration.
    """
    if isinstance(user_cuda_stream_policy, dict):
        return CUDAStreamPoolConfig(**user_cuda_stream_policy)
    if user_cuda_stream_policy in ("default", "new"):
        return CUDAStreamPolicy(user_cuda_stream_policy)
    if user_cuda_stream_policy == "pool":
        return CUDAStreamPoolConfig()
    if isinstance(user_cuda_stream_policy, str):
        # any other string is assumed to be a JSON encoded CUDAStreamPoolConfig
        try:
            decoded = json.loads(user_cuda_stream_policy)
        except json.JSONDecodeError:
            raise ValueError(
                f"Invalid CUDA stream policy: '{user_cuda_stream_policy}'"
            ) from None
        if isinstance(decoded, dict) and "pool_size" in decoded and "flags" in decoded:
            pool_size, flags = decoded["pool_size"], decoded["flags"]
            if isinstance(pool_size, int) and isinstance(flags, int):
                return CUDAStreamPoolConfig(pool_size=pool_size, flags=CudaStreamFlags(flags))
            if isinstance(pool_size, int) and isinstance(flags, str):
                # map the flag *name* onto the corresponding enum member
                return CUDAStreamPoolConfig(
                    pool_size=pool_size,
                    flags=CudaStreamFlags(CudaStreamFlags.__members__[flags]),
                )
        # last resort: hand the decoded payload to CUDAStreamPoolConfig directly
        try:
            return CUDAStreamPoolConfig(**decoded)
        except TypeError:
            raise ValueError(
                f"Invalid CUDA stream policy: {user_cuda_stream_policy}"
            ) from None
@dataclasses.dataclass(frozen=True, eq=True)
|
CUDAStreamPolicy
|
python
|
huggingface__transformers
|
src/transformers/models/conditional_detr/image_processing_conditional_detr.py
|
{
"start": 29363,
"end": 80160
}
|
class ____(BaseImageProcessor):
r"""
Constructs a Conditional Detr image processor.
Args:
format (`str`, *optional*, defaults to `"coco_detection"`):
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
in the `preprocess` method. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize:
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_annotations (`bool`, *optional*, defaults to `True`):
Controls whether to convert the annotations to the format expected by the DETR model. Converts the
bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
method. If `True`, padding will be applied to the bottom and right of the image with zeros.
If `pad_size` is provided, the image will be padded to the specified dimensions.
Otherwise, the image will be padded to the maximum height and width of the batch.
pad_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
model_input_names = ["pixel_values", "pixel_mask"]
valid_kwargs = ConditionalDetrImageProcessorKwargs
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
    def __init__(
        self,
        format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
        do_resize: bool = True,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_convert_annotations: Optional[bool] = None,
        do_pad: bool = True,
        pad_size: Optional[dict[str, int]] = None,
        **kwargs,
    ) -> None:
        # legacy `max_size` kwarg is only consumed when an explicit `size` is given;
        # NOTE(review): when `size is None` a stray `max_size` kwarg stays in `kwargs` — confirm intended
        max_size = None if size is None else kwargs.pop("max_size", 1333)
        size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
        size = get_size_dict(size, max_size=max_size, default_to_square=False)
        # Backwards compatibility: annotation conversion historically followed `do_normalize`
        if do_convert_annotations is None:
            do_convert_annotations = do_normalize
        super().__init__(**kwargs)
        self.format = format
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_convert_annotations = do_convert_annotations
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        # legacy alias: `pad_and_return_pixel_mask` overrides `do_pad` when supplied
        self.do_pad = kwargs.pop("pad_and_return_pixel_mask", do_pad)
        self.pad_size = pad_size
        # kwargs accepted by `preprocess`; used to validate caller-supplied keyword arguments
        self._valid_processor_keys = [
            "images",
            "annotations",
            "return_segmentation_masks",
            "masks_path",
            "do_resize",
            "size",
            "resample",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "do_convert_annotations",
            "image_mean",
            "image_std",
            "do_pad",
            "pad_size",
            "format",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->ConditionalDetr
def prepare_annotation(
self,
image: np.ndarray,
target: dict,
format: Optional[AnnotationFormat] = None,
return_segmentation_masks: Optional[bool] = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> dict:
"""
Prepare an annotation for feeding into ConditionalDetr model.
"""
format = format if format is not None else self.format
if format == AnnotationFormat.COCO_DETECTION:
return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_detection_annotation(
image, target, return_segmentation_masks, input_data_format=input_data_format
)
elif format == AnnotationFormat.COCO_PANOPTIC:
return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_panoptic_annotation(
image,
target,
masks_path=masks_path,
return_masks=return_segmentation_masks,
input_data_format=input_data_format,
)
else:
raise ValueError(f"Format {format} is not supported.")
return target
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, max_size=None, default_to_square=False)
if "shortest_edge" in size and "longest_edge" in size:
new_size = get_resize_output_image_size(
image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
)
elif "max_height" in size and "max_width" in size:
new_size = get_image_size_for_max_height_width(
image, size["max_height"], size["max_width"], input_data_format=input_data_format
)
elif "height" in size and "width" in size:
new_size = (size["height"], size["width"])
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
image = resize(
image,
size=new_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
return image
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
def resize_annotation(
self,
annotation,
orig_size,
size,
resample: PILImageResampling = PILImageResampling.NEAREST,
) -> dict:
"""
Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
to this number.
"""
return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
def rescale(
self,
image: np.ndarray,
rescale_factor: float,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict:
"""
Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
`[center_x, center_y, width, height]` format and from absolute to relative pixel values.
"""
return normalize_annotation(annotation, image_size=image_size)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
def _update_annotation_for_padded_image(
self,
annotation: dict,
input_image_size: tuple[int, int],
output_image_size: tuple[int, int],
padding,
update_bboxes,
) -> dict:
"""
Update the annotation for a padded image.
"""
new_annotation = {}
new_annotation["size"] = output_image_size
for key, value in annotation.items():
if key == "masks":
masks = value
masks = pad(
masks,
padding,
mode=PaddingMode.CONSTANT,
constant_values=0,
input_data_format=ChannelDimension.FIRST,
)
masks = safe_squeeze(masks, 1)
new_annotation["masks"] = masks
elif key == "boxes" and update_bboxes:
boxes = value
boxes *= np.asarray(
[
input_image_size[1] / output_image_size[1],
input_image_size[0] / output_image_size[0],
input_image_size[1] / output_image_size[1],
input_image_size[0] / output_image_size[0],
]
)
new_annotation["boxes"] = boxes
elif key == "size":
new_annotation["size"] = output_image_size
else:
new_annotation[key] = value
return new_annotation
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
def _pad_image(
self,
image: np.ndarray,
output_size: tuple[int, int],
annotation: Optional[dict[str, Any]] = None,
constant_values: Union[float, Iterable[float]] = 0,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
update_bboxes: bool = True,
) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(
image,
padding,
mode=PaddingMode.CONSTANT,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
if annotation is not None:
annotation = self._update_annotation_for_padded_image(
annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
)
return padded_image, annotation
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
def pad(
self,
images: list[np.ndarray],
annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
constant_values: Union[float, Iterable[float]] = 0,
return_pixel_mask: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
update_bboxes: bool = True,
pad_size: Optional[dict[str, int]] = None,
) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
images (list[`np.ndarray`]):
Images to pad.
annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
Annotations to transform according to the padding that is applied to the images.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
update_bboxes (`bool`, *optional*, defaults to `True`):
Whether to update the bounding boxes in the annotations to match the padded images. If the
bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`
format, the bounding boxes will not be updated.
pad_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
pad_size = pad_size if pad_size is not None else self.pad_size
if pad_size is not None:
padded_size = (pad_size["height"], pad_size["width"])
else:
padded_size = get_max_height_width(images, input_data_format=input_data_format)
annotation_list = annotations if annotations is not None else [None] * len(images)
padded_images = []
padded_annotations = []
for image, annotation in zip(images, annotation_list):
padded_image, padded_annotation = self._pad_image(
image,
padded_size,
annotation,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
update_bboxes=update_bboxes,
)
padded_images.append(padded_image)
padded_annotations.append(padded_annotation)
data = {"pixel_values": padded_images}
if return_pixel_mask:
masks = [
make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format)
for image in images
]
data["pixel_mask"] = masks
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
if annotations is not None:
encoded_inputs["labels"] = [
BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
]
return encoded_inputs
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
def preprocess(
self,
images: ImageInput,
annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
return_segmentation_masks: Optional[bool] = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample=None, # PILImageResampling
do_rescale: Optional[bool] = None,
rescale_factor: Optional[Union[int, float]] = None,
do_normalize: Optional[bool] = None,
do_convert_annotations: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
format: Optional[Union[str, AnnotationFormat]] = None,
return_tensors: Optional[Union[TensorType, str]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
pad_size: Optional[dict[str, int]] = None,
**kwargs,
) -> BatchFeature:
"""
Preprocess an image or a batch of images so that it can be used by the model.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
List of annotations associated with the image or batch of images. If annotation is for object
detection, the annotations should be a dictionary with the following keys:
- "image_id" (`int`): The image id.
- "annotations" (`list[Dict]`): List of annotations for an image. Each annotation should be a
dictionary. An image can have no annotations, in which case the list should be empty.
If annotation is for segmentation, the annotations should be a dictionary with the following keys:
- "image_id" (`int`): The image id.
- "segments_info" (`list[Dict]`): List of segments for an image. Each segment should be a dictionary.
An image can have no segments, in which case the list should be empty.
- "file_name" (`str`): The file name of the image.
return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
Whether to return segmentation masks.
masks_path (`str` or `pathlib.Path`, *optional*):
Path to the directory containing the segmentation masks.
do_resize (`bool`, *optional*, defaults to self.do_resize):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to self.size):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to self.resample):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to self.do_rescale):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
Rescale factor to use when rescaling the image.
do_normalize (`bool`, *optional*, defaults to self.do_normalize):
Whether to normalize the image.
do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
Whether to convert the annotations to the format expected by the model. Converts the bounding
boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
and in relative coordinates.
image_mean (`float` or `list[float]`, *optional*, defaults to self.image_mean):
Mean to use when normalizing the image.
image_std (`float` or `list[float]`, *optional*, defaults to self.image_std):
Standard deviation to use when normalizing the image.
do_pad (`bool`, *optional*, defaults to self.do_pad):
Whether to pad the image. If `True`, padding will be applied to the bottom and right of
the image with zeros. If `pad_size` is provided, the image will be padded to the specified
dimensions. Otherwise, the image will be padded to the maximum height and width of the batch.
format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
Format of the annotations.
return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
Type of tensors to return. If `None`, will return the list of images.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
pad_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
do_resize = self.do_resize if do_resize is None else do_resize
size = self.size if size is None else size
size = get_size_dict(size=size, default_to_square=False)
resample = self.resample if resample is None else resample
do_rescale = self.do_rescale if do_rescale is None else do_rescale
rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
do_normalize = self.do_normalize if do_normalize is None else do_normalize
image_mean = self.image_mean if image_mean is None else image_mean
image_std = self.image_std if image_std is None else image_std
do_convert_annotations = (
self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
)
do_pad = self.do_pad if do_pad is None else do_pad
pad_size = self.pad_size if pad_size is None else pad_size
format = self.format if format is None else format
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor.")
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
# Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
if annotations is not None and isinstance(annotations, dict):
annotations = [annotations]
if annotations is not None and len(images) != len(annotations):
raise ValueError(
f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
)
format = AnnotationFormat(format)
if annotations is not None:
validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
if (
masks_path is not None
and format == AnnotationFormat.COCO_PANOPTIC
and not isinstance(masks_path, (pathlib.Path, str))
):
raise ValueError(
"The path to the directory containing the mask PNG files should be provided as a"
f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
)
# All transformations expect numpy arrays
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
# prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
if annotations is not None:
prepared_images = []
prepared_annotations = []
for image, target in zip(images, annotations):
target = self.prepare_annotation(
image,
target,
format,
return_segmentation_masks=return_segmentation_masks,
masks_path=masks_path,
input_data_format=input_data_format,
)
prepared_images.append(image)
prepared_annotations.append(target)
images = prepared_images
annotations = prepared_annotations
del prepared_images, prepared_annotations
# transformations
if do_resize:
if annotations is not None:
resized_images, resized_annotations = [], []
for image, target in zip(images, annotations):
orig_size = get_image_size(image, input_data_format)
resized_image = self.resize(
image, size=size, resample=resample, input_data_format=input_data_format
)
resized_annotation = self.resize_annotation(
target, orig_size, get_image_size(resized_image, input_data_format)
)
resized_images.append(resized_image)
resized_annotations.append(resized_annotation)
images = resized_images
annotations = resized_annotations
del resized_images, resized_annotations
else:
images = [
self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [
self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
]
if do_convert_annotations and annotations is not None:
annotations = [
self.normalize_annotation(annotation, get_image_size(image, input_data_format))
for annotation, image in zip(annotations, images)
]
if do_pad:
# Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
encoded_inputs = self.pad(
images,
annotations=annotations,
return_pixel_mask=True,
data_format=data_format,
input_data_format=input_data_format,
update_bboxes=do_convert_annotations,
return_tensors=return_tensors,
pad_size=pad_size,
)
else:
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in images
]
encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
if annotations is not None:
encoded_inputs["labels"] = [
BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
]
return encoded_inputs
# Copied from transformers.models.deformable_detr.image_processing_deformable_detr.DeformableDetrImageProcessor.post_process_object_detection with DeformableDetr->ConditionalDetr
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100
):
"""
Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = out_logits.sigmoid()
prob = prob.view(out_logits.shape[0], -1)
k_value = min(top_k, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None):
"""
Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`ConditionalDetrForSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
A list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Semantic segmentation logits of shape (batch_size, num_classes, height, width)
segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = nn.functional.interpolate(
segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation with Detr->ConditionalDetr
def post_process_instance_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
target_sizes: Optional[list[tuple[int, int]]] = None,
return_coco_annotation: Optional[bool] = False,
) -> list[dict]:
"""
Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch.
Args:
outputs ([`ConditionalDetrForSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If unset, predictions will not be resized.
return_coco_annotation (`bool`, *optional*):
Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE)
format.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`. Set to `None` if no mask if found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=[],
target_size=target_size,
)
# Return segmentation map in run-length encoding (RLE) format
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation with Detr->ConditionalDetr
def post_process_panoptic_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
label_ids_to_fuse: Optional[set[int]] = None,
target_sizes: Optional[list[tuple[int, int]]] = None,
) -> list[dict]:
"""
Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports
PyTorch.
Args:
outputs ([`ConditionalDetrForSegmentation`]):
The outputs from [`ConditionalDetrForSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this state will have all their instances be fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or
`None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to
the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.")
label_ids_to_fuse = set()
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=label_ids_to_fuse,
target_size=target_size,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["ConditionalDetrImageProcessor"]
|
ConditionalDetrImageProcessor
|
python
|
ray-project__ray
|
rllib/connectors/learner/add_next_observations_from_episodes_to_train_batch.py
|
{
"start": 374,
"end": 3880
}
|
class ____(ConnectorV2):
"""Adds the NEXT_OBS column with the correct episode observations to train batch.
- Operates on a list of Episode objects.
- Gets all observation(s) from all the given episodes (except the very first ones)
and adds them to the batch under construction in the NEXT_OBS column (as a list of
individual observations).
- Does NOT alter any observations (or other data) in the given episodes.
- Can be used in Learner connector pipelines.
.. testcode::
import gymnasium as gym
import numpy as np
from ray.rllib.connectors.learner import (
AddNextObservationsFromEpisodesToTrainBatch
)
from ray.rllib.core.columns import Columns
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.utils.test_utils import check
# Create two dummy SingleAgentEpisodes, each containing 3 observations,
# 2 actions and 2 rewards (both episodes are length=2).
obs_space = gym.spaces.Box(-1.0, 1.0, (2,), np.float32)
act_space = gym.spaces.Discrete(2)
episodes = [SingleAgentEpisode(
observations=[obs_space.sample(), obs_space.sample(), obs_space.sample()],
actions=[act_space.sample(), act_space.sample()],
rewards=[1.0, 2.0],
len_lookback_buffer=0,
) for _ in range(2)]
eps_1_next_obses = episodes[0].get_observations([1, 2])
eps_2_next_obses = episodes[1].get_observations([1, 2])
print(f"1st Episode's next obses are {eps_1_next_obses}")
print(f"2nd Episode's next obses are {eps_2_next_obses}")
# Create an instance of this class.
connector = AddNextObservationsFromEpisodesToTrainBatch()
# Call the connector with the two created episodes.
# Note that this particular connector works without an RLModule, so we
# simplify here for the sake of this example.
output_data = connector(
rl_module=None,
batch={},
episodes=episodes,
explore=True,
shared_data={},
)
# The output data should now contain the last observations of both episodes,
# in a "per-episode organized" fashion.
check(
output_data,
{
Columns.NEXT_OBS: {
(episodes[0].id_,): eps_1_next_obses,
(episodes[1].id_,): eps_2_next_obses,
},
},
)
"""
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Dict[str, Any],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
# If "obs" already in `batch`, early out.
if Columns.NEXT_OBS in batch:
return batch
for sa_episode in self.single_agent_episode_iterator(
# This is a Learner-only connector -> Get all episodes (for train batch).
episodes,
agents_that_stepped_only=False,
):
self.add_n_batch_items(
batch,
Columns.NEXT_OBS,
items_to_add=sa_episode.get_observations(slice(1, len(sa_episode) + 1)),
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
return batch
|
AddNextObservationsFromEpisodesToTrainBatch
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1alpha1_storage_version_migration_list.py
|
{
"start": 383,
"end": 7360
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1alpha1StorageVersionMigration]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1StorageVersionMigrationList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1alpha1StorageVersionMigrationList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1StorageVersionMigrationList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1StorageVersionMigrationList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1StorageVersionMigrationList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1alpha1StorageVersionMigrationList. # noqa: E501
Items is the list of StorageVersionMigration # noqa: E501
:return: The items of this V1alpha1StorageVersionMigrationList. # noqa: E501
:rtype: list[V1alpha1StorageVersionMigration]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1alpha1StorageVersionMigrationList.
Items is the list of StorageVersionMigration # noqa: E501
:param items: The items of this V1alpha1StorageVersionMigrationList. # noqa: E501
:type: list[V1alpha1StorageVersionMigration]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1alpha1StorageVersionMigrationList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1StorageVersionMigrationList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1StorageVersionMigrationList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1StorageVersionMigrationList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1StorageVersionMigrationList. # noqa: E501
:return: The metadata of this V1alpha1StorageVersionMigrationList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1StorageVersionMigrationList.
:param metadata: The metadata of this V1alpha1StorageVersionMigrationList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1StorageVersionMigrationList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1StorageVersionMigrationList):
return True
return self.to_dict() != other.to_dict()
|
V1alpha1StorageVersionMigrationList
|
python
|
mlflow__mlflow
|
tests/pyfunc/test_chat_model.py
|
{
"start": 4236,
"end": 4551
}
|
class ____(mlflow.pyfunc.ChatModel):
@mlflow.trace
def predict(
self, context, messages: list[ChatMessage], params: ChatParams
) -> ChatCompletionResponse:
mock_response = get_mock_response(messages, params)
return ChatCompletionResponse.from_dict(mock_response)
|
ChatModelWithTrace
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/release_thresholds/health_checks/test_is_crash_free_rate_healthy.py
|
{
"start": 5523,
"end": 12943
}
|
class ____(TestCase):
def setUp(self) -> None:
self.project1 = self.create_project(name="foo", organization=self.organization)
self.release1 = Release.objects.create(version="v1", organization=self.organization)
self.sessions_data = mock_sessions_data
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_interval_indexes"
)
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_groups_totals"
)
def test_is_crash_free_rate_success(
self, mock_get_groups_totals: MagicMock, mock_get_interval_indexes: MagicMock
) -> None:
now = datetime.utcnow()
mock_get_interval_indexes.return_value = 0, 10
mock_get_groups_totals.side_effect = [0, 10]
# current threshold within series
mock_threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.CRASH_FREE_SESSION_RATE,
"trigger_type": TriggerType.UNDER_STR,
"value": 99, # crash free rate
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
is_healthy, metric_value = is_crash_free_rate_healthy_check(
ethreshold=mock_threshold,
sessions_data=self.sessions_data,
display=CRASH_SESSIONS_DISPLAY,
)
assert mock_get_interval_indexes.call_count == 1
assert mock_get_groups_totals.call_count == 2
assert is_healthy
assert metric_value == 100
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_interval_indexes"
)
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_groups_totals"
)
def test_is_crash_free_rate_failure(
self, mock_get_groups_totals: MagicMock, mock_get_interval_indexes: MagicMock
) -> None:
now = datetime.utcnow()
mock_get_interval_indexes.return_value = 0, 10
mock_get_groups_totals.side_effect = [5, 10] # 5 crashes, 10 total
# current threshold within series
mock_threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.CRASH_FREE_SESSION_RATE,
"trigger_type": TriggerType.UNDER_STR,
"value": 99, # crash free rate
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
is_healthy, metric_value = is_crash_free_rate_healthy_check(
ethreshold=mock_threshold,
sessions_data=self.sessions_data,
display=CRASH_SESSIONS_DISPLAY,
)
assert mock_get_interval_indexes.call_count == 1
assert mock_get_groups_totals.call_count == 2
assert not is_healthy
assert metric_value == 50
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_interval_indexes"
)
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_groups_totals"
)
def test_is_crash_free_rate_catches_interval_idx_error(
self, mock_get_groups_totals: MagicMock, mock_get_interval_indexes: MagicMock
) -> None:
now = datetime.utcnow()
mock_get_interval_indexes.return_value = 0, 10
def side_effect(**kwargs: object) -> NoReturn:
raise IndexError
mock_get_groups_totals.side_effect = side_effect
# current threshold within series
mock_threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.CRASH_FREE_SESSION_RATE,
"trigger_type": TriggerType.UNDER_STR,
"value": 99, # crash free rate
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
is_healthy, metric_value = is_crash_free_rate_healthy_check(
ethreshold=mock_threshold,
sessions_data=self.sessions_data,
display=CRASH_SESSIONS_DISPLAY,
)
assert mock_get_interval_indexes.call_count == 1
assert mock_get_groups_totals.call_count == 1
assert not is_healthy
assert metric_value == -1
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_interval_indexes"
)
@patch(
"sentry.api.endpoints.release_thresholds.health_checks.is_crash_free_rate_healthy.get_groups_totals"
)
def test_get_group_catches_totals_errors(
self, mock_get_groups_totals: MagicMock, mock_get_interval_indexes: MagicMock
) -> None:
now = datetime.utcnow()
mock_get_interval_indexes.return_value = 10, 0
# current threshold within series
mock_threshold: EnrichedThreshold = {
"id": "1",
"date": now,
"start": now - timedelta(minutes=1),
"end": now,
"environment": None,
"is_healthy": False,
"key": "",
"project": serialize(self.project1),
"project_id": self.project1.id,
"project_slug": self.project1.slug,
"release": self.release1.version,
"threshold_type": ReleaseThresholdType.CRASH_FREE_SESSION_RATE,
"trigger_type": TriggerType.UNDER_STR,
"value": 99, # crash free rate
"window_in_seconds": 60, # NOTE: window_in_seconds only used to determine start/end. Not utilized in validation method
"metric_value": None,
}
is_healthy, metric_value = is_crash_free_rate_healthy_check(
ethreshold=mock_threshold,
sessions_data=self.sessions_data,
display=CRASH_SESSIONS_DISPLAY,
)
assert mock_get_interval_indexes.call_count == 1
assert mock_get_groups_totals.call_count == 0
assert not is_healthy
assert metric_value == -1
|
CrashFreeRateThresholdCheckTest
|
python
|
getsentry__sentry
|
tests/sentry/seer/autofix/test_autofix.py
|
{
"start": 18661,
"end": 26085
}
|
class ____(APITestCase, SnubaTestCase):
@patch("sentry.seer.explorer.utils.get_from_profiling_service")
def test_get_profile_from_trace_tree_basic(self, mock_get_from_profiling_service) -> None:
"""Test finding a profile for a matching transaction in trace tree."""
# Setup mock event with transaction name
event = Mock()
event.event_id = "error-event-id"
event.trace_id = "1234567890abcdef1234567890abcdef"
event.transaction = "/api/users"
# Create a simple trace tree structure with a span that has a profile
profile_id = "profile123456789"
trace_tree = {
"trace_id": "1234567890abcdef1234567890abcdef",
"trace": [
{
"id": "tx-span-id",
"description": "/api/users", # Matches event transaction
"profile_id": profile_id,
"start_timestamp": 1672567200.0,
"end_timestamp": 1672567210.0,
"children": [],
}
],
}
# Mock the profile data response
mock_profile_data = {
"profile": {
"frames": [
{
"function": "main",
"module": "app.main",
"filename": "main.py",
"lineno": 10,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [{"stack_id": 0, "thread_id": "1", "elapsed_since_start_ns": 10000000}],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
mock_response = Mock()
mock_response.status = 200
mock_response.data = orjson.dumps(mock_profile_data)
mock_get_from_profiling_service.return_value = mock_response
profile_result = _get_profile_from_trace_tree(trace_tree, event, self.project)
assert profile_result is not None
assert "execution_tree" in profile_result
assert len(profile_result["execution_tree"]) == 1
assert profile_result["execution_tree"][0]["function"] == "main"
mock_get_from_profiling_service.assert_called_once_with(
"GET",
f"/organizations/{self.project.organization_id}/projects/{self.project.id}/profiles/{profile_id}",
params={"format": "sample"},
)
@patch("sentry.profiles.profile_chunks.get_chunk_ids")
@patch("sentry.seer.explorer.utils.get_from_profiling_service")
def test_get_profile_from_trace_tree_with_profiler_id(
self, mock_get_from_profiling_service, mock_get_chunk_ids
) -> None:
"""Test finding a continuous profile using profiler_id."""
event = Mock()
event.transaction = "/api/test"
profiler_id = "12345678-1234-1234-1234-123456789abc"
trace_tree = {
"trace": [
{
"description": "/api/test",
"profiler_id": profiler_id,
"start_timestamp": 1672567200.0,
"end_timestamp": 1672567210.0,
"children": [],
}
],
}
# Mock continuous profile response (note the "chunk" wrapper)
mock_profile_data = {
"chunk": {
"profile": {
"frames": [
{
"function": "test",
"module": "app",
"filename": "test.py",
"lineno": 5,
"in_app": True,
}
],
"stacks": [[0]],
"samples": [
{"stack_id": 0, "thread_id": "1", "elapsed_since_start_ns": 5000000}
],
"thread_metadata": {"1": {"name": "MainThread"}},
}
}
}
mock_get_chunk_ids.return_value = ["chunk1"]
mock_response = Mock()
mock_response.status = 200
mock_response.data = orjson.dumps(mock_profile_data)
mock_get_from_profiling_service.return_value = mock_response
profile_result = _get_profile_from_trace_tree(trace_tree, event, self.project)
assert profile_result is not None
assert "execution_tree" in profile_result
# Verify continuous profile endpoint was called
mock_get_from_profiling_service.assert_called_once()
args, kwargs = mock_get_from_profiling_service.call_args
assert kwargs["method"] == "POST"
assert (
f"/organizations/{self.project.organization_id}/projects/{self.project.id}/chunks"
in kwargs["path"]
)
assert kwargs["json_data"]["profiler_id"] == profiler_id
def test_get_profile_from_trace_tree_no_matching_transaction(self) -> None:
"""Test that function returns None when no matching transaction is found."""
event = Mock()
event.transaction = "/api/different"
trace_tree = {
"trace": [
{
"description": "/api/other", # Doesn't match
"profile_id": "profile123",
"children": [],
}
],
}
profile_result = _get_profile_from_trace_tree(trace_tree, event, self.project)
assert profile_result is None
def test_get_profile_from_trace_tree_no_transaction_name(self) -> None:
"""Test that function returns None when event has no transaction name."""
event = Mock()
event.transaction = None
trace_tree = {
"trace": [
{
"description": "/api/test",
"profile_id": "profile123",
"children": [],
}
],
}
profile_result = _get_profile_from_trace_tree(trace_tree, event, self.project)
assert profile_result is None
def test_get_profile_from_trace_tree_no_trace_tree(self) -> None:
"""Test that function returns None when trace tree is None."""
event = Mock()
event.transaction = "/api/test"
profile_result = _get_profile_from_trace_tree(None, event, self.project)
assert profile_result is None
@patch("sentry.seer.explorer.utils.get_from_profiling_service")
def test_get_profile_from_trace_tree_api_error(self, mock_get_from_profiling_service) -> None:
"""Test that function returns None when profiling API returns an error."""
event = Mock()
event.transaction = "/api/test"
trace_tree = {
"trace": [
{
"description": "/api/test",
"profile_id": "profile123",
"start_timestamp": 1672567200.0,
"end_timestamp": 1672567210.0,
"children": [],
}
],
}
mock_response = Mock()
mock_response.status = 404
mock_get_from_profiling_service.return_value = mock_response
profile_result = _get_profile_from_trace_tree(trace_tree, event, self.project)
assert profile_result is None
@pytest.mark.django_db
|
TestGetProfileFromTraceTree
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 485250,
"end": 485759
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of CancelSponsorship"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "sponsors_tier")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
sponsors_tier = sgqlc.types.Field("SponsorsTier", graphql_name="sponsorsTier")
"""The tier that was being used at the time of cancellation."""
|
CancelSponsorshipPayload
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-triplets-with-even-xor-set-bits-i.py
|
{
"start": 93,
"end": 742
}
|
class ____(object):
def tripletCount(self, a, b, c):
"""
:type a: List[int]
:type b: List[int]
:type c: List[int]
:rtype: int
"""
def popcount(x):
return bin(x).count('1')
def count(a):
odd = sum(popcount(x)&1 for x in a)
return [len(a)-odd, odd]
cnt = map(count, (a, b, c))
return sum(cnt[0][0 if i == 0 or i == 1 else 1]*cnt[1][0 if i == 0 or i == 2 else 1]*cnt[2][0 if i == 0 or i == 3 else 1] for i in xrange(4))
# Time: O(nlogr), r = max(max(a), max(b), max(c))
# Space: O(1)
# bit manipulation, parity
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/decorator4.py
|
{
"start": 406,
"end": 628
}
|
class ____:
def __init__(self, a, b, c):
pass
v1 = ClassA(1, 2, 3)
reveal_type(v1, expected_text="ClassA")
@decorator1
def func1() -> int:
return 3
v2 = func1()
reveal_type(v2, expected_text="int")
|
ClassA
|
python
|
ansible__ansible
|
test/units/module_utils/basic/test_no_log.py
|
{
"start": 1694,
"end": 6085
}
|
class ____:
OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
dataset_no_remove = (
('string', frozenset(['nope'])),
(1234, frozenset(['4321'])),
(False, frozenset(['4321'])),
(1.0, frozenset(['4321'])),
(['string', 'strang', 'strung'], frozenset(['nope'])),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['nope'])),
(
{
'one': 1,
'two': 'dos',
'three': [
'amigos', 'musketeers', None, {
'ping': 'pong', 'base': ['balls', 'raquets']
}
]
},
frozenset(['nope'])
),
(u'Toshio くら'.encode('utf-8'), frozenset([u'とみ'.encode('utf-8')])),
(u'Toshio くら', frozenset([u'とみ'])),
)
dataset_remove = (
('string', frozenset(['string']), OMIT),
(1234, frozenset(['1234']), OMIT),
(1234, frozenset(['23']), OMIT),
(1.0, frozenset(['1.0']), OMIT),
(['string', 'strang', 'strung'], frozenset(['strang']), ['string', OMIT, 'strung']),
(['string', 'strang', 'strung'], frozenset(['strang', 'string', 'strung']), [OMIT, OMIT, OMIT]),
(('string', 'strang', 'strung'), frozenset(['string', 'strung']), [OMIT, 'strang', OMIT]),
((1234567890, 345678, 987654321), frozenset(['1234567890']), [OMIT, 345678, 987654321]),
((1234567890, 345678, 987654321), frozenset(['345678']), [OMIT, OMIT, 987654321]),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key']), {'one': 1, 'two': 'dos', 'secret': OMIT}),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}),
({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}),
(
{
'one': 1,
'two': 'dos',
'three': [
'amigos', 'musketeers', None, {
'ping': 'pong', 'base': [
'balls', 'raquets'
]
}
]
},
frozenset(['balls', 'base', 'pong', 'amigos']),
{
'one': 1,
'two': 'dos',
'three': [
OMIT, 'musketeers', None, {
'ping': OMIT,
'base': [
OMIT, 'raquets'
]
}
]
}
),
(
'This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery',
frozenset(['enigma', 'mystery', 'secret']),
'This sentence has an ******** wrapped in a ******** inside of a ********. - mr ********'
),
(u'Toshio くらとみ'.encode('utf-8'), frozenset([u'くらとみ'.encode('utf-8')]), u'Toshio ********'.encode('utf-8')),
(u'Toshio くらとみ', frozenset([u'くらとみ']), u'Toshio ********'),
)
def test_no_removal(self):
for value, no_log_strings in self.dataset_no_remove:
assert remove_values(value, no_log_strings) == value
def test_strings_to_remove(self):
for value, no_log_strings, expected in self.dataset_remove:
assert remove_values(value, no_log_strings) == expected
def test_unknown_type(self):
with pytest.raises(TypeError):
remove_values(object(), frozenset())
def test_hit_recursion_limit(self):
""" Check that we do not hit a recursion limit"""
data_list = []
inner_list = data_list
for i in range(0, 10000):
new_list = []
inner_list.append(new_list)
inner_list = new_list
inner_list.append('secret')
# Check that this does not hit a recursion limit
actual_data_list = remove_values(data_list, frozenset(('secret',)))
levels = 0
inner_list = actual_data_list
while True:
if isinstance(inner_list, list):
assert len(inner_list) == 1
else:
levels -= 1
break
inner_list = inner_list[0]
levels += 1
assert inner_list == self.OMIT
assert levels == 10000
|
TestRemoveValues
|
python
|
ray-project__ray
|
python/ray/_private/event/export_event_logger.py
|
{
"start": 1096,
"end": 3006
}
|
class ____(Enum):
"""Enum class representing different types of export event logs.
Each enum value contains a log type name and a set of supported event data types.
Attributes:
TRAIN_STATE: Export events related to training state, supporting train run and attempt events.
SUBMISSION_JOB: Export events related to job submissions.
DATASET_METADATA: Export events related to dataset metadata.
DATASET_OPERATOR_EVENT: Export events related to Ray Data operator.
"""
TRAIN_STATE = (
"EXPORT_TRAIN_STATE",
{ExportTrainRunEventData, ExportTrainRunAttemptEventData},
)
SUBMISSION_JOB = ("EXPORT_SUBMISSION_JOB", {ExportSubmissionJobEventData})
DATASET_METADATA = ("EXPORT_DATASET_METADATA", {ExportDatasetMetadata})
DATASET_OPERATOR_EVENT = (
"EXPORT_DATASET_OPERATOR_EVENT",
{ExportDatasetOperatorEventData},
)
def __init__(self, log_type_name: str, event_types: set[ExportEventDataType]):
"""Initialize an EventLogType enum value.
Args:
log_type_name: String identifier for the log type. This name is used to construct the log file name.
See `_build_export_event_file_logger` for more details.
event_types: Set of event data types that this log type supports.
"""
self.log_type_name = log_type_name
self.event_types = event_types
def supports_event_type(self, event_type: ExportEventDataType) -> bool:
"""Check if this log type supports the given event data type.
Args:
event_type: The event data type to check for support.
Returns:
bool: True if the event type is supported, False otherwise.
"""
return type(event_type) in self.event_types
def generate_event_id():
return "".join([random.choice(string.hexdigits) for _ in range(18)])
|
EventLogType
|
python
|
openai__openai-python
|
src/openai/types/responses/response_reasoning_item_param.py
|
{
"start": 520,
"end": 744
}
|
class ____(TypedDict, total=False):
text: Required[str]
"""The reasoning text from the model."""
type: Required[Literal["reasoning_text"]]
"""The type of the reasoning text. Always `reasoning_text`."""
|
Content
|
python
|
ray-project__ray
|
python/ray/_private/metrics_agent.py
|
{
"start": 1684,
"end": 5083
}
|
class ____(View):
"""Gauge representation of opencensus view.
This class is used to collect process metrics from the reporter agent.
Cpp metrics should be collected in a different way.
"""
def __init__(self, name, description, unit, tags: List[str]):
if _is_invalid_metric_name(name):
raise ValueError(
f"Invalid metric name: {name}. Metric will be discarded "
"and data will not be collected or published. "
"Metric names can only contain letters, numbers, _, and :. "
"Metric names cannot start with numbers."
)
self._measure = measure_module.MeasureInt(name, description, unit)
self._description = description
tags = [tag_key_module.TagKey(tag) for tag in tags]
self._view = View(
name, description, tags, self.measure, aggregation.LastValueAggregation()
)
@property
def measure(self):
return self._measure
@property
def view(self):
return self._view
@property
def name(self):
return self.measure.name
@property
def description(self):
return self._description
Record = namedtuple("Record", ["gauge", "value", "tags"])
def fix_grpc_metric(metric: Metric):
"""
Fix the inbound `opencensus.proto.metrics.v1.Metric` protos to make it acceptable
by opencensus.stats.DistributionAggregationData.
- metric name: gRPC OpenCensus metrics have names with slashes and dots, e.g.
`grpc.io/client/server_latency`[1]. However Prometheus metric names only take
alphanums,underscores and colons[2]. We santinize the name by replacing non-alphanum
chars to underscore, like the official opencensus prometheus exporter[3].
- distribution bucket bounds: The Metric proto asks distribution bucket bounds to
be > 0 [4]. However, gRPC OpenCensus metrics have their first bucket bound == 0 [1].
This makes the `DistributionAggregationData` constructor to raise Exceptions. This
applies to all bytes and milliseconds (latencies). The fix: we update the initial 0
bounds to be 0.000_000_1. This will not affect the precision of the metrics, since
we don't expect any less-than-1 bytes, or less-than-1-nanosecond times.
[1] https://github.com/census-instrumentation/opencensus-specs/blob/master/stats/gRPC.md#units # noqa: E501
[2] https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
[3] https://github.com/census-instrumentation/opencensus-cpp/blob/50eb5de762e5f87e206c011a4f930adb1a1775b1/opencensus/exporters/stats/prometheus/internal/prometheus_utils.cc#L39 # noqa: E501
[4] https://github.com/census-instrumentation/opencensus-proto/blob/master/src/opencensus/proto/metrics/v1/metrics.proto#L218 # noqa: E501
"""
if not metric.metric_descriptor.name.startswith("grpc.io/"):
return
metric.metric_descriptor.name = RE_NON_ALPHANUMS.sub(
"_", metric.metric_descriptor.name
)
for series in metric.timeseries:
for point in series.points:
if point.HasField("distribution_value"):
dist_value = point.distribution_value
bucket_bounds = dist_value.bucket_options.explicit.bounds
if len(bucket_bounds) > 0 and bucket_bounds[0] == 0:
bucket_bounds[0] = 0.000_000_1
|
Gauge
|
python
|
numba__numba
|
numba/tests/test_random.py
|
{
"start": 65375,
"end": 67866
}
|
class ____(ConcurrencyBaseTest):
"""
Check the PRNG behaves well in child processes.
"""
# Avoid nested multiprocessing AssertionError
# ("daemonic processes are not allowed to have children")
_numba_parallel_test_ = False
def extract_in_processes(self, nprocs, extract_randomness):
"""
Run *nprocs* processes extracting randomness
without explicit seeding.
"""
q = multiprocessing.Queue()
results = []
def target_inner():
out = self._get_output(self._extract_iterations)
extract_randomness(seed=0, out=out)
return out
def target():
try:
out = target_inner()
q.put(out)
except Exception as e:
# Ensure an exception in a child gets reported
# in the parent.
q.put(e)
raise
if hasattr(multiprocessing, 'get_context'):
# The test works only in fork context.
mpc = multiprocessing.get_context('fork')
else:
mpc = multiprocessing
procs = [mpc.Process(target=target)
for i in range(nprocs)]
for p in procs:
p.start()
# Need to dequeue before joining, otherwise the large size of the
# enqueued objects will lead to deadlock.
for i in range(nprocs):
results.append(q.get(timeout=5))
for p in procs:
p.join()
# Exercise parent process as well; this will detect if the
# same state was reused for one of the children.
results.append(target_inner())
for res in results:
if isinstance(res, Exception):
self.fail("Exception in child: %s" % (res,))
return results
def check_implicit_initialization(self, extract_randomness):
"""
The PRNG in new processes should be implicitly initialized
with system entropy, to avoid reproducing the same sequences.
"""
results = self.extract_in_processes(2, extract_randomness)
# All processes gave a different, valid random sequence
self.check_several_outputs(results, same_expected=False)
def test_py_implicit_initialization(self):
self.check_implicit_initialization(py_extract_randomness)
def test_np_implicit_initialization(self):
self.check_implicit_initialization(np_extract_randomness)
|
TestProcesses
|
python
|
apache__airflow
|
dev/breeze/tests/test_ui_commands.py
|
{
"start": 3161,
"end": 6378
}
|
class ____:
def test_compare_keys_identical(self, tmp_path):
# Create temporary locale files
en_dir = tmp_path / "en"
en_dir.mkdir()
de_dir = tmp_path / "de"
de_dir.mkdir()
test_data = {"greeting": "Hello", "farewell": "Goodbye"}
(en_dir / "test.json").write_text(json.dumps(test_data))
(de_dir / "test.json").write_text(json.dumps(test_data))
# Mock LOCALES_DIR temporarily
import airflow_breeze.commands.ui_commands as ui_commands
original_locales_dir = ui_commands.LOCALES_DIR
ui_commands.LOCALES_DIR = tmp_path
try:
locale_files = [
LocaleFiles(locale="en", files=["test.json"]),
LocaleFiles(locale="de", files=["test.json"]),
]
summary, missing_counts = compare_keys(locale_files)
assert "test.json" in summary
assert summary["test.json"].missing_keys.get("de", []) == []
assert summary["test.json"].extra_keys.get("de", []) == []
finally:
ui_commands.LOCALES_DIR = original_locales_dir
def test_compare_keys_with_missing(self, tmp_path):
en_dir = tmp_path / "en"
en_dir.mkdir()
de_dir = tmp_path / "de"
de_dir.mkdir()
en_data = {"greeting": "Hello", "farewell": "Goodbye"}
de_data = {"greeting": "Hallo"}
(en_dir / "test.json").write_text(json.dumps(en_data))
(de_dir / "test.json").write_text(json.dumps(de_data))
import airflow_breeze.commands.ui_commands as ui_commands
original_locales_dir = ui_commands.LOCALES_DIR
ui_commands.LOCALES_DIR = tmp_path
try:
locale_files = [
LocaleFiles(locale="en", files=["test.json"]),
LocaleFiles(locale="de", files=["test.json"]),
]
summary, missing_counts = compare_keys(locale_files)
assert "test.json" in summary
assert "farewell" in summary["test.json"].missing_keys.get("de", [])
assert missing_counts["test.json"]["de"] == 1
finally:
ui_commands.LOCALES_DIR = original_locales_dir
def test_compare_keys_with_extra(self, tmp_path):
en_dir = tmp_path / "en"
en_dir.mkdir()
de_dir = tmp_path / "de"
de_dir.mkdir()
en_data = {"greeting": "Hello"}
de_data = {"greeting": "Hallo", "extra": "Extra"}
(en_dir / "test.json").write_text(json.dumps(en_data))
(de_dir / "test.json").write_text(json.dumps(de_data))
import airflow_breeze.commands.ui_commands as ui_commands
original_locales_dir = ui_commands.LOCALES_DIR
ui_commands.LOCALES_DIR = tmp_path
try:
locale_files = [
LocaleFiles(locale="en", files=["test.json"]),
LocaleFiles(locale="de", files=["test.json"]),
]
summary, missing_counts = compare_keys(locale_files)
assert "test.json" in summary
assert "extra" in summary["test.json"].extra_keys.get("de", [])
finally:
ui_commands.LOCALES_DIR = original_locales_dir
|
TestCompareKeys
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/constraints.py
|
{
"start": 5583,
"end": 7620
}
|
class ____(Constraint):
"""MinMaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have the norm between a lower bound and an upper bound.
Also available via the shortcut function `tf.keras.constraints.min_max_norm`.
Args:
min_value: the minimum norm for the incoming weights.
max_value: the maximum norm for the incoming weights.
rate: rate for enforcing the constraint: weights will be
rescaled to yield
`(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
Effectively, this means that rate=1.0 stands for strict
enforcement of the constraint, while rate<1.0 means that
weights will be rescaled at each step to slowly move
towards a value inside the desired interval.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
self.min_value = min_value
self.max_value = max_value
self.rate = rate
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
norms = backend.sqrt(
math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
desired = (
self.rate * backend.clip(norms, self.min_value, self.max_value) +
(1 - self.rate) * norms)
return w * (desired / (backend.epsilon() + norms))
@doc_controls.do_not_generate_docs
def get_config(self):
return {
'min_value': self.min_value,
'max_value': self.max_value,
'rate': self.rate,
'axis': self.axis
}
|
MinMaxNorm
|
python
|
modin-project__modin
|
modin/tests/pandas/native_df_interoperability/test_compiler_caster.py
|
{
"start": 7264,
"end": 7877
}
|
class ____(CalculatorTestQc):
"Represents a query compiler which knows a lot, and wants to avoid work"
def get_backend(self):
return "Lazy"
# encorage other engines to take my workload
def move_to_cost(self, other_qc_cls, api_cls_name, op, arguments):
return QCCoercionCost.COST_ZERO
# try to keep other workloads from getting my workload
@classmethod
def move_to_me_cost(cls, other_qc, api_cls_name, operation, arguments):
if isinstance(other_qc, cls):
return QCCoercionCost.COST_ZERO
return QCCoercionCost.COST_IMPOSSIBLE
|
OmniscientLazyQC
|
python
|
pytorch__pytorch
|
torch/nn/modules/fold.py
|
{
"start": 164,
"end": 6671
}
|
class ____(Module):
(
r"""Combines an array of sliding local blocks into a large containing tensor.
Consider a batched :attr:`input` tensor containing sliding local blocks,
e.g., patches of images, of shape :math:`(N, C \times \prod(\text{kernel\_size}), L)`,
where :math:`N` is batch dimension, :math:`C \times \prod(\text{kernel\_size})`
is the number of values within a block (a block has :math:`\prod(\text{kernel\_size})`
spatial locations each containing a :math:`C`-channeled vector), and
:math:`L` is the total number of blocks. (This is exactly the
same specification as the output shape of :class:`~torch.nn.Unfold`.) This
operation combines these local blocks into the large :attr:`output` tensor
of shape :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
by summing the overlapping values. Similar to :class:`~torch.nn.Unfold`, the
arguments must satisfy
.. math::
L = \prod_d \left\lfloor\frac{\text{output\_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel\_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`d` is over all spatial dimensions.
* :attr:`output_size` describes the spatial shape of the large containing
tensor of the sliding local blocks. It is useful to resolve the ambiguity
when multiple input shapes map to same number of sliding blocks, e.g.,
with ``stride > 0``.
The :attr:`padding`, :attr:`stride` and :attr:`dilation` arguments specify
how the sliding blocks are retrieved.
* :attr:`stride` controls the stride for the sliding blocks.
* :attr:`padding` controls the amount of implicit zero-paddings on both
sides for :attr:`padding` number of points for each dimension before
reshaping.
"""
"""
* :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
"""
r"""
Args:
output_size (int or tuple): the shape of the spatial dimensions of the
output (i.e., ``output.sizes()[2:]``)
kernel_size (int or tuple): the size of the sliding blocks
dilation (int or tuple, optional): a parameter that controls the
stride of elements within the
neighborhood. Default: 1
padding (int or tuple, optional): implicit zero padding to be added on
both sides of input. Default: 0
stride (int or tuple): the stride of the sliding blocks in the input
spatial dimensions. Default: 1
* If :attr:`output_size`, :attr:`kernel_size`, :attr:`dilation`,
:attr:`padding` or :attr:`stride` is an int or a tuple of length 1 then
their values will be replicated across all spatial dimensions.
* For the case of two output spatial dimensions this operation is sometimes
called ``col2im``.
.. note::
:class:`~torch.nn.Fold` calculates each combined value in the resulting
large tensor by summing all values from all containing blocks.
:class:`~torch.nn.Unfold` extracts the values in the local blocks by
copying from the large tensor. So, if the blocks overlap, they are not
inverses of each other.
In general, folding and unfolding operations are related as
follows. Consider :class:`~torch.nn.Fold` and
:class:`~torch.nn.Unfold` instances created with the same
parameters:
>>> fold_params = dict(kernel_size=..., dilation=..., padding=..., stride=...)
>>> fold = nn.Fold(output_size=..., **fold_params)
>>> unfold = nn.Unfold(**fold_params)
Then for any (supported) ``input`` tensor the following
equality holds:
::
fold(unfold(input)) == divisor * input
where ``divisor`` is a tensor that depends only on the shape
and dtype of the ``input``:
>>> # xdoctest: +SKIP
>>> input_ones = torch.ones(input.shape, dtype=input.dtype)
>>> divisor = fold(unfold(input_ones))
When the ``divisor`` tensor contains no zero elements, then
``fold`` and ``unfold`` operations are inverses of each
other (up to constant divisor).
.. warning::
Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.
Shape:
- Input: :math:`(N, C \times \prod(\text{kernel\_size}), L)` or :math:`(C \times \prod(\text{kernel\_size}), L)`
- Output: :math:`(N, C, \text{output\_size}[0], \text{output\_size}[1], \dots)`
or :math:`(C, \text{output\_size}[0], \text{output\_size}[1], \dots)` as described above
Examples::
>>> fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2))
>>> input = torch.randn(1, 3 * 2 * 2, 12)
>>> output = fold(input)
>>> output.size()
torch.Size([1, 3, 4, 5])
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
)
__constants__ = ["output_size", "kernel_size", "dilation", "padding", "stride"]
output_size: _size_any_t
kernel_size: _size_any_t
dilation: _size_any_t
padding: _size_any_t
stride: _size_any_t
def __init__(
self,
output_size: _size_any_t,
kernel_size: _size_any_t,
dilation: _size_any_t = 1,
padding: _size_any_t = 0,
stride: _size_any_t = 1,
) -> None:
super().__init__()
self.output_size = output_size
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.fold(
input,
self.output_size,
self.kernel_size,
self.dilation,
self.padding,
self.stride,
)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return (
"output_size={output_size}, kernel_size={kernel_size}, "
"dilation={dilation}, padding={padding}, stride={stride}".format(
**self.__dict__
)
)
|
Fold
|
python
|
psf__black
|
tests/data/cases/docstring_no_string_normalization.py
|
{
"start": 37,
"end": 1814
}
|
class ____:
'''
A multiline class docstring.
'''
def AnEquallyLonelyMethod(self):
'''
A multiline method docstring'''
pass
def one_function():
'''This is a docstring with a single line of text.'''
pass
def shockingly_the_quotes_are_normalized():
'''This is a multiline docstring.
This is a multiline docstring.
This is a multiline docstring.
'''
pass
def foo():
"""This is a docstring with
some lines of text here
"""
return
def baz():
'''"This" is a string with some
embedded "quotes"'''
return
def poit():
"""
Lorem ipsum dolor sit amet.
Consectetur adipiscing elit:
- sed do eiusmod tempor incididunt ut labore
- dolore magna aliqua
- enim ad minim veniam
- quis nostrud exercitation ullamco laboris nisi
- aliquip ex ea commodo consequat
"""
pass
def under_indent():
"""
These lines are indented in a way that does not
make sense.
"""
pass
def over_indent():
"""
This has a shallow indent
- But some lines are deeper
- And the closing quote is too deep
"""
pass
def single_line():
"""But with a newline after it!
"""
pass
def this():
r"""
'hey ho'
"""
def that():
""" "hey yah" """
def and_that():
"""
"hey yah" """
def and_this():
'''
"hey yah"'''
def believe_it_or_not_this_is_in_the_py_stdlib(): '''
"hey yah"'''
def shockingly_the_quotes_are_normalized_v2():
'''
Docstring Docstring Docstring
'''
pass
def backslash_space():
'\ '
def multiline_backslash_1():
'''
hey\there\
\ '''
def multiline_backslash_2():
'''
hey there \ '''
def multiline_backslash_3():
'''
already escaped \\ '''
# output
|
ALonelyClass
|
python
|
google__jax
|
tests/pallas/tpu_pallas_test.py
|
{
"start": 2547,
"end": 2895
}
|
class ____(jtu.JaxTestCase):
INTERPRET: bool = False
def setUp(self):
if not jtu.test_device_matches(['tpu']) and not self.INTERPRET:
self.skipTest('Test requires TPUs, or interpret mode')
super().setUp()
def pallas_call(self, *args, **kwargs):
return pl.pallas_call(*args, **kwargs, interpret=self.INTERPRET)
|
PallasBaseTest
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/runtime/state/test_presentation.py
|
{
"start": 1163,
"end": 12636
}
|
class ____:
def __init__(self) -> None:
self._new_widget_state = _FakeWStates()
def _get_widget_metadata(self, widget_id: str) -> WidgetMetadata[Any] | None:
return self._new_widget_state.widget_metadata.get(widget_id)
def test_apply_presenter_returns_base_when_no_meta() -> None:
"""Return base value unchanged when widget metadata is missing."""
ss = _FakeSession()
base = {"value": 1}
out = apply_presenter(ss, "wid", base)
assert out is base
def test_apply_presenter_returns_base_when_no_presenter() -> None:
"""Return base value unchanged when metadata has no presenter."""
ss = _FakeSession()
ss._new_widget_state.widget_metadata["wid"] = SimpleNamespace()
base = [1, 2, 3]
out = apply_presenter(ss, "wid", base)
assert out is base
def test_apply_presenter_applies_presenter() -> None:
"""Apply the registered presenter to the base value."""
def _presenter(base: Any, _ss: Any) -> Any:
return {"presented": base}
ss = _FakeSession()
ss._new_widget_state.widget_metadata["wid"] = SimpleNamespace(presenter=_presenter)
base = {"value": 123}
out = apply_presenter(ss, "wid", base)
assert out == {"presented": {"value": 123}}
def test_apply_presenter_swallows_presenter_errors() -> None:
"""Return base value unchanged if presenter raises an exception."""
def _boom(_base: Any, _ss: Any) -> Any:
raise RuntimeError("boom")
ss = _FakeSession()
ss._new_widget_state.widget_metadata["wid"] = SimpleNamespace(presenter=_boom)
base = "hello"
out = apply_presenter(ss, "wid", base)
assert out is base
def test_presenter_applied_once_via_getitem_and_filtered_state() -> None:
"""Presenter must be applied exactly once for both __getitem__ and filtered_state.
We simulate a widget with a user key mapping and attach a presenter that wraps
the base value in a dict. Double application would produce nested wrapping.
"""
ss = SessionState()
# Simulate a widget with element id and user key mapping
widget_id = "$$ID-abc-ukey"
user_key = "ukey"
# Register metadata with a no-op deserializer/serializer for a simple string
meta = WidgetMetadata[str](
id=widget_id,
deserializer=lambda v: v,
serializer=lambda v: v,
value_type="string_value",
)
ss._set_widget_metadata(meta)
ss._set_key_widget_mapping(widget_id, user_key)
# Set the underlying widget value in new widget state
ss._new_widget_state.set_from_value(widget_id, "base")
# Install a presenter that wraps once
def _wrap_once(base: Any, _ss: Any) -> Any:
return {"presented": base}
# Attach presenter to metadata store
ss._new_widget_state.widget_metadata[widget_id] = SimpleNamespace(
id=widget_id,
deserializer=meta.deserializer,
serializer=meta.serializer,
value_type=meta.value_type,
presenter=_wrap_once,
)
# Access via __getitem__ using the widget id; should apply once
got = ss[widget_id]
assert got == {"presented": "base"}
# Access via filtered_state using the user key; should apply once
filtered = ss.filtered_state
assert filtered[user_key] == {"presented": "base"}
def test_get_widget_states_uses_base_value_not_presented() -> None:
"""Serialized widget states must contain base (unpresented) values.
This ensures presentation is only applied for user-facing access via
`st.session_state[...]` and `filtered_state`, while serialization stays
lossless and stable.
"""
ss = SessionState()
# Create widget metadata for a simple string and register mapping.
widget_id = "$$ID-abc-ukey"
user_key = "ukey"
meta = WidgetMetadata[str](
id=widget_id,
deserializer=lambda v: v,
serializer=lambda v: v,
value_type="string_value",
)
ss._set_widget_metadata(meta)
ss._set_key_widget_mapping(widget_id, user_key)
# Underlying base value stored in widget state.
base_value = "raw"
ss._new_widget_state.set_from_value(widget_id, base_value)
# Presenter that would wrap the base value if applied.
def _wrap(base: Any, _ss: Any) -> Any:
return {"presented": base}
# Attach presenter on metadata store (simulating element registration that
# enriches the metadata entry).
ss._new_widget_state.widget_metadata[widget_id] = SimpleNamespace(
id=widget_id,
deserializer=meta.deserializer,
serializer=meta.serializer,
value_type=meta.value_type,
presenter=_wrap,
)
# Verify that user-facing access applies presentation.
assert ss[widget_id] == {"presented": base_value}
# Now get serialized widget states; these should contain the base value
# via the `string_value` field and not the presented wrapper.
states = ss.get_widget_states()
assert len(states) == 1
st = states[0]
# Ensure we serialized as string_value and not JSON or other wrappers
assert st.WhichOneof("value") == "string_value"
assert st.string_value == base_value
def test_filtered_state_includes_keyed_element_when_not_internal() -> None:
"""filtered_state includes user key for keyed element ids when not internal."""
ss = SessionState()
widget_id = "$$ID-abc-ukey"
user_key = "ukey"
# Minimal metadata and value
meta = WidgetMetadata[str](
id=widget_id,
deserializer=lambda v: v,
serializer=lambda v: v,
value_type="string_value",
)
ss._set_widget_metadata(meta)
ss._new_widget_state.set_from_value(widget_id, "base")
ss._set_key_widget_mapping(widget_id, user_key)
filtered = ss.filtered_state
assert user_key in filtered
assert filtered[user_key] == "base"
def test_filtered_state_excludes_keyed_element_when_internal(monkeypatch) -> None:
"""filtered_state excludes entries when _is_internal_key(k) is True for the id."""
import streamlit.runtime.state.session_state as ss_mod
ss = SessionState()
widget_id = "$$ID-internal-ukey"
user_key = "ukey"
meta = WidgetMetadata[str](
id=widget_id,
deserializer=lambda v: v,
serializer=lambda v: v,
value_type="string_value",
)
ss._set_widget_metadata(meta)
ss._new_widget_state.set_from_value(widget_id, "base")
ss._set_key_widget_mapping(widget_id, user_key)
# Patch _is_internal_key to treat this widget_id as internal
original = ss_mod._is_internal_key
monkeypatch.setattr(
ss_mod,
"_is_internal_key",
lambda k: True if k == widget_id else original(k),
raising=True,
)
filtered = ss.filtered_state
assert user_key not in filtered
def test_session_state_merges_ccv2_trigger_values_via_presenter() -> None:
"""Integration: SessionState uses presenter to merge CCv2 trigger values.
We simulate a CCv2 component with a persistent state widget and an internal
trigger aggregator. The component registers a presenter via the shared
facade. We then assert that SessionState.filtered_state (user-facing view)
returns the persistent state merged with the latest trigger values, while
the underlying stored state remains unmodified.
"""
session_state = SessionState()
# Simulate a component persistent state widget with user key mapping
component_id = "$$ID-bidi_component-my_component"
user_key = "my_component"
session_state._key_id_mapper[user_key] = component_id
# Store base persistent state as flat mapping
base_persistent = {"alpha": 1}
session_state._new_widget_state.states[component_id] = Value(base_persistent)
session_state._new_widget_state.widget_metadata[component_id] = WidgetMetadata(
id=component_id,
deserializer=lambda x: x,
serializer=lambda x: x,
value_type="json_value",
)
# Create trigger aggregator and payloads
aggregator_id = _make_trigger_id(component_id, "events")
session_state._new_widget_state.widget_metadata[aggregator_id] = WidgetMetadata(
id=aggregator_id,
deserializer=lambda x: x,
serializer=lambda x: x,
value_type="json_trigger_value",
)
session_state._new_widget_state.states[aggregator_id] = Value(
[
{"event": "foo", "value": True},
{"event": "bar", "value": 123},
]
)
# Attach presenter (what bidi_component.py does during registration)
presenter = make_bidi_component_presenter(aggregator_id)
meta = session_state._new_widget_state.widget_metadata[component_id]
object.__setattr__(meta, "presenter", presenter)
# User-visible filtered state should show merged view
merged = session_state.filtered_state[user_key]
assert dict(merged) == {"alpha": 1, "foo": True, "bar": 123}
# Underlying stored state remains unmodified
assert session_state._new_widget_state.states[component_id].value is base_persistent
def test_session_state_presenter_errors_degrade_gracefully() -> None:
"""Integration: presenter exceptions should not break SessionState access.
If a presenter raises, SessionState should fall back to the base value
without propagating exceptions to the caller.
"""
session_state = SessionState()
component_id = "$$ID-bidi_component-err_component"
user_key = "err_component"
session_state._key_id_mapper[user_key] = component_id
base_persistent: dict[str, Any] = {"value": {"x": 1}}
session_state._new_widget_state.states[component_id] = Value(base_persistent)
meta = WidgetMetadata(
id=component_id,
deserializer=lambda x: x,
serializer=lambda x: x,
value_type="json_value",
)
object.__setattr__(
meta, "presenter", lambda _b, _s: exec('raise RuntimeError("boom")')
)
session_state._new_widget_state.widget_metadata[component_id] = meta
# Access should not raise; should return base value instead
assert session_state.filtered_state[user_key] == base_persistent
def test_bidi_presenter_state_overrides_duplicate_keys() -> None:
"""State must override trigger values on duplicate keys.
This verifies the merge precedence documented in the presenter and in
BidiComponentResult: triggers are surfaced first, but persistent state
wins for duplicate keys.
"""
class _FakeWStates2:
def __init__(self) -> None:
self.widget_metadata: dict[str, Any] = {}
self._payloads: dict[str, Any] = {}
def __getitem__(self, k: str) -> Any: # emulate __getitem__ for payloads
if k not in self._payloads:
raise KeyError(k)
return self._payloads[k]
class _FakeSession2:
def __init__(self) -> None:
self._new_widget_state = _FakeWStates2()
ss = _FakeSession2()
agg_id = "$$__internal__events"
presenter = make_bidi_component_presenter(agg_id)
ss._new_widget_state.widget_metadata[agg_id] = SimpleNamespace(
value_type="json_trigger_value"
)
ss._new_widget_state._payloads[agg_id] = [
{"event": "shared", "value": "trigger"},
{"event": "only_trigger", "value": 1},
]
base = {"shared": "state", "only_state": 2}
out = presenter(base, ss)
assert dict(out) == {
"shared": "state",
"only_trigger": 1,
"only_state": 2,
}
|
_FakeSession
|
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 77572,
"end": 80783
}
|
class ____(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
|
CANTest
|
python
|
pypa__pipenv
|
pipenv/vendor/click/exceptions.py
|
{
"start": 2523,
"end": 4137
}
|
class ____(UsageError):
"""An exception that formats out a standardized error message for a
bad parameter. This is useful when thrown from a callback or type as
Click will attach contextual information to it (for instance, which
parameter it is).
.. versionadded:: 2.0
:param param: the parameter object that caused this error. This can
be left out, and Click will attach this info itself
if possible.
:param param_hint: a string that shows up as parameter name. This
can be used as alternative to `param` in cases
where custom validation should happen. If it is
a string it's used as such, if it's a list then
each item is quoted and separated.
"""
def __init__(
self,
message: str,
ctx: t.Optional["Context"] = None,
param: t.Optional["Parameter"] = None,
param_hint: t.Optional[str] = None,
) -> None:
super().__init__(message, ctx)
self.param = param
self.param_hint = param_hint
def format_message(self) -> str:
if self.param_hint is not None:
param_hint = self.param_hint
elif self.param is not None:
param_hint = self.param.get_error_hint(self.ctx) # type: ignore
else:
return _("Invalid value: {message}").format(message=self.message)
return _("Invalid value for {param_hint}: {message}").format(
param_hint=_join_param_hints(param_hint), message=self.message
)
|
BadParameter
|
python
|
pydantic__pydantic
|
pydantic-core/python/pydantic_core/core_schema.py
|
{
"start": 127891,
"end": 129463
}
|
class ____(TypedDict, total=False):
name: Required[str]
schema: Required[CoreSchema]
mode: Literal[
'positional_only',
'positional_or_keyword',
'keyword_only',
'var_args',
'var_kwargs_uniform',
'var_kwargs_unpacked_typed_dict',
] # default positional_or_keyword
alias: Union[str, list[Union[str, int]], list[list[Union[str, int]]]]
def arguments_v3_parameter(
name: str,
schema: CoreSchema,
*,
mode: Literal[
'positional_only',
'positional_or_keyword',
'keyword_only',
'var_args',
'var_kwargs_uniform',
'var_kwargs_unpacked_typed_dict',
]
| None = None,
alias: str | list[str | int] | list[list[str | int]] | None = None,
) -> ArgumentsV3Parameter:
"""
Returns a schema that matches an argument parameter, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
param = core_schema.arguments_v3_parameter(
name='a', schema=core_schema.str_schema(), mode='positional_only'
)
schema = core_schema.arguments_v3_schema([param])
v = SchemaValidator(schema)
assert v.validate_python({'a': 'hello'}) == (('hello',), {})
```
Args:
name: The name to use for the argument parameter
schema: The schema to use for the argument parameter
mode: The mode to use for the argument parameter
alias: The alias to use for the argument parameter
"""
return _dict_not_none(name=name, schema=schema, mode=mode, alias=alias)
|
ArgumentsV3Parameter
|
python
|
django__django
|
django/contrib/auth/forms.py
|
{
"start": 1944,
"end": 2701
}
|
class ____(forms.CharField):
def to_python(self, value):
value = super().to_python(value)
if self.max_length is not None and len(value) > self.max_length:
# Normalization can increase the string length (e.g.
# "ff" -> "ff", "½" -> "1⁄2") but cannot reduce it, so there is no
# point in normalizing invalid data. Moreover, Unicode
# normalization is very slow on Windows and can be a DoS attack
# vector.
return value
return unicodedata.normalize("NFKC", value)
def widget_attrs(self, widget):
return {
**super().widget_attrs(widget),
"autocapitalize": "none",
"autocomplete": "username",
}
|
UsernameField
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/alloy_datasource.py
|
{
"start": 358,
"end": 984
}
|
class ____(SQLDatasource):
"""Adds an alloy datasource to the data context.
Args:
name: The name of this alloy datasource.
connection_string: The connection string used to connect to the postgres database.
For example: "postgresql+psycopg2://<username>:<password>@<hostname>:<port>/<database_name>"
assets: An optional dictionary whose keys are TableAsset or QueryAsset names and whose
values are TableAsset or QueryAsset objects.
"""
type: Literal["alloy"] = "alloy" # type: ignore[assignment]
connection_string: Union[ConfigStr, PostgresDsn]
|
AlloyDatasource
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/rolling.py
|
{
"start": 10461,
"end": 10777
}
|
class ____:
params = ["var", "std", "cov", "corr"]
param_names = ["method"]
def setup(self, method):
df = pd.DataFrame({"A": range(50), "B": range(50)})
self.gb_ewm = df.groupby("A").ewm(com=1.0)
def time_groupby_method(self, method):
getattr(self.gb_ewm, method)()
|
GroupbyEWM
|
python
|
ipython__ipython
|
IPython/lib/demo.py
|
{
"start": 21775,
"end": 22608
}
|
class ____:
"""Use this mixin to make Demo classes with less visual clutter.
Demos using this mixin will clear the screen before every block and use
blank marquees.
Note that in order for the methods defined here to actually override those
of the classes it's mixed with, it must go /first/ in the inheritance
tree. For example:
class ClearIPDemo(ClearMixin,IPythonDemo): pass
will provide an IPythonDemo class with the mixin's features.
"""
def marquee(self,txt='',width=78,mark='*'):
"""Blank marquee that returns '' no matter what the input."""
return ''
def pre_cmd(self):
"""Method called before executing each block.
This one simply clears the screen."""
from IPython.utils.terminal import _term_clear
_term_clear()
|
ClearMixin
|
python
|
google__jax
|
jax/experimental/jax2tf/tests/call_tf_test.py
|
{
"start": 30868,
"end": 40538
}
|
class ____(tf_test_util.JaxToTfTestCase):
"""Reloading output of jax2tf into JAX with call_tf."""
def setUp(self):
if tf is None:
raise unittest.SkipTest("Test requires tensorflow")
# TODO(b/171320191): this line works around a missing context initialization
# bug in TensorFlow.
_ = tf.add(1, 1)
super().setUp()
def test_simple(self):
f_jax = jnp.sin
f_jax_rt = jax2tf.call_tf(jax2tf.convert(f_jax))
x = np.float32(0.7)
self.assertAllClose(f_jax(x), f_jax_rt(x))
def test_pytree(self):
def f_jax(x): # x: dict(a=f32, b=f32)
return dict(a=x["a"]+1., b=x)
x = dict(a=0.7, b=0.8)
f_jax_rt = jax2tf.call_tf(jax2tf.convert(f_jax))
self.assertAllClose(f_jax(x), f_jax_rt(x))
def test_custom_grad(self):
@jax.custom_vjp
def f(x):
return x * x
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), np.float32(3.) * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * ct_b,
f.defvjp(f_fwd, f_bwd)
f_rt = jax2tf.call_tf(jax2tf.convert(f, with_gradient=True))
x = np.float32(0.7)
self.assertAllClose(f(x), f_rt(x))
self.assertAllClose(jax.grad(f)(x), jax.grad(f_rt)(x))
def test_shape_poly(self):
f_jax = jnp.sin
f_jax_rt = jax2tf.call_tf(jax2tf.convert(f_jax,
polymorphic_shapes=["(b, ...)"]))
x = np.array([0.7, 0.8], dtype=np.float32)
self.assertAllClose(f_jax(x), f_jax_rt(x))
def test_saved_model_simple(self):
x = np.array([0.7, 0.8], dtype=np.float32)
def f_jax(x):
return jnp.sin(x)
f_tf = jax2tf.convert(f_jax)
restored_tf, _ = tf_test_util.SaveAndLoadFunction(f_tf, input_args=[x])
restored_jax = jax2tf.call_tf(restored_tf)
self.assertAllClose(f_jax(x), restored_jax(x))
def test_saved_model_variables(self):
param = np.array([1., 2.], dtype=np.float32)
x = np.array([0.7, 0.8], dtype=np.float32)
def f_jax(param, x):
return jnp.sin(x) + jnp.cos(param)
param_v = tf.Variable(param)
f_tf = jax2tf.convert(f_jax)
_, restored_model = tf_test_util.SaveAndLoadFunction(
lambda x: f_tf(param_v, x),
input_args=[x],
variables=[param_v])
restored_jax = jax2tf.call_tf(restored_model.f)
self.assertAllClose(f_jax(param, x), restored_jax(x))
self.assertAllClose(f_jax(param, x), jax.jit(restored_jax)(x))
self.assertAllClose(f_jax(param, x), jax2tf.convert(restored_jax)(x))
self.assertAllClose(f_jax(param, x),
tf.function(jax2tf.convert(restored_jax),
autograph=False)(x))
self.assertAllClose(f_jax(param, x),
tf.function(jax2tf.convert(restored_jax),
autograph=True)(x))
def test_saved_model_shape_poly(self):
tracing_count = 0
x = np.array([0.7, 0.8], dtype=np.float32)
def f_jax(x):
nonlocal tracing_count
tracing_count += 1
return jnp.sin(x)
f_tf = jax2tf.convert(f_jax, polymorphic_shapes=["(b, ...)"])
res_jax = f_jax(x)
self.assertEqual(1, tracing_count)
# Will trace twice, it seems. Once to get the result signature, and once again
# for the actual saving.
restored_f, _ = tf_test_util.SaveAndLoadFunction(
f_tf, input_signature=[tf.TensorSpec([None], x.dtype)])
self.assertGreaterEqual(tracing_count, 2)
tracing_count = 0
f_jax_rt = jax2tf.call_tf(restored_f)
self.assertAllClose(res_jax, f_jax_rt(x))
# Ensure that restored_f works at other batch size as well
y = np.concatenate([x, x])
self.assertEqual(0, tracing_count)
res_jax_y = f_jax(y)
self.assertEqual(1, tracing_count)
# No more tracing for f_jax_rt
self.assertAllClose(res_jax_y, f_jax_rt(y))
self.assertEqual(1, tracing_count)
def test_custom_grad_saved_model(self):
@jax.custom_vjp
def f(x):
return x * x
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), np.float32(3.) * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * ct_b,
f.defvjp(f_fwd, f_bwd)
def g(x):
return jnp.sum(f(x))
g_tf, _ = tf_test_util.SaveAndLoadFunction(
jax2tf.convert(g, with_gradient=True),
input_signature=[tf.TensorSpec(shape=(1,), dtype=tf.float32)],
)
g_rt = jax2tf.call_tf(g_tf)
x = np.array([0.7], dtype=np.float32)
self.assertAllClose(g(x), g_rt(x))
self.assertAllClose(jax.grad(g)(x), jax.grad(g_rt)(x))
def test_without_gradient_saved_model(self):
# Explicitly with_gradient=False
f_jax = jnp.sum
x = np.array([0.7, 0.8], dtype=np.float32)
f_tf, _ = tf_test_util.SaveAndLoadFunction(
jax2tf.convert(f_jax, with_gradient=False),
input_args=[x])
f_rt = jax2tf.call_tf(f_tf)
self.assertAllClose(f_jax(x), f_rt(x))
with self.assertRaisesRegex(Exception,
"Gradient explicitly disabled.*jax2tf-converted function does not support gradients. Use `with_gradient` parameter to enable gradients"):
jax.grad(f_rt)(x)
def test_saved_model_no_gradients(self):
# Save without gradients
f_jax = jnp.sum
x = np.array([0.7, 0.8], dtype=np.float32)
f_tf, _ = tf_test_util.SaveAndLoadFunction(
jax2tf.convert(f_jax, with_gradient=True), input_args=[x],
save_gradients=False)
f_rt = jax2tf.call_tf(f_tf)
self.assertAllClose(f_jax(x), f_rt(x))
# TODO: clean this up b/191117111: it should fail with a clear error
# The following results in a confusing error:
# TypeError: tf.Graph captured an external symbolic tensor.
with self.assertRaises(TypeError):
_ = jax.grad(f_rt)(x)
def test_call_tf_under_function_context(self):
def fun_jax(x, y):
z = jax2tf.call_tf(tf.math.sin)(x) + jnp.cos(y)
return z
x = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
y = np.array([-0.5, 0.0, 0.5], dtype=np.float32)
converted_fun = tf.function(jax2tf.convert(fun_jax))
expected = np.sin(x) + np.cos(y)
res = tf.function(converted_fun, jit_compile=True, autograph=False)(x, y)
self.assertAllClose(expected, res.numpy(), atol=1e-5, rtol=1e-5)
@parameterized.named_parameters(
dict(
testcase_name=f"_{dtype.__name__}",
dtype=dtype,
)
for dtype in set(jtu.dtypes.all_floating)
)
def test_all_floating_input_gradient(self, dtype):
def tf_f(x):
res = tf.math.sin(x)
return tf.reduce_sum(res)
jax_f = jax2tf.call_tf(tf_f)
tf_f_rt = jax2tf.convert(jax_f)
x = jnp.array([5.0, 6.0, 7.0]).astype(dtype)
def assert_all_close_support_bfloat16(baseline, candidate):
def conversion(x):
# convert scalar to array and bfloat16 to float32
# to support self.assertAllClose numpy array comparison.
if x.shape == tf.TensorShape([]):
x = tf.convert_to_tensor([x])
if dtype == jnp.float16:
x = tf.cast(x, tf.float32)
return x
baseline = jax.tree_util.tree_map(conversion, baseline)
candidate = jax.tree_util.tree_map(conversion, candidate)
tol = (
1e-2
if jtu.test_device_matches(["tpu"]) and dtype == np.float16
else None
)
self.assertAllClose(baseline, candidate, atol=tol, rtol=tol)
# Eager mode
assert_all_close_support_bfloat16(tf_f(x), tf_f_rt(x))
# Compiled function mode
assert_all_close_support_bfloat16(
tf.function(tf_f)(x), tf.function(tf_f_rt)(x)
)
# Compiled function mode with jit_compiled=True
assert_all_close_support_bfloat16(
tf.function(tf_f, jit_compile=True)(x),
tf.function(tf_f_rt, jit_compile=True)(x),
)
# RoundTrip test for the gradient
grad_fun_jax = jax.grad(jax2tf.call_tf(tf_f))
grad_fun_jax_rt = jax2tf.call_tf(jax2tf.convert(grad_fun_jax))
# Eager mode
assert_all_close_support_bfloat16(grad_fun_jax(x), grad_fun_jax_rt(x))
# Jit mode
assert_all_close_support_bfloat16(
jax.jit(grad_fun_jax)(x), jax.jit(grad_fun_jax_rt)(x)
)
@parameterized.named_parameters(
dict(
testcase_name=f"_{dtype.__name__}",
dtype=dtype,
)
for dtype in set(jtu.dtypes.complex)
)
def test_complex_input_gradient(self, dtype):
def tf_f(x):
res = tf.math.sin(x)
return tf.reduce_sum(res)
x = jnp.array([(5.0 + 4.0j), (6.0 + 3.0j), (7.0 + 8.0j)]).astype(dtype)
jax_f = jax2tf.call_tf(tf_f)
tf_f_rt = jax2tf.convert(jax_f)
# Eager mode
self.assertAllClose(tf_f(x), tf_f_rt(x))
# tf.function context
self.assertAllClose(tf.function(tf_f)(x), tf.function(tf_f_rt)(x))
# tf.function context with jit_compiled=True
self.assertAllClose(
tf.function(tf_f, jit_compile=True)(x),
tf.function(tf_f_rt, jit_compile=True)(x),
)
# RoundTrip test for the gradient
grad_fun_jax = jax.grad(jax2tf.call_tf(tf_f), holomorphic=True)
grad_fun_jax_rt = jax2tf.call_tf(jax2tf.convert(grad_fun_jax))
# Eager mode
self.assertAllClose(grad_fun_jax(x), grad_fun_jax_rt(x))
# Jit mode
self.assertAllClose(jax.jit(grad_fun_jax)(x), jax.jit(grad_fun_jax_rt)(x))
def test_grad_pytree_arg_with_none_leaf(self):
def tf_f(x, params):
return x * params["y"]
x = jnp.array(1.0)
y = jnp.array(2.0)
actual = jax.grad(
jax2tf.call_tf(tf_f), argnums=(1,))(x, {"y": y, "other": None})
self.assertDictEqual(actual[0], {"y": x, "other": None})
@jtu.thread_unsafe_test_class()
|
RoundTripToJaxTest
|
python
|
walkccc__LeetCode
|
solutions/1027. Longest Arithmetic Subsequence/1027.py
|
{
"start": 0,
"end": 426
}
|
class ____:
def longestArithSeqLength(self, nums: list[int]) -> int:
n = len(nums)
ans = 0
# dp[i][k] := the length of the longest arithmetic subsequence of nums[0..i]
# with k = diff + 500
dp = [[0] * 1001 for _ in range(n)]
for i in range(n):
for j in range(i):
k = nums[i] - nums[j] + 500
dp[i][k] = max(2, dp[j][k] + 1)
ans = max(ans, dp[i][k])
return ans
|
Solution
|
python
|
agronholm__apscheduler
|
src/apscheduler/triggers/cron/expressions.py
|
{
"start": 903,
"end": 2109
}
|
class ____:
value_re: ClassVar[Pattern] = re.compile(r"\*(?:/(?P<step>\d+))?$")
step: int | None = attrs.field(
converter=as_int,
validator=optional([instance_of(int), positive_number]),
default=None,
)
def validate_range(self, field_name: str, min_value: int, max_value: int) -> None:
value_range = max_value - min_value
if self.step and self.step > value_range:
raise ValueError(
f"the step value ({self.step}) is higher than the total range of the "
f"expression ({value_range})"
)
def get_next_value(self, dateval: datetime, field: BaseField) -> int | None:
start = field.get_value(dateval)
minval = field.get_min(dateval)
maxval = field.get_max(dateval)
start = max(start, minval)
if not self.step:
nextval = start
else:
distance_to_next = (self.step - (start - minval)) % self.step
nextval = start + distance_to_next
return nextval if nextval <= maxval else None
def __str__(self) -> str:
return f"*/{self.step}" if self.step else "*"
@attrs.define(kw_only=True)
|
AllExpression
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 3058,
"end": 3464
}
|
class ____:
code: str
msg_template: str
def __init__(self, **ctx: Any) -> None:
self.__dict__ = ctx
def __str__(self) -> str:
return self.msg_template.format(**self.__dict__)
def __reduce__(self) -> Tuple[Callable[..., 'PydanticErrorMixin'], Tuple[Type['PydanticErrorMixin'], 'DictStrAny']]:
return cls_kwargs, (self.__class__, self.__dict__)
|
PydanticErrorMixin
|
python
|
numba__numba
|
numba/cuda/simulator/cudadrv/devicearray.py
|
{
"start": 3378,
"end": 13789
}
|
class ____(object):
'''
Implements the interface of a DeviceArray/DeviceRecord, but mostly just
wraps a NumPy array.
'''
__cuda_ndarray__ = True # There must be gpu_data attribute
def __init__(self, ary, stream=0):
self._ary = ary
self.stream = stream
@property
def alloc_size(self):
return self._ary.nbytes
@property
def nbytes(self):
# return nbytes -- FakeCUDAArray is a wrapper around NumPy
return self._ary.nbytes
def __getattr__(self, attrname):
try:
attr = getattr(self._ary, attrname)
return attr
except AttributeError as e:
msg = "Wrapped array has no attribute '%s'" % attrname
raise AttributeError(msg) from e
def bind(self, stream=0):
return FakeCUDAArray(self._ary, stream)
@property
def T(self):
return self.transpose()
def transpose(self, axes=None):
return FakeCUDAArray(np.transpose(self._ary, axes=axes))
def __getitem__(self, idx):
ret = self._ary.__getitem__(idx)
if type(ret) not in [np.ndarray, np.void]:
return ret
else:
return FakeCUDAArray(ret, stream=self.stream)
def __setitem__(self, idx, val):
return self._ary.__setitem__(idx, val)
def copy_to_host(self, ary=None, stream=0):
if ary is None:
ary = np.empty_like(self._ary)
else:
check_array_compatibility(self, ary)
np.copyto(ary, self._ary)
return ary
def copy_to_device(self, ary, stream=0):
'''
Copy from the provided array into this array.
This may be less forgiving than the CUDA Python implementation, which
will copy data up to the length of the smallest of the two arrays,
whereas this expects the size of the arrays to be equal.
'''
sentry_contiguous(self)
self_core, ary_core = array_core(self), array_core(ary)
if isinstance(ary, FakeCUDAArray):
sentry_contiguous(ary)
check_array_compatibility(self_core, ary_core)
else:
ary_core = np.array(
ary_core,
order='C' if self_core.flags['C_CONTIGUOUS'] else 'F',
subok=True,
copy=False if numpy_version < (2, 0) else None)
check_array_compatibility(self_core, ary_core)
np.copyto(self_core._ary, ary_core)
@property
def shape(self):
return FakeShape(self._ary.shape)
def ravel(self, *args, **kwargs):
return FakeCUDAArray(self._ary.ravel(*args, **kwargs))
def reshape(self, *args, **kwargs):
return FakeCUDAArray(self._ary.reshape(*args, **kwargs))
def view(self, *args, **kwargs):
return FakeCUDAArray(self._ary.view(*args, **kwargs))
def is_c_contiguous(self):
return self._ary.flags.c_contiguous
def is_f_contiguous(self):
return self._ary.flags.f_contiguous
def __str__(self):
return str(self._ary)
def __repr__(self):
return repr(self._ary)
def __len__(self):
return len(self._ary)
# TODO: Add inplace, bitwise, unary magic methods
# (or maybe inherit this class from numpy)?
def __eq__(self, other):
return FakeCUDAArray(self._ary == other)
def __ne__(self, other):
return FakeCUDAArray(self._ary != other)
def __lt__(self, other):
return FakeCUDAArray(self._ary < other)
def __le__(self, other):
return FakeCUDAArray(self._ary <= other)
def __gt__(self, other):
return FakeCUDAArray(self._ary > other)
def __ge__(self, other):
return FakeCUDAArray(self._ary >= other)
def __add__(self, other):
return FakeCUDAArray(self._ary + other)
def __sub__(self, other):
return FakeCUDAArray(self._ary - other)
def __mul__(self, other):
return FakeCUDAArray(self._ary * other)
def __floordiv__(self, other):
return FakeCUDAArray(self._ary // other)
def __truediv__(self, other):
return FakeCUDAArray(self._ary / other)
def __mod__(self, other):
return FakeCUDAArray(self._ary % other)
def __pow__(self, other):
return FakeCUDAArray(self._ary ** other)
def split(self, section, stream=0):
    # Split along axis 0 into chunks of at most `section` rows, each wrapped
    # as a fake device array.  `stream` is accepted for API compatibility
    # with the real device array but has no effect in the simulator.
    return [
        FakeCUDAArray(a)
        for a in np.split(self._ary, range(section, len(self), section))
    ]
def array_core(ary):
    """
    Extract the repeated core of a broadcast array.

    Broadcast arrays are by definition non-contiguous due to repeated
    dimensions, i.e., dimensions with stride 0. In order to ascertain memory
    contiguity and copy the underlying data from such arrays, we must create
    a view without the repeated dimensions.
    """
    # 0-d arrays have empty strides and empty arrays have no data to strip;
    # both are returned unchanged.
    if not ary.strides or not ary.size:
        return ary
    # Collapse every broadcast (stride-0) axis to index 0; keep other axes
    # whole via slice(None).
    selector = tuple(0 if stride == 0 else slice(None)
                     for stride in ary.strides)
    return ary[selector]
def is_contiguous(ary):
    """
    Returns True iff `ary` is C-style contiguous while ignoring
    broadcasted and 1-sized dimensions.

    As opposed to array_core(), it does not call require_context(),
    which can be quite expensive.
    """
    expected_stride = ary.dtype.itemsize
    # Walk axes from innermost to outermost; axes of length <= 1 or with a
    # broadcast stride of 0 do not constrain the layout and are skipped.
    for extent, stride in zip(reversed(ary.shape), reversed(ary.strides)):
        if extent <= 1 or stride == 0:
            continue
        if stride != expected_stride:
            return False
        expected_stride *= extent
    return True
def sentry_contiguous(ary):
    # Guard: after stripping broadcast dimensions, the remaining core must be
    # C- or F-contiguous so it can be copied as a flat buffer; otherwise the
    # module-level contiguity error is raised.
    core = array_core(ary)
    if not core.flags['C_CONTIGUOUS'] and not core.flags['F_CONTIGUOUS']:
        raise ValueError(errmsg_contiguous_buffer)
def check_array_compatibility(ary1, ary2):
    # Compatibility is judged on squeezed views (1-sized axes ignored), but
    # error messages report the original, unsqueezed shapes and strides.
    squeezed1, squeezed2 = ary1.squeeze(), ary2.squeeze()
    if ary1.dtype != ary2.dtype:
        raise TypeError('incompatible dtype: %s vs. %s' %
                        (ary1.dtype, ary2.dtype))
    if squeezed1.shape != squeezed2.shape:
        raise ValueError('incompatible shape: %s vs. %s' %
                         (ary1.shape, ary2.shape))
    if squeezed1.strides != squeezed2.strides:
        raise ValueError('incompatible strides: %s vs. %s' %
                         (ary1.strides, ary2.strides))
def to_device(ary, stream=0, copy=True, to=None):
    # Simulator analogue of transferring a host array to the device: returns
    # a FakeCUDAArray owning a copy of `ary`'s data, or — when `to` is given —
    # copies into that existing array instead (returning None in that branch).
    # `stream` and `copy` are accepted for API compatibility.
    ary = np.array(ary,
                   copy=False if numpy_version < (2, 0) else None,
                   subok=True)
    sentry_contiguous(ary)
    if to is None:
        # datetime64/timedelta64 dtypes ('M'/'m' type chars) get int64 backing
        # storage, then the ndarray is viewed back as the original dtype.
        buffer_dtype = np.int64 if ary.dtype.char in 'Mm' else ary.dtype
        return FakeCUDAArray(
            np.ndarray(
                buffer=np.copy(array_core(ary)).view(buffer_dtype),
                dtype=ary.dtype,
                shape=ary.shape,
                strides=ary.strides,
            ).view(type=type(ary)),
        )
    else:
        to.copy_to_device(ary, stream=stream)
@contextmanager
def pinned(arg):
    # Pinned (page-locked) memory has no meaning in the simulator; this is a
    # no-op context manager kept only for API compatibility.
    yield
def mapped_array(*args, **kwargs):
    # Host-mapped memory does not exist in the simulator: discard the options
    # that only matter on real hardware and allocate a plain device array.
    kwargs.pop('portable', None)
    kwargs.pop('wc', None)
    return device_array(*args, **kwargs)
def pinned_array(shape, dtype=np.float64, strides=None, order='C'):
    # "Pinned" host memory in the simulator is just an ordinary ndarray.
    return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order)
def managed_array(shape, dtype=np.float64, strides=None, order='C'):
    # "Managed" (unified) memory in the simulator is just an ordinary ndarray.
    return np.ndarray(shape=shape, strides=strides, dtype=dtype, order=order)
def device_array(*args, **kwargs):
    # Allocate an uninitialized simulator "device" array: a FakeCUDAArray
    # wrapping a host ndarray.  `stream` must be extracted (defaulting to 0)
    # rather than forwarded, because np.ndarray does not accept it.
    stream = kwargs.pop('stream', 0)
    return FakeCUDAArray(np.ndarray(*args, **kwargs), stream=stream)
def _contiguous_strides_like_array(ary):
"""
Given an array, compute strides for a new contiguous array of the same
shape.
"""
# Don't recompute strides if the default strides will be sufficient to
# create a contiguous array.
if ary.flags['C_CONTIGUOUS'] or ary.flags['F_CONTIGUOUS'] or ary.ndim <= 1:
return None
# Otherwise, we need to compute new strides using an algorithm adapted from
# NumPy v1.17.4's PyArray_NewLikeArrayWithShape in
# core/src/multiarray/ctors.c. We permute the strides in ascending order
# then compute the stride for the dimensions with the same permutation.
# Stride permutation. E.g. a stride array (4, -2, 12) becomes
# [(1, -2), (0, 4), (2, 12)]
strideperm = [ x for x in enumerate(ary.strides) ]
strideperm.sort(key=lambda x: x[1])
# Compute new strides using permutation
strides = [0] * len(ary.strides)
stride = ary.dtype.itemsize
for i_perm, _ in strideperm:
strides[i_perm] = stride
stride *= ary.shape[i_perm]
return tuple(strides)
def _order_like_array(ary):
if ary.flags['F_CONTIGUOUS'] and not ary.flags['C_CONTIGUOUS']:
return 'F'
else:
return 'C'
def device_array_like(ary, stream=0):
    # Allocate an uninitialized simulator array matching `ary`'s shape, dtype
    # and memory layout (axis ordering preserved via computed strides).
    # NOTE(review): `stream` is accepted but not forwarded to device_array —
    # presumably irrelevant in the simulator; confirm against the real API.
    strides = _contiguous_strides_like_array(ary)
    order = _order_like_array(ary)
    return device_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
                        order=order)
def pinned_array_like(ary):
    # Allocate an uninitialized "pinned" host array matching `ary`'s shape,
    # dtype and memory layout (axis ordering preserved via computed strides).
    strides = _contiguous_strides_like_array(ary)
    order = _order_like_array(ary)
    return pinned_array(shape=ary.shape, dtype=ary.dtype, strides=strides,
                        order=order)
def auto_device(ary, stream=0, copy=True):
    # Return the input unchanged when it is already a fake device array,
    # otherwise transfer it to the "device".  The second tuple element
    # reports whether a conversion took place.
    if isinstance(ary, FakeCUDAArray):
        return ary, False
    # np.void scalars (structured-dtype records) are passed through to
    # to_device as-is rather than re-wrapped by np.array.
    if not isinstance(ary, np.void):
        ary = np.array(
            ary,
            copy=False if numpy_version < (2, 0) else None,
            subok=True)
    return to_device(ary, stream, copy), True
def is_cuda_ndarray(obj):
    "Check if an object is a CUDA ndarray"
    # Duck-typed check: device arrays advertise themselves via the
    # `__cuda_ndarray__` attribute; a missing attribute means "not one".
    return getattr(obj, '__cuda_ndarray__', False)
def verify_cuda_ndarray_interface(obj):
    "Verify the CUDA ndarray interface for an obj"
    require_cuda_ndarray(obj)
    # Each required attribute must both exist and carry the expected type.
    for attr, typ in (('shape', tuple), ('strides', tuple),
                      ('dtype', np.dtype), ('size', int)):
        if not hasattr(obj, attr):
            raise AttributeError(attr)
        if not isinstance(getattr(obj, attr), typ):
            raise AttributeError('%s must be of type %s' % (attr, typ))
def require_cuda_ndarray(obj):
    "Raises ValueError if is_cuda_ndarray(obj) evaluates False"
    if not is_cuda_ndarray(obj):
        raise ValueError('require an cuda ndarray object')
|
FakeCUDAArray
|
python
|
scrapy__scrapy
|
tests/spiders.py
|
{
"start": 10338,
"end": 11150
}
|
class ____(MetaSpider):
seed = None
callback_func = None
errback_func = None
async def start(self):
if isinstance(self.seed, Request):
yield self.seed.replace(callback=self.parse, errback=self.on_error)
else:
yield Request(self.seed, callback=self.parse, errback=self.on_error)
def parse(self, response):
self.meta.setdefault("responses", []).append(response)
if callable(self.callback_func):
return self.callback_func(response)
if "next" in response.meta:
return response.meta["next"]
return None
def on_error(self, failure):
self.meta["failure"] = failure
if callable(self.errback_func):
return self.errback_func(failure)
return None
|
SingleRequestSpider
|
python
|
doocs__leetcode
|
solution/1500-1599/1582.Special Positions in a Binary Matrix/Solution.py
|
{
"start": 0,
"end": 447
}
|
class ____:
def numSpecial(self, mat: List[List[int]]) -> int:
rows = [0] * len(mat)
cols = [0] * len(mat[0])
for i, row in enumerate(mat):
for j, x in enumerate(row):
rows[i] += x
cols[j] += x
ans = 0
for i, row in enumerate(mat):
for j, x in enumerate(row):
ans += x == 1 and rows[i] == 1 and cols[j] == 1
return ans
|
Solution
|
python
|
Pylons__pyramid
|
src/pyramid/httpexceptions.py
|
{
"start": 16786,
"end": 17275
}
|
class ____(_HTTPMove):
"""
subclass of :class:`~_HTTPMove`
This indicates that the requested resource corresponds to any one
of a set of representations, each with its own specific location,
and agent-driven negotiation information is being provided so that
the user can select a preferred representation and redirect its
request to that location.
code: 300, title: Multiple Choices
"""
code = 300
title = 'Multiple Choices'
|
HTTPMultipleChoices
|
python
|
getsentry__sentry
|
tests/sentry/receivers/outbox/test_control.py
|
{
"start": 594,
"end": 3246
}
|
class ____(TestCase):
identifier = 1
@patch("sentry.receivers.outbox.control.maybe_process_tombstone")
def test_process_integration_updates(self, mock_maybe_process: MagicMock) -> None:
process_integration_updates(
object_identifier=self.identifier, region_name=_TEST_REGION.name
)
mock_maybe_process.assert_called_with(
Integration, self.identifier, region_name=_TEST_REGION.name
)
@patch("sentry.receivers.outbox.control.maybe_process_tombstone")
def test_process_api_application_updates(self, mock_maybe_process: MagicMock) -> None:
process_api_application_updates(
object_identifier=self.identifier, region_name=_TEST_REGION.name
)
mock_maybe_process.assert_called_with(
ApiApplication, self.identifier, region_name=_TEST_REGION.name
)
@patch("sentry.sentry_apps.tasks.sentry_apps.region_caching_service")
def test_process_sentry_app_updates(self, mock_caching: MagicMock) -> None:
org = self.create_organization()
sentry_app = self.create_sentry_app()
install = self.create_sentry_app_installation(slug=sentry_app.slug, organization=org)
install_dupe = self.create_sentry_app_installation(slug=sentry_app.slug, organization=org)
org_two = self.create_organization()
install_two = self.create_sentry_app_installation(
slug=sentry_app.slug, organization=org_two
)
with self.tasks():
process_sentry_app_updates(
object_identifier=sentry_app.id, region_name=_TEST_REGION.name
)
mock_caching.clear_key.assert_any_call(
key=f"app_service.get_installation:{install.id}", region_name=_TEST_REGION.name
)
mock_caching.clear_key.assert_any_call(
key=f"app_service.get_installation:{install_dupe.id}", region_name=_TEST_REGION.name
)
mock_caching.clear_key.assert_any_call(
key=f"app_service.get_installation:{install_two.id}", region_name=_TEST_REGION.name
)
mock_caching.clear_key.assert_any_call(
key=f"app_service.get_by_application_id:{sentry_app.application_id}",
region_name=_TEST_REGION.name,
)
mock_caching.clear_key.assert_any_call(
key=f"app_service.get_installed_for_organization:{org.id}",
region_name=_TEST_REGION.name,
)
mock_caching.clear_key.assert_any_call(
key=f"app_service.get_installed_for_organization:{org_two.id}",
region_name=_TEST_REGION.name,
)
|
ProcessControlOutboxTest
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/test/utils.py
|
{
"start": 1244,
"end": 1300
}
|
class ____(TypedDict):
path: Sequence[str]
|
GqlAssetKey
|
python
|
aio-libs__aiohttp
|
aiohttp/payload.py
|
{
"start": 1001,
"end": 1108
}
|
class ____(Exception):
"""Raised when no payload factory is found for the given data type."""
|
LookupError
|
python
|
huggingface__transformers
|
src/transformers/models/data2vec/modeling_data2vec_vision.py
|
{
"start": 29980,
"end": 31345
}
|
class ____(PreTrainedModel):
config: Data2VecVisionConfig
base_model_prefix = "data2vec_vision"
input_modalities = ("image",)
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["Data2VecVisionLayer"]
_keys_to_ignore_on_load_unexpected = [r".*relative_position_index.*"]
_supports_sdpa = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, Data2VecVisionEmbeddings):
init.zeros_(module.cls_token)
if module.mask_token is not None:
init.zeros_(module.mask_token)
if module.position_embeddings is not None:
init.zeros_(module.position_embeddings)
elif isinstance(module, Data2VecVisionRelativePositionBias):
init.zeros_(module.relative_position_bias_table)
elif isinstance(module, Data2VecVisionLayer):
if module.lambda_1 is not None:
init.constant_(module.lambda_1, self.config.layer_scale_init_value)
init.constant_(module.lambda_2, self.config.layer_scale_init_value)
@auto_docstring
# Copied from transformers.models.beit.modeling_beit.BeitModel with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,True->False
|
Data2VecVisionPreTrainedModel
|
python
|
ray-project__ray
|
python/ray/serve/tests/common/test_modules.py
|
{
"start": 867,
"end": 1588
}
|
class ____:
def __init__(
self,
m1: DeploymentHandle,
m2: Union[DeploymentHandle, Dict[str, DeploymentHandle]],
m2_nested: bool = False,
):
self.m1 = m1
self.m2 = m2.get(NESTED_HANDLE_KEY) if m2_nested else m2
async def __call__(self, req):
if isinstance(self.m1, ActorHandle) and isinstance(self.m2, ActorHandle):
r1_ref = self.m1.forward.remote(req)
r2_ref = self.m2.forward.remote(req)
else:
r1_ref = await self.m1.forward.remote(req)._to_object_ref()
r2_ref = await self.m2.forward.remote(req)._to_object_ref()
return sum(await asyncio.gather(r1_ref, r2_ref))
@serve.deployment
|
Combine
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/project_performance_issue_settings.py
|
{
"start": 11573,
"end": 16844
}
|
class ____(ProjectEndpoint):
owner = ApiOwner.ISSUE_DETECTION_BACKEND
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectSettingPermission,)
def has_feature(self, project, request) -> bool:
return features.has(
"organizations:performance-view", project.organization, actor=request.user
)
def get(self, request: Request, project) -> Response:
"""
Retrieve performance issue settings
``````````````````
Return settings for performance issues
:pparam string organization_id_or_slug: the id or slug of the organization the
project belongs to.
:pparam string project_id_or_slug: the id or slug of the project to configure.
:auth: required
"""
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
return Response(get_merged_settings(project))
def put(self, request: Request, project) -> Response:
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
internal_only_settings = [setting.value for setting in InternalProjectOptions]
threshold_settings = [setting.value for setting in ConfigurableThresholds]
allowed_settings_options = [*internal_only_settings, *threshold_settings]
body_has_invalid_options = not request.data or any(
[option not in allowed_settings_options for option in request.data]
)
if body_has_invalid_options:
return Response(
{
"detail": "Invalid settings option",
},
status=status.HTTP_400_BAD_REQUEST,
)
body_has_admin_options = any([option in request.data for option in internal_only_settings])
if body_has_admin_options and not superuser_has_permission(request):
return Response(
{
"detail": {
"message": "Passed options are only modifiable internally",
"code": "superuser-required",
},
},
status=status.HTTP_403_FORBIDDEN,
)
body_has_management_options = any(
[option in get_management_options() for option in request.data]
)
serializer = ProjectPerformanceIssueSettingsSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
performance_issue_settings_default = projectoptions.get_well_known_default(
SETTINGS_PROJECT_OPTION_KEY,
project=project,
)
performance_issue_settings = project.get_option(
SETTINGS_PROJECT_OPTION_KEY, default=performance_issue_settings_default
)
current_settings = {**performance_issue_settings_default, **performance_issue_settings}
data = serializer.validated_data
payload_contains_disabled_threshold_setting = any(
[option in get_disabled_threshold_options(data, current_settings) for option in data]
)
if payload_contains_disabled_threshold_setting:
return Response(
{"detail": "Disabled options can not be modified"},
status=status.HTTP_403_FORBIDDEN,
)
project.update_option(
SETTINGS_PROJECT_OPTION_KEY,
{**performance_issue_settings_default, **performance_issue_settings, **data},
)
if body_has_admin_options or body_has_management_options:
self.create_audit_entry(
request=self.request,
actor=request.user,
organization=project.organization,
target_object=project.id,
event=audit_log.get_event_id("PROJECT_PERFORMANCE_ISSUE_DETECTION_CHANGE"),
data={**data, **project.get_audit_log_data()},
)
return Response(data)
def delete(self, request: Request, project) -> Response:
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
project_settings = project.get_option(SETTINGS_PROJECT_OPTION_KEY, default={})
management_options = get_management_options()
threshold_options = [setting.value for setting in ConfigurableThresholds]
disabled_options = get_disabled_threshold_options(threshold_options, project_settings)
if project_settings:
unchanged_options = (
{ # Management settings and disabled threshold settings can not be reset
option: project_settings[option]
for option in project_settings
if option in management_options or option in disabled_options
}
)
project.update_option(SETTINGS_PROJECT_OPTION_KEY, unchanged_options)
return Response(status=status.HTTP_204_NO_CONTENT)
|
ProjectPerformanceIssueSettingsEndpoint
|
python
|
spack__spack
|
lib/spack/spack/build_environment.py
|
{
"start": 36774,
"end": 49276
}
|
class ____:
"""This class encapsulates the logic to determine environment modifications, and is used as
well to set globals in modules of package.py."""
def __init__(self, *specs: spack.spec.Spec, context: Context) -> None:
"""Construct a ModificationsFromDag object.
Args:
specs: single root spec for build/test context, possibly more for run context
context: build, run, or test"""
if (context == Context.BUILD or context == Context.TEST) and not len(specs) == 1:
raise ValueError("Cannot setup build environment for multiple specs")
specs_with_type = effective_deptypes(*specs, context=context)
self.specs = specs
self.context = context
self.external: List[Tuple[spack.spec.Spec, UseMode]]
self.nonexternal: List[Tuple[spack.spec.Spec, UseMode]]
# Reverse so we go from leaf to root
self.nodes_in_subdag = set(id(s) for s, _ in specs_with_type)
# Split into non-external and external, maintaining topo order per group.
self.external, self.nonexternal = stable_partition(
reversed(specs_with_type), lambda t: t[0].external
)
self.should_be_runnable = UseMode.BUILDTIME_DIRECT | UseMode.RUNTIME_EXECUTABLE
self.should_setup_run_env = (
UseMode.BUILDTIME_DIRECT | UseMode.RUNTIME | UseMode.RUNTIME_EXECUTABLE
)
self.should_setup_dependent_build_env = UseMode.BUILDTIME | UseMode.BUILDTIME_DIRECT
self.should_setup_build_env = UseMode.ROOT if context == Context.BUILD else UseMode(0)
if context == Context.RUN or context == Context.TEST:
self.should_be_runnable |= UseMode.ROOT
self.should_setup_run_env |= UseMode.ROOT
# Everything that calls setup_run_environment and setup_dependent_* needs globals set.
self.should_set_package_py_globals = (
self.should_setup_dependent_build_env | self.should_setup_run_env | UseMode.ROOT
)
# In a build context, the root needs build-specific globals set.
self.needs_build_context = UseMode.ROOT
def set_all_package_py_globals(self):
"""Set the globals in modules of package.py files."""
for dspec, flag in chain(self.external, self.nonexternal):
pkg = dspec.package
if self.should_set_package_py_globals & flag:
if self.context == Context.BUILD and self.needs_build_context & flag:
set_package_py_globals(pkg, context=Context.BUILD)
else:
# This includes runtime dependencies, also runtime deps of direct build deps.
set_package_py_globals(pkg, context=Context.RUN)
# Looping over the set of packages a second time
# ensures all globals are loaded into the module space prior to
# any package setup. This guarantees package setup methods have
# access to expected module level definitions such as "spack_cc"
for dspec, flag in chain(self.external, self.nonexternal):
pkg = dspec.package
for spec in dspec.dependents():
# Note: some specs have dependents that are unreachable from the root, so avoid
# setting globals for those.
if id(spec) not in self.nodes_in_subdag:
continue
dependent_module = ModuleChangePropagator(spec.package)
pkg.setup_dependent_package(dependent_module, spec)
dependent_module.propagate_changes_to_mro()
def get_env_modifications(self) -> EnvironmentModifications:
"""Returns the environment variable modifications for the given input specs and context.
Environment modifications include:
- Updating PATH for packages that are required at runtime
- Updating CMAKE_PREFIX_PATH and PKG_CONFIG_PATH so that their respective
tools can find Spack-built dependencies (when context=build)
- Running custom package environment modifications: setup_run_environment,
setup_dependent_run_environment, setup_build_environment,
setup_dependent_build_environment.
The (partial) order imposed on the specs is externals first, then topological
from leaf to root. That way externals cannot contribute search paths that would shadow
Spack's prefixes, and dependents override variables set by dependencies."""
env = EnvironmentModifications()
for dspec, flag in chain(self.external, self.nonexternal):
tty.debug(f"Adding env modifications for {dspec.name}")
pkg = dspec.package
if self.should_setup_dependent_build_env & flag:
self._make_buildtime_detectable(dspec, env)
for root in self.specs: # there is only one root in build context
spack.builder.create(pkg).setup_dependent_build_environment(env, root)
if self.should_setup_build_env & flag:
spack.builder.create(pkg).setup_build_environment(env)
if self.should_be_runnable & flag:
self._make_runnable(dspec, env)
if self.should_setup_run_env & flag:
run_env_mods = EnvironmentModifications()
for spec in dspec.dependents(deptype=dt.LINK | dt.RUN):
if id(spec) in self.nodes_in_subdag:
pkg.setup_dependent_run_environment(run_env_mods, spec)
pkg.setup_run_environment(run_env_mods)
external_env = (dspec.extra_attributes or {}).get("environment", {})
if external_env:
run_env_mods.extend(spack.schema.environment.parse(external_env))
if self.context == Context.BUILD:
# Don't let the runtime environment of compiler like dependencies leak into the
# build env
run_env_mods.drop("CC", "CXX", "F77", "FC")
env.extend(run_env_mods)
return env
def _make_buildtime_detectable(self, dep: spack.spec.Spec, env: EnvironmentModifications):
if is_system_path(dep.prefix):
return
env.prepend_path("CMAKE_PREFIX_PATH", dep.prefix)
for d in ("lib", "lib64", "share"):
pcdir = os.path.join(dep.prefix, d, "pkgconfig")
if os.path.isdir(pcdir):
env.prepend_path("PKG_CONFIG_PATH", pcdir)
def _make_runnable(self, dep: spack.spec.Spec, env: EnvironmentModifications):
if is_system_path(dep.prefix):
return
for d in ("bin", "bin64"):
bin_dir = os.path.join(dep.prefix, d)
if os.path.isdir(bin_dir):
env.prepend_path("PATH", bin_dir)
def load_external_modules(context: SetupContext) -> None:
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
context: A populated SetupContext object
"""
for spec, _ in context.external:
external_modules = spec.external_modules or []
for external_module in external_modules:
load_module(external_module)
def _setup_pkg_and_run(
serialized_pkg: "spack.subprocess_context.PackageInstallContext",
function: Callable,
kwargs: Dict,
write_pipe: Connection,
input_pipe: Optional[Connection],
jsfd1: Optional[Connection],
jsfd2: Optional[Connection],
):
"""Main entry point in the child process for Spack builds.
``_setup_pkg_and_run`` is called by the child process created in
``start_build_process()``, and its main job is to run ``function()`` on behalf of
some Spack installation (see :ref:`spack.installer.PackageInstaller._complete_task`).
The child process is passed a ``write_pipe``, on which it's expected to send one of
the following:
* ``StopPhase``: error raised by a build process indicating it's stopping at a
particular build phase.
* ``BaseException``: any exception raised by a child build process, which will be
wrapped in ``ChildError`` (which adds a bunch of debug info and log context) and
raised in the parent.
* The return value of ``function()``, which can be anything (except an exception).
This is returned to the caller.
Note: ``jsfd1`` and ``jsfd2`` are passed solely to ensure that the child process
does not close these file descriptors. Some ``multiprocessing`` backends will close
them automatically in the child if they are not passed at process creation time.
Arguments:
serialized_pkg: Spack package install context object (serialized form of the
package that we'll build in the child process).
function: function to call in the child process; serialized_pkg is passed to
this as the first argument.
kwargs: additional keyword arguments to pass to ``function()``.
write_pipe: multiprocessing ``Connection`` to the parent process, to which the
child *must* send a result (or an error) back to parent on.
input_multiprocess_fd: stdin from the parent (not passed currently on Windows)
jsfd1: gmake Jobserver file descriptor 1.
jsfd2: gmake Jobserver file descriptor 2.
"""
context: str = kwargs.get("context", "build")
try:
# We are in the child process. Python sets sys.stdin to open(os.devnull) to prevent our
# process and its parent from simultaneously reading from the original stdin. But, we
# assume that the parent process is not going to read from it till we are done with the
# child, so we undo Python's precaution. closefd=False since Connection has ownership.
if input_pipe is not None:
sys.stdin = os.fdopen(input_pipe.fileno(), closefd=False)
pkg = serialized_pkg.restore()
if not kwargs.get("fake", False):
kwargs["unmodified_env"] = os.environ.copy()
kwargs["env_modifications"] = setup_package(
pkg, dirty=kwargs.get("dirty", False), context=Context.from_string(context)
)
return_value = function(pkg, kwargs)
write_pipe.send(return_value)
except spack.error.StopPhase as e:
# Do not create a full ChildError from this, it's not an error
# it's a control statement.
write_pipe.send(e)
except BaseException as e:
# catch ANYTHING that goes wrong in the child process
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
exc_type = type(e)
tb = e.__traceback__
tb_string = "".join(traceback.format_exception(exc_type, e, tb))
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
logfile = None
if context == "build":
try:
if hasattr(pkg, "log_path"):
logfile = pkg.log_path
except NameError:
# 'pkg' is not defined yet
pass
elif context == "test":
logfile = os.path.join(pkg.test_suite.stage, pkg.test_suite.test_log_name(pkg.spec))
error_msg = str(e)
if isinstance(e, (spack.multimethod.NoSuchMethodError, AttributeError)):
process = "test the installation" if context == "test" else "build from sources"
error_msg = (
"The '{}' package cannot find an attribute while trying to {}. You can fix this "
"by updating the {} recipe, and you can also report the issue as a build-error or "
"a bug at https://github.com/spack/spack/issues"
).format(pkg.name, process, context)
error_msg = colorize("@*R{{{}}}".format(error_msg))
error_msg = "{}\n\n{}".format(str(e), error_msg)
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, error_msg)
ce = ChildError(
msg,
exc_type.__module__,
exc_type.__name__,
tb_string,
logfile,
context,
package_context,
)
write_pipe.send(ce)
finally:
write_pipe.close()
if input_pipe is not None:
input_pipe.close()
|
SetupContext
|
python
|
spyder-ide__spyder
|
spyder/plugins/variableexplorer/widgets/arrayeditor.py
|
{
"start": 2061,
"end": 2116
}
|
class ____:
Options = 'options_menu'
|
ArrayEditorMenus
|
python
|
huggingface__transformers
|
src/transformers/models/musicgen_melody/modeling_musicgen_melody.py
|
{
"start": 33630,
"end": 57973
}
|
class ____(MusicgenMelodyPreTrainedModel, GenerationMixin):
output_modalities = ("audio",)
def __init__(self, config: MusicgenMelodyDecoderConfig):
super().__init__(config)
self.model = MusicgenMelodyModel(config)
self.num_codebooks = config.num_codebooks
self.lm_heads = nn.ModuleList(
[nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)]
)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_heads
def set_output_embeddings(self, new_embeddings):
self.lm_heads = new_embeddings
@auto_docstring
# Ignore copy
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, MusicgenMelodyOutputWithPast]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`):
Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.
Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.
[What are input IDs?](../glossary#input-ids)
<Tip warning={true}>
The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
`input_ids`.
</Tip>
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states representing the concatenation of the text encoder output and the processed audio encoder output.
Used as a conditional signal and will thus be concatenated to the projected `decoder_input_ids`.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing attention on conditional hidden states. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (labels is not None) and (input_ids is None and inputs_embeds is None):
input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.bos_token_id)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1)
loss = None
if labels is not None:
# since encoder hidden states have been concatenated to the decoder hidden states,
# we take the last timestamps corresponding to labels
logits = lm_logits[:, :, -labels.shape[1] :]
loss_fct = CrossEntropyLoss()
loss = torch.zeros([], device=self.device)
# per codebook cross-entropy
# ref: https://github.com/facebookresearch/audiocraft/blob/69fea8b290ad1b4b40d28f92d1dfc0ab01dbab85/audiocraft/solvers/musicgen.py#L242-L243
# -100 labels are ignored
labels = labels.masked_fill(labels == self.config.pad_token_id, -100)
# per codebook cross-entropy
for codebook in range(self.config.num_codebooks):
codebook_logits = logits[:, codebook].contiguous().view(-1, logits.shape[-1])
codebook_labels = labels[..., codebook].contiguous().view(-1)
loss += loss_fct(codebook_logits, codebook_labels)
loss = loss / self.config.num_codebooks
# (bsz, num_codebooks, seq_len, vocab_size) -> (bsz * num_codebooks, seq_len, vocab_size)
lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:])
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MusicgenMelodyOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Ignore copy
def prepare_inputs_for_generation(
self,
input_ids,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=True,
delay_pattern_mask=None,
guidance_scale=None,
**kwargs,
):
# Overwritten -- MusicGen has custom processing
if delay_pattern_mask is None:
input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
input_ids,
pad_token_id=self.generation_config.pad_token_id,
max_length=self.generation_config.max_length,
)
# apply the delay pattern mask
input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask)
if guidance_scale is not None and guidance_scale > 1:
# for classifier free guidance we need to replicate the decoder args across the batch dim (we'll split these
# before sampling)
input_ids = input_ids.repeat((2, 1))
if attention_mask is not None:
attention_mask = attention_mask.repeat((2, 1))
if encoder_hidden_states is not None:
encoder_hidden_states = torch.concatenate(
[encoder_hidden_states, torch.zeros_like(encoder_hidden_states)], dim=0
)
if encoder_attention_mask is not None:
encoder_attention_mask = torch.concatenate(
encoder_attention_mask, torch.zeros_like(encoder_attention_mask), dim=0
)
if past_key_values is not None:
input_ids = input_ids[:, -1:]
# we only want to use conditional signal in the 1st generation step but keeping the attention mask
encoder_hidden_states = None
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
"past_key_values": past_key_values,
"use_cache": use_cache,
}
def build_delay_pattern_mask(
self, input_ids: torch.LongTensor, pad_token_id: int, max_length: Optional[int] = None
):
"""Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by
one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there
are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks,
seq_len)`:
- [P, -1, -1, -1, -1, P, P, P]
- [P, P, -1, -1, -1, -1, P, P]
- [P, P, P, -1, -1, -1, -1, P]
- [P, P, P, P, -1, -1, -1, -1]
where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include
a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the
mask is set to the value in the prompt:
- [P, a, b, -1, -1, P, P, P]
- [P, P, c, d, -1, -1, P, P]
- [P, P, P, e, f, -1, -1, P]
- [P, P, P, P, g, h, -1, -1]
where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1
tokens in our prediction.
"""
# (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len)
input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1])
bsz, num_codebooks, seq_len = input_ids.shape
max_length = max_length if max_length is not None else self.generation_config.max_length
input_ids_shifted = (
torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1
)
channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks
# we only apply the mask if we have a large enough seq len - otherwise we return as is
if max_length < 2 * channel_codebooks - 1:
return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1)
# fill the shifted ids with the prompt entries, offset by the codebook idx
for codebook in range(channel_codebooks):
if self.config.audio_channels == 1:
# mono channel - loop over the codebooks one-by-one
input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook]
else:
# left/right channels are interleaved in the generated codebooks, so handle one then the other
input_ids_shifted[:, 2 * codebook, codebook : seq_len + codebook] = input_ids[:, 2 * codebook]
input_ids_shifted[:, 2 * codebook + 1, codebook : seq_len + codebook] = input_ids[:, 2 * codebook + 1]
# construct a pattern mask that indicates the positions of padding tokens for each codebook
# first fill the upper triangular part (the EOS padding)
delay_pattern = torch.triu(
torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1
)
# then fill the lower triangular part (the BOS padding)
delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool))
if self.config.audio_channels == 2:
# for left/right channel we need to duplicate every row of the pattern mask in an interleaved fashion
delay_pattern = delay_pattern.repeat_interleave(2, dim=0)
mask = ~delay_pattern.to(input_ids.device)
input_ids = mask * input_ids_shifted + ~mask * pad_token_id
# find the first position to start generating - this is the first place we have the -1 token
# and will always be in the first codebook (since it has no codebook offset)
first_codebook_ids = input_ids[:, 0, :]
start_ids = (first_codebook_ids == -1).nonzero()[:, 1]
if len(start_ids) > 0:
first_start_id = min(start_ids)
else:
# we have no tokens that need to be filled - return entire matrix of input ids
first_start_id = seq_len
# (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len)
pattern_mask = input_ids.reshape(bsz * num_codebooks, -1)
input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1)
return input_ids, pattern_mask
@staticmethod
def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask):
"""Apply a delay pattern mask to the decoder input ids, only preserving predictions where
the mask is set to -1, and otherwise setting to the value detailed in the mask."""
seq_len = input_ids.shape[-1]
decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len]
input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask)
return input_ids
@torch.no_grad()
# Ignore copy
def generate(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
synced_gpus: Optional[bool] = None,
streamer: Optional["BaseStreamer"] = None,
**kwargs,
):
"""
Generates sequences of token ids for models with a language modeling head.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
`input_ids`, `input_values`, `input_features`, or `pixel_values`.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which had the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed to avoid deadlocking with
`FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
streamer (`BaseStreamer`, *optional*):
Streamer object that will be used to stream the generated sequences. Generated tokens are passed
through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateDecoderOnlyOutput`],
- [`~generation.GenerateBeamDecoderOnlyOutput`]
If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateEncoderDecoderOutput`],
- [`~generation.GenerateBeamEncoderDecoderOutput`]
"""
# 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
if generation_config is None:
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
generation_config.validate()
self._validate_model_kwargs(model_kwargs.copy())
# 2. Set generation parameters if not already defined
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
requires_attention_mask = "encoder_outputs" not in model_kwargs
kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
# 3. Define model inputs`
input_ids, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs
)
batch_size = input_ids.shape[0] // self.num_codebooks
self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device)
# 4. Define other model kwargs
model_kwargs["use_cache"] = generation_config.use_cache
model_kwargs["guidance_scale"] = generation_config.guidance_scale
if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
input_ids, generation_config, model_kwargs
)
# 5. Prepare `max_length` depending on other stopping criteria.
input_ids_length = input_ids.shape[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
generation_config = self._prepare_generated_length(
generation_config=generation_config,
has_default_max_length=has_default_max_length,
has_default_min_length=has_default_min_length,
model_input_name=model_input_name,
inputs_tensor=input_ids,
input_ids_length=input_ids_length,
)
# 6. Prepare `input_ids` which will be used for auto-regressive generation
# Build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Musicgen)
input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
input_ids,
pad_token_id=generation_config._decoder_start_token_tensor,
max_length=generation_config.max_length,
)
if streamer is not None:
streamer.put(input_ids.cpu())
# stash the delay mask so that we don't have to recompute it in each forward pass
model_kwargs["delay_pattern_mask"] = delay_pattern_mask
# 7. determine generation mode
generation_mode = generation_config.get_generation_mode()
# 8. prepare batched CFG externally (to enable coexistence with the unbatched CFG)
if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
generation_config.guidance_scale = None
# 9. prepare distribution pre_processing samplers
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_length,
encoder_input_ids=input_ids,
prefix_allowed_tokens_fn=None,
logits_processor=logits_processor,
device=input_ids.device,
)
# 10. prepare stopping criteria
stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria
)
if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
# expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_return_sequences,
**model_kwargs,
)
# 11. run sample
outputs = self._sample(
input_ids,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
generation_config=generation_config,
synced_gpus=synced_gpus,
streamer=streamer,
**model_kwargs,
)
else:
raise ValueError(
"Got incompatible mode for generation, should be one of greedy or sampling. "
"Ensure that beam search is de-activated by setting `num_beams=1`."
)
if generation_config.return_dict_in_generate:
output_ids = outputs.sequences
else:
output_ids = outputs
# apply the pattern mask to the final ids
output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs["delay_pattern_mask"])
# revert the pattern delay mask by filtering the pad token id
output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(
batch_size, self.num_codebooks, -1
)
if generation_config.return_dict_in_generate:
outputs.sequences = output_ids
return outputs
else:
return output_ids
@auto_docstring
|
MusicgenMelodyForCausalLM
|
python
|
xlwings__xlwings
|
xlwings/_xlwindows.py
|
{
"start": 64090,
"end": 64721
}
|
class ____(base_classes.Names):
def __init__(self, xl):
self.xl = xl
@property
def api(self):
return self.xl
def __call__(self, name_or_index):
return Name(xl=self.xl(name_or_index))
def contains(self, name_or_index):
try:
self.xl(name_or_index)
except pywintypes.com_error as e:
if e.hresult == -2147352567:
return False
else:
raise
return True
def __len__(self):
return self.xl.Count
def add(self, name, refers_to):
return Name(xl=self.xl.Add(name, refers_to))
|
Names
|
python
|
huggingface__transformers
|
src/transformers/models/aria/modeling_aria.py
|
{
"start": 21562,
"end": 23737
}
|
class ____(GradientCheckpointingLayer):
"""
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = AriaTextAttention(config=config, layer_idx=layer_idx)
self.mlp = AriaTextMoELayer(config)
self.input_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
AriaTextDecoderLayer
|
python
|
huggingface__transformers
|
src/transformers/models/imagegpt/modeling_imagegpt.py
|
{
"start": 32832,
"end": 37156
}
|
class ____(ImageGPTPreTrainedModel):
def __init__(self, config: ImageGPTConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = ImageGPTModel(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs: Any,
) -> Union[tuple, SequenceClassifierOutputWithPast]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTForImageClassification
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTForImageClassification.from_pretrained("openai/imagegpt-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# average-pool the hidden states along the sequence dimension
pooled_hidden_states = hidden_states.mean(dim=1)
# project from (batch_size, hidden_size) to (batch_size, num_labels)
logits = self.score(pooled_hidden_states)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
__all__ = [
"ImageGPTForCausalImageModeling",
"ImageGPTForImageClassification",
"ImageGPTModel",
"ImageGPTPreTrainedModel",
]
|
ImageGPTForImageClassification
|
python
|
ray-project__ray
|
python/ray/data/tests/mock_http_server.py
|
{
"start": 423,
"end": 2463
}
|
class ____(BaseHTTPRequestHandler):
files = {
"/index/data_file": data,
"/index": index,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _respond(self, code=200, headers=None, data=b""):
headers = headers or {}
headers.update({"User-Agent": "test"})
self.send_response(code)
for k, v in headers.items():
self.send_header(k, str(v))
self.end_headers()
if data:
self.wfile.write(data)
def do_GET(self):
file_path = self.path.rstrip("/")
file_data = self.files.get(file_path)
if file_data is None:
return self._respond(404)
if "Range" in self.headers:
ran = self.headers["Range"]
b, ran = ran.split("=")
start, end = ran.split("-")
if start:
file_data = file_data[int(start) : (int(end) + 1) if end else None]
else:
# suffix only
file_data = file_data[-int(end) :]
if "give_length" in self.headers:
response_headers = {"Content-Length": len(file_data)}
self._respond(200, response_headers, file_data)
elif "give_range" in self.headers:
self._respond(
200,
{"Content-Range": "0-%i/%i" % (len(file_data) - 1, len(file_data))},
file_data,
)
else:
self._respond(200, data=file_data)
@contextlib.contextmanager
def serve():
server_address = ("", port)
httpd = HTTPServer(server_address, HTTPTestHandler)
th = threading.Thread(target=httpd.serve_forever)
th.daemon = True
th.start()
try:
yield "http://localhost:%i" % port
finally:
httpd.socket.close()
httpd.shutdown()
th.join()
@pytest.fixture(scope="module")
def http_server():
with serve() as s:
yield s
@pytest.fixture(scope="module")
def http_file():
return data_file
|
HTTPTestHandler
|
python
|
lepture__authlib
|
authlib/oauth2/rfc8628/errors.py
|
{
"start": 600,
"end": 922
}
|
class ____(OAuth2Error):
"""The "device_code" has expired, and the device authorization
session has concluded. The client MAY commence a new device
authorization request but SHOULD wait for user interaction before
restarting to avoid unnecessary polling.
"""
error = "expired_token"
|
ExpiredTokenError
|
python
|
psf__black
|
tests/data/cases/nested_stub.py
|
{
"start": 1070,
"end": 1243
}
|
class ____:
class Nested1:
foo: int
def bar(self): ...
field = 1
class Nested2:
def bar(self): ...
foo: int
field = 1
|
TopLevel
|
python
|
realpython__materials
|
django-vue-graphql/source_code_final/back_end/blog/admin.py
|
{
"start": 184,
"end": 258
}
|
class ____(admin.ModelAdmin):
model = Tag
@admin.register(Post)
|
TagAdmin
|
python
|
run-llama__llama_index
|
llama-index-instrumentation/tests/test_dispatcher.py
|
{
"start": 1355,
"end": 1593
}
|
class ____(BaseEventHandler):
events: List[BaseEvent] = []
@classmethod
def class_name(cls):
return "_TestEventHandler"
def handle(self, e: BaseEvent): # type:ignore
self.events.append(e)
|
_TestEventHandler
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/triggers/test_neptune.py
|
{
"start": 2459,
"end": 3662
}
|
class ____:
def test_serialization(self):
"""
Asserts that the TaskStateTrigger correctly serializes its arguments
and classpath.
"""
trigger = NeptuneClusterStoppedTrigger(db_cluster_id=CLUSTER_ID)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.amazon.aws.triggers.neptune.NeptuneClusterStoppedTrigger"
assert "db_cluster_id" in kwargs
assert kwargs["db_cluster_id"] == CLUSTER_ID
@pytest.mark.asyncio
@mock.patch("airflow.providers.amazon.aws.hooks.neptune.NeptuneHook.get_waiter")
@mock.patch("airflow.providers.amazon.aws.hooks.neptune.NeptuneHook.get_async_conn")
async def test_run_success(self, mock_async_conn, mock_get_waiter):
mock_async_conn.return_value.__aenter__.return_value = "stopped"
mock_get_waiter().wait = AsyncMock()
trigger = NeptuneClusterStoppedTrigger(db_cluster_id=CLUSTER_ID)
generator = trigger.run()
resp = await generator.asend(None)
assert resp == TriggerEvent({"status": "success", "db_cluster_id": CLUSTER_ID})
assert mock_get_waiter().wait.call_count == 1
|
TestNeptuneClusterStoppedTrigger
|
python
|
google__flatbuffers
|
tests/namespace_test/NamespaceC/TableInC.py
|
{
"start": 1987,
"end": 3184
}
|
class ____(object):
# TableInCT
def __init__(self):
self.referToA1 = None # type: Optional[TableInFirstNST]
self.referToA2 = None # type: Optional[SecondTableInAT]
@classmethod
def InitFromBuf(cls, buf, pos):
tableInC = TableInC()
tableInC.Init(buf, pos)
return cls.InitFromObj(tableInC)
@classmethod
def InitFromObj(cls, tableInC):
x = TableInCT()
x._UnPack(tableInC)
return x
# TableInCT
def _UnPack(self, tableInC):
if tableInC is None:
return
if tableInC.ReferToA1() is not None:
self.referToA1 = TableInFirstNST.InitFromObj(tableInC.ReferToA1())
if tableInC.ReferToA2() is not None:
self.referToA2 = SecondTableInAT.InitFromObj(tableInC.ReferToA2())
# TableInCT
def Pack(self, builder):
if self.referToA1 is not None:
referToA1 = self.referToA1.Pack(builder)
if self.referToA2 is not None:
referToA2 = self.referToA2.Pack(builder)
TableInCStart(builder)
if self.referToA1 is not None:
TableInCAddReferToA1(builder, referToA1)
if self.referToA2 is not None:
TableInCAddReferToA2(builder, referToA2)
tableInC = TableInCEnd(builder)
return tableInC
|
TableInCT
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/with2.py
|
{
"start": 254,
"end": 944
}
|
class ____(object):
def __enter__(self: _T1) -> _T1:
return self
def __exit__(
self,
t: type | None = None,
exc: BaseException | None = None,
tb: Any | None = None,
) -> bool:
return True
def requires_int(val: int):
pass
def requires_class3(val: Class3):
pass
def test1():
a2 = Class2()
a3 = Class3()
# This should generate an error because
# the __exit__ method is missing.
with a2 as foo:
requires_int(foo)
# This should generate an error because
# the __exit__ method is missing.
with a2 as foo2, a3 as foo3:
requires_int(foo2)
requires_class3(foo3)
|
Class3
|
python
|
ray-project__ray
|
python/ray/tune/tests/_test_trial_runner_pg.py
|
{
"start": 482,
"end": 9392
}
|
class ____(unittest.TestCase):
def setUp(self):
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "10000"
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "auto" # Reset default
self.head_cpus = 8
self.head_gpus = 4
self.head_custom = 16
self.cluster = Cluster(
initialize_head=True,
connect=True,
head_node_args={
"include_dashboard": False,
"num_cpus": self.head_cpus,
"num_gpus": self.head_gpus,
"resources": {"custom": self.head_custom},
"_system_config": {
"health_check_initial_delay_ms": 0,
"health_check_period_ms": 1000,
"health_check_failure_threshold": 10,
},
},
)
# Pytest doesn't play nicely with imports
_register_all()
def tearDown(self):
ray.shutdown()
self.cluster.shutdown()
_register_all() # re-register the evicted objects
def _assertCleanup(self, trial_executor):
# Assert proper cleanup
resource_manager = trial_executor._resource_manager
self.assertFalse(resource_manager._pg_to_request)
self.assertFalse(resource_manager._acquired_pgs)
self.assertFalse(resource_manager._staging_future_to_pg)
self.assertFalse(resource_manager._pg_to_staging_future)
for rr in resource_manager._request_to_staged_pgs:
self.assertFalse(resource_manager._request_to_staged_pgs[rr])
for rr in resource_manager._request_to_ready_pgs:
self.assertFalse(resource_manager._request_to_ready_pgs[rr])
num_non_removed_pgs = len(
[p for pid, p in placement_group_table().items() if p["state"] != "REMOVED"]
)
self.assertEqual(num_non_removed_pgs, 0)
def testPlacementGroupRequests(self, reuse_actors=False, scheduled=10):
"""In this test we try to start 10 trials but only have resources
for 2. Placement groups should still be created and PENDING.
Eventually they should be scheduled sequentially (i.e. in pairs
of two)."""
# Since we check per-step placement groups, set the reconcilation
# interval to 0
os.environ["TUNE_PLACEMENT_GROUP_RECON_INTERVAL"] = "0"
def train_fn(config):
time.sleep(1)
now = time.time()
tune.report(end=now - config["start_time"])
head_bundle = {"CPU": 4, "GPU": 0, "custom": 0}
child_bundle = {"custom": 1}
# Manually calculated number of parallel trials
max_num_parallel = 2
placement_group_factory = PlacementGroupFactory(
[head_bundle, child_bundle, child_bundle]
)
trial_executor = RayTrialExecutor(reuse_actors=reuse_actors)
trial_executor.setup(max_pending_trials=max_num_parallel)
this = self
class _TestCallback(Callback):
def on_step_end(self, iteration, trials, **info):
num_finished = len(
[
t
for t in trials
if t.status == Trial.TERMINATED or t.status == Trial.ERROR
]
)
resource_manager = trial_executor._resource_manager
num_staging = sum(
len(s) for s in resource_manager._request_to_staged_pgs.values()
)
num_ready = sum(
len(s) for s in resource_manager._request_to_ready_pgs.values()
)
num_in_use = len(resource_manager._acquired_pgs)
num_cached = trial_executor._actor_cache.num_cached_objects
total_num_tracked = num_staging + num_ready + num_in_use + num_cached
# All trials should be scheduled
this.assertEqual(
scheduled,
min(scheduled, len(trials)),
msg=f"Num trials iter {iteration}",
)
# The following two tests were relaxed for reuse_actors=True
# so that up to `max_num_parallel` more placement groups can
# exist than we would expect. This is because caching
# relies on reconciliation for cleanup to avoid overscheduling
# of new placement groups.
num_parallel_reuse = int(reuse_actors) * max_num_parallel
# The number of PGs should decrease when trials finish
# We allow a constant excess of 1 here because the trial will
# be TERMINATED and the resources only returned after the trainable
# cleanup future succeeded. Because num_finished will increase,
# this still asserts that the number of PGs goes down over time.
this.assertGreaterEqual(
max(scheduled, len(trials)) - num_finished + 1 + num_parallel_reuse,
total_num_tracked,
msg=f"Num tracked iter {iteration}, {len(trials)}, "
f"{scheduled}, {num_finished}, {num_parallel_reuse}",
)
start = time.time()
out = tune.run(
train_fn,
config={"start_time": start},
resources_per_trial=placement_group_factory,
num_samples=10,
trial_executor=trial_executor,
callbacks=[_TestCallback()],
reuse_actors=reuse_actors,
verbose=2,
)
trial_end_times = sorted(t.last_result["end"] for t in out.trials)
print("Trial end times:", trial_end_times)
max_diff = trial_end_times[-1] - trial_end_times[0]
# Not all trials have been run in parallel
self.assertGreater(max_diff, 3)
# Some trials should have run in parallel
# Todo: Re-enable when using buildkite
# self.assertLess(max_diff, 10)
self._assertCleanup(trial_executor)
def testPlacementGroupRequestsWithActorReuse(self):
"""Assert that reuse actors doesn't leak placement groups"""
self.testPlacementGroupRequests(reuse_actors=True)
def testPlacementGroupLimitedRequests(self):
"""Assert that maximum number of placement groups is enforced."""
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "6"
self.testPlacementGroupRequests(scheduled=6)
def testPlacementGroupLimitedRequestsWithActorReuse(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "6"
self.testPlacementGroupRequests(reuse_actors=True, scheduled=6)
    def testPlacementGroupDistributedTraining(self, reuse_actors=False):
        """Run distributed training using placement groups.
        Each trial requests 4 CPUs and starts 4 remote training workers.
        """
        # Head bundle hosts the trainable; the three child bundles host the
        # remote training workers the trainable spawns.
        head_bundle = {"CPU": 1, "GPU": 0, "custom": 0}
        child_bundle = {"CPU": 1}
        placement_group_factory = PlacementGroupFactory(
            [head_bundle, child_bundle, child_bundle, child_bundle]
        )
        @ray.remote
        class TrainingActor:
            # Worker actor: sleep to simulate work, then echo the input.
            def train(self, val):
                time.sleep(1)
                return val
        def train_fn(config):
            base = config["base"]
            # Fan out to 4 workers returning base, base+2, base+4, base+6.
            actors = [TrainingActor.remote() for _ in range(4)]
            futures = [
                actor.train.remote(base + 2 * i) for i, actor in enumerate(actors)
            ]
            results = ray.get(futures)
            end = time.time() - config["start_time"]
            tune.report(avg=np.mean(results), end=end)
        trial_executor = RayTrialExecutor(reuse_actors=reuse_actors)
        start = time.time()
        out = tune.run(
            train_fn,
            config={
                "start_time": start,
                "base": tune.grid_search(list(range(0, 100, 10))),
            },
            resources_per_trial=placement_group_factory,
            num_samples=1,
            trial_executor=trial_executor,
            reuse_actors=reuse_actors,
            verbose=2,
        )
        # mean([base, base+2, base+4, base+6]) == base + 3, for bases 0..90.
        avgs = sorted(t.last_result["avg"] for t in out.trials)
        self.assertSequenceEqual(avgs, list(range(3, 103, 10)))
        trial_end_times = sorted(t.last_result["end"] for t in out.trials)
        print("Trial end times:", trial_end_times)
        max_diff = trial_end_times[-1] - trial_end_times[0]
        # Not all trials have been run in parallel
        self.assertGreater(max_diff, 3)
        # Some trials should have run in parallel
        # Todo: Re-enable when using buildkite
        # self.assertLess(max_diff, 10)
        self._assertCleanup(trial_executor)
    def testPlacementGroupDistributedTrainingWithActorReuse(self):
        """Same distributed-training scenario, but with actor reuse enabled."""
        self.testPlacementGroupDistributedTraining(reuse_actors=True)
|
TrialRunnerPlacementGroupTest
|
python
|
huggingface__transformers
|
src/transformers/models/janus/modular_janus.py
|
{
"start": 31885,
"end": 33740
}
|
class ____(ChameleonVQVAE):
    _no_split_modules = [
        "JanusVQVAEAttnBlock",
        "JanusVQVAEResnetBlock",
        "JanusVQVAEVectorQuantizer",
    ]
    main_input_name = "pixel_values"

    def __init__(self, config: JanusVQVAEConfig):
        super().__init__(config)
        self.decoder = JanusVQVAEDecoder(config)
        self.gradient_checkpointing = False
        # Initialize the VQVAE model.
        self.post_init()

    def decode(self, image_tokens: torch.LongTensor) -> torch.FloatTensor:
        """
        Decode a batch of quantized token IDs back into image pixel values.

        Args:
            image_tokens (torch.LongTensor): Batch of token IDs.

        Returns:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                Pixel values decoded from the token IDs.
        """
        num_expected_tokens = self.quantize.quant_state_dims[0] * self.quantize.quant_state_dims[1]
        if image_tokens.shape[1] != num_expected_tokens:
            raise ValueError(
                f"Expected `image_tokens` to have shape `(batch_size, {num_expected_tokens})`, "
                f"but got shape `{image_tokens.shape}`."
            )
        quantized_states = self.quantize.get_codebook_entry(image_tokens)
        post_quant_states = self.post_quant_conv(quantized_states)
        return self.decoder(post_quant_states)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
    ) -> tuple[torch.FloatTensor, torch.FloatTensor]:
        # Encode pixels into discrete codes, then decode them straight back
        # to produce the reconstruction plus the codebook embedding loss.
        quant, embedding_loss, indices = self.encode(pixel_values)
        batch_size = pixel_values.shape[0]
        reconstructed = self.decode(indices.view(batch_size, -1))
        return JanusVQVAEOutput(reconstructed, embedding_loss)
|
JanusVQVAE
|
python
|
huggingface__transformers
|
src/transformers/models/janus/modeling_janus.py
|
{
"start": 32599,
"end": 35694
}
|
class ____(nn.Module):
    """Convolutional VQ-VAE encoder: downsampling ResNet stages (attention only
    at the final, lowest resolution), a mid block, then GroupNorm + conv
    projecting to the latent channels."""
    def __init__(self, config):
        super().__init__()
        self.num_resolutions = len(config.channel_multiplier)
        self.num_res_blocks = config.num_res_blocks
        base_channels = config.base_channels
        in_channels = config.in_channels
        double_latent = config.double_latent
        latent_channels = config.latent_channels
        channel_multiplier = config.channel_multiplier
        # Stem conv: image channels -> base feature width.
        self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1)
        # Input width of stage i is base * in_channel_multiplier[i];
        # stage 0 reads the stem output (multiplier 1).
        in_channel_multiplier = (1,) + tuple(channel_multiplier)
        self.in_channel_multiplier = in_channel_multiplier
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = base_channels * in_channel_multiplier[i_level]
            block_out = base_channels * channel_multiplier[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(
                    JanusVQVAEResnetBlock(
                        config=config,
                        in_channels=block_in,
                        out_channels=block_out,
                    )
                )
                block_in = block_out
                # Attention layers only exist at the last (lowest) resolution.
                if i_level == self.num_resolutions - 1:
                    attn.append(JanusVQVAEAttnBlock(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            # Every stage except the last halves the spatial resolution.
            if i_level != self.num_resolutions - 1:
                down.downsample = JanusVQVAEConvDownsample(block_in)
            self.down.append(down)
        self.mid = JanusVQVAEMidBlock(config, block_in)
        self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        # Optionally double the latent width (e.g. to carry mean and logvar).
        self.conv_out = torch.nn.Conv2d(
            block_in,
            2 * latent_channels if double_latent else latent_channels,
            kernel_size=3,
            stride=1,
            padding=1,
        )
    def forward(self, pixel_values: torch.LongTensor):
        # downsampling
        hidden_states = [self.conv_in(pixel_values)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                hidden_state = self.down[i_level].block[i_block](
                    hidden_states[-1],
                )
                if len(self.down[i_level].attn) > 0:
                    hidden_state = self.down[i_level].attn[i_block](hidden_state)
                hidden_states.append(hidden_state)
            if i_level != self.num_resolutions - 1:
                hidden_states.append(self.down[i_level].downsample(hidden_states[-1]))
        # middle
        last_hidden_state = hidden_states[-1]
        last_hidden_state = self.mid(last_hidden_state)
        # end
        last_hidden_state = self.norm_out(last_hidden_state)
        # Swish/SiLU-style activation: x * sigmoid(x).
        # NOTE(review): the in-place `*=` overwrites the tensor sigmoid() was
        # computed from — confirm this is autograd-safe in the training path.
        last_hidden_state *= torch.sigmoid(last_hidden_state)
        last_hidden_state = self.conv_out(last_hidden_state)
        return last_hidden_state
|
JanusVQVAEEncoder
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/dml/test_bulk_statements.py
|
{
"start": 38790,
"end": 72141
}
|
class ____:
use_sentinel = False
randomize_returning = False
def assert_for_downgrade(self, *, sort_by_parameter_order):
if (
not sort_by_parameter_order
or not self.randomize_returning
or not testing.against(["postgresql", "mssql", "mariadb"])
):
return contextlib.nullcontext()
else:
return expect_warnings("Batches were downgraded")
    @classmethod
    def setup_bind(cls):
        """Return the engine for the class, optionally wrapped so that
        insertmanyvalues RETURNING rows come back in randomized order."""
        if cls.randomize_returning:
            new_eng = config.db.execution_options()
            @event.listens_for(new_eng, "engine_connect")
            def eng_connect(connection):
                # Install the row-randomizing fixture on each new connection.
                fixtures.insertmanyvalues_fixture(
                    connection,
                    randomize_rows=True,
                    # there should be no sentinel downgrades for any of
                    # these three dbs. sqlite has downgrades
                    warn_on_downgraded=testing.against(
                        ["postgresql", "mssql", "mariadb"]
                    ),
                )
            return new_eng
        else:
            return config.db
    def test_insert_col_key_also_works_currently(self):
        """using the column key, not mapped attr key.

        right now this passes through to the INSERT. when doing this with
        an UPDATE, it tends to fail because the synchronize session
        strategies can't match "xcol" back. however w/ INSERT we aren't
        doing that, so there's no place this gets checked. UPDATE also
        succeeds if synchronize_session is turned off.
        """
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        # "xcol" is the Column key backing the mapped attribute A.x.
        s.execute(insert(A).values(type="a", data="d", xcol=10))
        eq_(s.scalars(select(A.x)).all(), [10])
    @testing.combinations("default", "session_disable", "opt_disable")
    def test_autoflush(self, autoflush_option):
        """Test interaction of ORM bulk INSERT with session autoflush."""
        A = self.classes.A
        s = fixture_session(bind=self.bind)
        a1 = A(data="x1")
        s.add(a1)
        if autoflush_option == "default":
            # autoflush on: the pending a1 is flushed by the bulk execute
            s.execute(insert(A).values(type="a", data="x2"))
            assert inspect(a1).persistent
            eq_(s.scalars(select(A.data).order_by(A.id)).all(), ["x1", "x2"])
        elif autoflush_option == "session_disable":
            # session-level no_autoflush: a1 stays pending, only x2 inserted
            with s.no_autoflush:
                s.execute(insert(A).values(type="a", data="x2"))
                assert inspect(a1).pending
                eq_(s.scalars(select(A.data).order_by(A.id)).all(), ["x2"])
        elif autoflush_option == "opt_disable":
            # per-execution option disables autoflush for this statement only
            s.execute(
                insert(A).values(type="a", data="x2"),
                execution_options={"autoflush": False},
            )
            assert inspect(a1).pending
            with s.no_autoflush:
                eq_(s.scalars(select(A.data).order_by(A.id)).all(), ["x2"])
        else:
            assert False
    @testing.variation("use_returning", [True, False])
    @testing.variation("sort_by_parameter_order", [True, False])
    def test_heterogeneous_keys(self, use_returning, sort_by_parameter_order):
        """Bulk INSERT rows with differing key sets: the ORM must batch the
        parameter sets by key signature into separate INSERT statements."""
        A, B = self.classes("A", "B")
        values = [
            {"data": "d3", "x": 5, "type": "a"},
            {"data": "d4", "x": 6, "type": "a"},
            {"data": "d5", "type": "a"},
            {"data": "d6", "x": 8, "y": 9, "type": "a"},
            {"data": "d7", "x": 12, "y": 12, "type": "a"},
            {"data": "d8", "x": 7, "type": "a"},
        ]
        s = fixture_session(bind=self.bind)
        stmt = insert(A)
        if use_returning:
            stmt = stmt.returning(
                A, sort_by_parameter_order=bool(sort_by_parameter_order)
            )
        with self.sql_execution_asserter() as asserter:
            result = s.execute(stmt, values)
        if use_returning:
            if self.use_sentinel and sort_by_parameter_order:
                _sentinel_col = ", _sentinel"
                _sentinel_returning = ", a._sentinel"
                _sentinel_param = ", :_sentinel"
            else:
                _sentinel_col = _sentinel_param = _sentinel_returning = ""
            # note no sentinel col is used when there is only one row
            asserter.assert_(
                CompiledSQL(
                    f"INSERT INTO a (type, data, xcol{_sentinel_col}) VALUES "
                    f"(:type, :data, :xcol{_sentinel_param}) "
                    f"RETURNING a.id, a.type, a.data, a.xcol, a.y"
                    f"{_sentinel_returning}",
                    [
                        {"type": "a", "data": "d3", "xcol": 5},
                        {"type": "a", "data": "d4", "xcol": 6},
                    ],
                ),
                CompiledSQL(
                    "INSERT INTO a (type, data) VALUES (:type, :data) "
                    "RETURNING a.id, a.type, a.data, a.xcol, a.y",
                    [{"type": "a", "data": "d5"}],
                ),
                CompiledSQL(
                    f"INSERT INTO a (type, data, xcol, y{_sentinel_col}) "
                    f"VALUES (:type, :data, :xcol, :y{_sentinel_param}) "
                    f"RETURNING a.id, a.type, a.data, a.xcol, a.y"
                    f"{_sentinel_returning}",
                    [
                        {"type": "a", "data": "d6", "xcol": 8, "y": 9},
                        {"type": "a", "data": "d7", "xcol": 12, "y": 12},
                    ],
                ),
                CompiledSQL(
                    "INSERT INTO a (type, data, xcol) "
                    "VALUES (:type, :data, :xcol) "
                    "RETURNING a.id, a.type, a.data, a.xcol, a.y",
                    [{"type": "a", "data": "d8", "xcol": 7}],
                ),
            )
        else:
            asserter.assert_(
                CompiledSQL(
                    "INSERT INTO a (type, data, xcol) VALUES "
                    "(:type, :data, :xcol)",
                    [
                        {"type": "a", "data": "d3", "xcol": 5},
                        {"type": "a", "data": "d4", "xcol": 6},
                    ],
                ),
                CompiledSQL(
                    "INSERT INTO a (type, data) VALUES (:type, :data)",
                    [{"type": "a", "data": "d5"}],
                ),
                CompiledSQL(
                    "INSERT INTO a (type, data, xcol, y) "
                    "VALUES (:type, :data, :xcol, :y)",
                    [
                        {"type": "a", "data": "d6", "xcol": 8, "y": 9},
                        {"type": "a", "data": "d7", "xcol": 12, "y": 12},
                    ],
                ),
                CompiledSQL(
                    "INSERT INTO a (type, data, xcol) "
                    "VALUES (:type, :data, :xcol)",
                    [{"type": "a", "data": "d8", "xcol": 7}],
                ),
            )
        if use_returning:
            # all returned objects must already be loaded: zero extra SELECTs
            with self.assert_statement_count(testing.db, 0):
                eq_(
                    set(result.scalars().all()),
                    {
                        A(data="d3", id=mock.ANY, type="a", x=5, y=None),
                        A(data="d4", id=mock.ANY, type="a", x=6, y=None),
                        A(data="d5", id=mock.ANY, type="a", x=None, y=None),
                        A(data="d6", id=mock.ANY, type="a", x=8, y=9),
                        A(data="d7", id=mock.ANY, type="a", x=12, y=12),
                        A(data="d8", id=mock.ANY, type="a", x=7, y=None),
                    },
                )
    @testing.combinations(
        "strings",
        "cols",
        "strings_w_exprs",
        "cols_w_exprs",
        argnames="paramstyle",
    )
    @testing.variation(
        "single_element", [True, (False, testing.requires.multivalues_inserts)]
    )
    def test_single_values_returning_fn(self, paramstyle, single_element):
        """test using insert().values().

        these INSERT statements go straight in as a single execute without any
        insertmanyreturning or bulk_insert_mappings thing going on. the
        advantage here is that SQL expressions can be used in the values also.
        Disadvantage is none of the automation for inheritance mappers.
        """
        A, B = self.classes("A", "B")
        if paramstyle == "strings":
            values = [
                {"data": "d3", "x": 5, "y": 9, "type": "a"},
                {"data": "d4", "x": 10, "y": 8, "type": "a"},
            ]
        elif paramstyle == "cols":
            values = [
                {A.data: "d3", A.x: 5, A.y: 9, A.type: "a"},
                {A.data: "d4", A.x: 10, A.y: 8, A.type: "a"},
            ]
        elif paramstyle == "strings_w_exprs":
            # SQL expressions mixed into the values; both rows still
            # evaluate to the same data as the plain-string variants.
            values = [
                {"data": func.lower("D3"), "x": 5, "y": 9, "type": "a"},
                {
                    "data": "d4",
                    "x": literal_column("5") + 5,
                    "y": 8,
                    "type": "a",
                },
            ]
        elif paramstyle == "cols_w_exprs":
            values = [
                {A.data: func.lower("D3"), A.x: 5, A.y: 9, A.type: "a"},
                {
                    A.data: "d4",
                    A.x: literal_column("5") + 5,
                    A.y: 8,
                    A.type: "a",
                },
            ]
        else:
            assert False
        s = fixture_session(bind=self.bind)
        if single_element:
            if paramstyle.startswith("strings"):
                stmt = (
                    insert(A)
                    .values(**values[0])
                    .returning(A, func.upper(A.data, type_=String))
                )
            else:
                stmt = (
                    insert(A)
                    .values(values[0])
                    .returning(A, func.upper(A.data, type_=String))
                )
        else:
            stmt = (
                insert(A)
                .values(values)
                .returning(A, func.upper(A.data, type_=String))
            )
        # run three times to exercise statement caching
        for i in range(3):
            result = s.execute(stmt)
            expected: List[Any] = [(A(data="d3", x=5, y=9), "D3")]
            if not single_element:
                expected.append((A(data="d4", x=10, y=8), "D4"))
            eq_(result.all(), expected)
    def test_bulk_w_sql_expressions(self):
        """Bulk INSERT where a fixed SQL expression is merged into every
        parameter set via insert().values()."""
        A, B = self.classes("A", "B")
        data = [
            {"x": 5, "y": 9, "type": "a"},
            {
                "x": 10,
                "y": 8,
                "type": "a",
            },
        ]
        s = fixture_session(bind=self.bind)
        stmt = (
            insert(A)
            .values(data=func.lower("DD"))
            .returning(A, func.upper(A.data, type_=String))
        )
        # run three times to exercise statement caching
        for i in range(3):
            result = s.execute(stmt, data)
            expected: Set[Any] = {
                (A(data="dd", x=5, y=9), "DD"),
                (A(data="dd", x=10, y=8), "DD"),
            }
            eq_(set(result.all()), expected)
    def test_bulk_w_sql_expressions_subclass(self):
        """Same as test_bulk_w_sql_expressions, but against the subclass B
        so the inheritance mapper participates."""
        A, B = self.classes("A", "B")
        data = [
            {"bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
            {"bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
        ]
        s = fixture_session(bind=self.bind)
        stmt = (
            insert(B)
            .values(data=func.lower("DD"))
            .returning(B, func.upper(B.data, type_=String))
        )
        # run three times to exercise statement caching
        for i in range(3):
            result = s.execute(stmt, data)
            expected: Set[Any] = {
                (B(bd="bd1", data="dd", q=4, type="b", x=1, y=2, z=3), "DD"),
                (B(bd="bd2", data="dd", q=8, type="b", x=5, y=6, z=7), "DD"),
            }
            eq_(set(result), expected)
    @testing.combinations(True, False, argnames="use_ordered")
    def test_bulk_upd_w_sql_expressions_no_ordered_values(self, use_ordered):
        """Bulk ORM UPDATE must reject ordered_values() with a clear error.

        NOTE(review): the ``use_ordered`` combination parameter is never
        referenced in the body, so both variants run identically — confirm
        whether a non-ordered branch was intended here.
        """
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        stmt = update(B).ordered_values(
            ("data", func.lower("DD_UPDATE")),
            ("z", literal_column("3 + 12")),
        )
        with expect_raises_message(
            exc.InvalidRequestError,
            r"bulk ORM UPDATE does not support ordered_values\(\) "
            r"for custom UPDATE",
        ):
            s.execute(
                stmt,
                [
                    {"id": 5, "bd": "bd1_updated"},
                    {"id": 6, "bd": "bd2_updated"},
                ],
            )
    def test_bulk_upd_w_sql_expressions_subclass(self):
        """Bulk UPDATE on subclass B mixing per-row params with fixed SQL
        expressions supplied via update().values()."""
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        data = [
            {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
            {"data": "d4", "bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
        ]
        ids = {
            row.data: row.id
            for row in s.execute(insert(B).returning(B.id, B.data), data)
        }
        stmt = update(B).values(
            data=func.lower("DD_UPDATE"), z=literal_column("3 + 12")
        )
        result = s.execute(
            stmt,
            [
                {"id": ids["d3"], "bd": "bd1_updated"},
                {"id": ids["d4"], "bd": "bd2_updated"},
            ],
        )
        # this is a nullresult at the moment
        assert result is not None
        eq_(
            set(s.scalars(select(B))),
            {
                B(
                    bd="bd1_updated",
                    data="dd_update",
                    id=ids["d3"],
                    q=4,
                    type="b",
                    x=1,
                    y=2,
                    z=15,
                ),
                B(
                    bd="bd2_updated",
                    data="dd_update",
                    id=ids["d4"],
                    q=8,
                    type="b",
                    x=5,
                    y=6,
                    z=15,
                ),
            },
        )
def test_single_returning_fn(self):
A, B = self.classes("A", "B")
s = fixture_session(bind=self.bind)
for i in range(3):
result = s.execute(
insert(A).returning(A, func.upper(A.data, type_=String)),
[{"data": "d3"}, {"data": "d4"}],
)
eq_(set(result), {(A(data="d3"), "D3"), (A(data="d4"), "D4")})
    @testing.variation("single_element", [True, False])
    def test_subclass_no_returning(self, single_element):
        """Bulk INSERT against subclass B without RETURNING yields a
        closed (empty) result."""
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        if single_element:
            data = {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4}
        else:
            data = [
                {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
                {"data": "d4", "bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
            ]
        result = s.execute(insert(B), data)
        assert result._soft_closed
    @testing.variation("sort_by_parameter_order", [True, False])
    @testing.variation("single_element", [True, False])
    def test_subclass_load_only(self, single_element, sort_by_parameter_order):
        """test that load_only() prevents additional attributes from being
        populated.
        """
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        if single_element:
            data = {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4}
        else:
            data = [
                {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
                {"data": "d4", "bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
            ]
        for i in range(3):
            # tests both caching and that the data dictionaries aren't
            # mutated...
            result = s.execute(
                insert(B)
                .returning(
                    B,
                    sort_by_parameter_order=bool(sort_by_parameter_order),
                )
                .options(load_only(B.data, B.y, B.q)),
                data,
            )
            objects = result.scalars().all()
            # only the load_only() columns (plus PK) may be populated
            for obj in objects:
                assert "data" in obj.__dict__
                assert "q" in obj.__dict__
                assert "z" not in obj.__dict__
                assert "x" not in obj.__dict__
            expected = [
                B(data="d3", bd="bd1", x=1, y=2, z=3, q=4),
            ]
            if not single_element:
                expected.append(B(data="d4", bd="bd2", x=5, y=6, z=7, q=8))
            # ordered RETURNING guarantees row order; otherwise compare as set
            if sort_by_parameter_order:
                coll = list
            else:
                coll = set
            eq_(coll(objects), coll(expected))
    @testing.variation("single_element", [True, False])
    def test_subclass_load_only_doesnt_fetch_cols(self, single_element):
        """test that when using load_only(), the actual INSERT statement
        does not include the deferred columns
        """
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        data = [
            {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
            {"data": "d4", "bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
        ]
        if single_element:
            data = data[0]
        with self.sql_execution_asserter() as asserter:
            # tests both caching and that the data dictionaries aren't
            # mutated...
            # note that if we don't put B.id here, accessing .id on the
            # B object for joined inheritance is triggering a SELECT
            # (and not for single inheritance). this seems not great, but is
            # likely a different issue
            result = s.execute(
                insert(B)
                .returning(B)
                .options(load_only(B.id, B.data, B.y, B.q)),
                data,
            )
            objects = result.scalars().all()
            if single_element:
                id0 = objects[0].id
                id1 = None
            else:
                id0, id1 = objects[0].id, objects[1].id
        # single/concrete inheritance: everything goes into one table
        if inspect(B).single or inspect(B).concrete:
            expected_params = [
                {
                    "type": "b",
                    "data": "d3",
                    "xcol": 1,
                    "y": 2,
                    "bd": "bd1",
                    "zcol": 3,
                    "q": 4,
                },
                {
                    "type": "b",
                    "data": "d4",
                    "xcol": 5,
                    "y": 6,
                    "bd": "bd2",
                    "zcol": 7,
                    "q": 8,
                },
            ]
            if single_element:
                expected_params[1:] = []
            # RETURNING only includes PK, discriminator, then the cols
            # we asked for data, y, q.  xcol, z, bd are omitted
            if inspect(B).single:
                asserter.assert_(
                    CompiledSQL(
                        "INSERT INTO a (type, data, xcol, y, bd, zcol, q) "
                        "VALUES "
                        "(:type, :data, :xcol, :y, :bd, :zcol, :q) "
                        "RETURNING a.id, a.type, a.data, a.y, a.q",
                        expected_params,
                    ),
                )
            else:
                asserter.assert_(
                    CompiledSQL(
                        "INSERT INTO b (type, data, xcol, y, bd, zcol, q) "
                        "VALUES "
                        "(:type, :data, :xcol, :y, :bd, :zcol, :q) "
                        "RETURNING b.id, b.type, b.data, b.y, b.q",
                        expected_params,
                    ),
                )
        else:
            # joined inheritance: base-table and sub-table INSERTs are split
            a_data = [
                {"type": "b", "data": "d3", "xcol": 1, "y": 2},
                {"type": "b", "data": "d4", "xcol": 5, "y": 6},
            ]
            b_data = [
                {"id": id0, "bd": "bd1", "zcol": 3, "q": 4},
                {"id": id1, "bd": "bd2", "zcol": 7, "q": 8},
            ]
            if single_element:
                a_data[1:] = []
                b_data[1:] = []
            # RETURNING only includes PK, discriminator, then the cols
            # we asked for data, y, q.  xcol, z, bd are omitted.  plus they
            # are broken out correctly in the two statements.
            asserter.assert_(
                Conditional(
                    self.use_sentinel and not single_element,
                    [
                        CompiledSQL(
                            "INSERT INTO a (type, data, xcol, y, _sentinel) "
                            "VALUES "
                            "(:type, :data, :xcol, :y, :_sentinel) "
                            "RETURNING a.id, a.type, a.data, a.y, a._sentinel",
                            a_data,
                        ),
                        CompiledSQL(
                            "INSERT INTO b (id, bd, zcol, q, _sentinel) "
                            "VALUES (:id, :bd, :zcol, :q, :_sentinel) "
                            "RETURNING b.id, b.q, b._sentinel",
                            b_data,
                        ),
                    ],
                    [
                        CompiledSQL(
                            "INSERT INTO a (type, data, xcol, y) VALUES "
                            "(:type, :data, :xcol, :y) "
                            "RETURNING a.id, a.type, a.data, a.y",
                            a_data,
                        ),
                        Conditional(
                            single_element,
                            [
                                CompiledSQL(
                                    "INSERT INTO b (id, bd, zcol, q) "
                                    "VALUES (:id, :bd, :zcol, :q) "
                                    "RETURNING b.id, b.q",
                                    b_data,
                                ),
                            ],
                            [
                                CompiledSQL(
                                    "INSERT INTO b (id, bd, zcol, q) "
                                    "VALUES (:id, :bd, :zcol, :q) "
                                    "RETURNING b.id, b.q, b.id AS id__1",
                                    b_data,
                                ),
                            ],
                        ),
                    ],
                )
            )
    @testing.variation("single_element", [True, False])
    def test_subclass_returning_bind_expr(self, single_element):
        """RETURNING with a bound expression (B.q + 5) on a subclass insert."""
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        if single_element:
            data = {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4}
        else:
            data = [
                {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
                {"data": "d4", "bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
            ]
        # note there's a fix in compiler.py ->
        # _deliver_insertmanyvalues_batches
        # for this re: the parameter rendering that isn't tested anywhere
        # else.  two different versions of the bug for both positional
        # and non
        result = s.execute(insert(B).returning(B.data, B.y, B.q + 5), data)
        if single_element:
            eq_(result.all(), [("d3", 2, 9)])
        else:
            eq_(set(result), {("d3", 2, 9), ("d4", 6, 13)})
    def test_subclass_bulk_update(self):
        """Bulk UPDATE of subclass B spanning both inheritance tables."""
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        data = [
            {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
            {"data": "d4", "bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
        ]
        ids = {
            row.data: row.id
            for row in s.execute(insert(B).returning(B.id, B.data), data).all()
        }
        result = s.execute(
            update(B),
            [
                {"id": ids["d3"], "data": "d3_updated", "bd": "bd1_updated"},
                {"id": ids["d4"], "data": "d4_updated", "bd": "bd2_updated"},
            ],
        )
        # this is a nullresult at the moment
        assert result is not None
        eq_(
            set(s.scalars(select(B))),
            {
                B(
                    bd="bd1_updated",
                    data="d3_updated",
                    id=ids["d3"],
                    q=4,
                    type="b",
                    x=1,
                    y=2,
                    z=3,
                ),
                B(
                    bd="bd2_updated",
                    data="d4_updated",
                    id=ids["d4"],
                    q=8,
                    type="b",
                    x=5,
                    y=6,
                    z=7,
                ),
            },
        )
    @testing.variation("single_element", [True, False])
    @testing.variation("sort_by_parameter_order", [True, False])
    def test_subclass_return_just_subclass_ids(
        self, single_element, sort_by_parameter_order
    ):
        """RETURNING only PK + one column on a subclass insert round-trips."""
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        if single_element:
            data = {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4}
        else:
            data = [
                {"data": "d3", "bd": "bd1", "x": 1, "y": 2, "z": 3, "q": 4},
                {"data": "d4", "bd": "bd2", "x": 5, "y": 6, "z": 7, "q": 8},
            ]
        ids = s.execute(
            insert(B).returning(
                B.id,
                B.data,
                sort_by_parameter_order=bool(sort_by_parameter_order),
            ),
            data,
        )
        actual_ids = s.execute(select(B.id, B.data).order_by(B.id))
        # ordered RETURNING guarantees row order; otherwise compare as set
        if sort_by_parameter_order:
            coll = list
        else:
            coll = set
        eq_(coll(ids), coll(actual_ids))
    @testing.variation(
        "insert_strategy",
        ["orm", "bulk", "bulk_ordered", "bulk_w_embedded_bindparam"],
    )
    @testing.requires.provisioned_upsert
    def test_base_class_upsert(self, insert_strategy):
        """upsert is really tricky.   if you dont have any data updated,
        then you dont get the rows back and things dont work so well.

        so we need to be careful how much we document this because this is
        still a thorny use case.
        """
        A = self.classes.A
        s = fixture_session(bind=self.bind)
        initial_data = [
            {"data": "d3", "x": 1, "y": 2, "q": 4},
            {"data": "d4", "x": 5, "y": 6, "q": 8},
        ]
        ids = {
            row.data: row.id
            for row in s.execute(
                insert(A).returning(A.id, A.data), initial_data
            )
        }
        # two rows collide with existing ids (update path), two are new
        upsert_data = [
            {
                "id": ids["d3"],
                "type": "a",
                "data": "d3",
                "x": 1,
                "y": 2,
            },
            {
                "id": 32,
                "type": "a",
                "data": "d32",
                "x": 19,
                "y": 5,
            },
            {
                "id": ids["d4"],
                "type": "a",
                "data": "d4",
                "x": 5,
                "y": 6,
            },
            {
                "id": 28,
                "type": "a",
                "data": "d28",
                "x": 9,
                "y": 15,
            },
        ]
        stmt = provision.upsert(
            config,
            A,
            (A,),
            set_lambda=lambda inserted: {"data": inserted.data + " upserted"},
            sort_by_parameter_order=insert_strategy.bulk_ordered,
        )
        if insert_strategy.orm:
            result = s.scalars(stmt.values(upsert_data))
        elif insert_strategy.bulk or insert_strategy.bulk_ordered:
            with self.assert_for_downgrade(
                sort_by_parameter_order=insert_strategy.bulk_ordered
            ):
                result = s.scalars(stmt, upsert_data)
        elif insert_strategy.bulk_w_embedded_bindparam:
            # test related to #9583, specific user case in
            # https://github.com/sqlalchemy/sqlalchemy/discussions/9581#discussioncomment-5504077  # noqa: E501
            stmt = stmt.values(
                y=select(bindparam("qq1", type_=Integer)).scalar_subquery()
            )
            for d in upsert_data:
                d["qq1"] = d.pop("y")
            result = s.scalars(stmt, upsert_data)
        else:
            insert_strategy.fail()
        eq_(
            set(result.all()),
            {
                A(data="d3 upserted", id=ids["d3"], type="a", x=1, y=2),
                A(data="d32", id=32, type="a", x=19, y=5),
                A(data="d4 upserted", id=ids["d4"], type="a", x=5, y=6),
                A(data="d28", id=28, type="a", x=9, y=15),
            },
        )
    @testing.combinations(
        "orm",
        "bulk",
        argnames="insert_strategy",
    )
    @testing.variation("sort_by_parameter_order", [True, False])
    @testing.requires.provisioned_upsert
    def test_subclass_upsert(self, insert_strategy, sort_by_parameter_order):
        """note this is overridden in the joined version to expect failure"""
        A, B = self.classes("A", "B")
        s = fixture_session(bind=self.bind)
        idd3 = 1
        idd4 = 2
        id32 = 32
        id28 = 28
        initial_data = [
            {
                "id": idd3,
                "data": "d3",
                "bd": "bd1",
                "x": 1,
                "y": 2,
                "z": 3,
                "q": 4,
            },
            {
                "id": idd4,
                "data": "d4",
                "bd": "bd2",
                "x": 5,
                "y": 6,
                "z": 7,
                "q": 8,
            },
        ]
        ids = {
            row.data: row.id
            for row in s.execute(
                insert(B).returning(
                    B.id, B.data, sort_by_parameter_order=True
                ),
                initial_data,
            )
        }
        # two rows collide with existing ids (update path), two are new
        upsert_data = [
            {
                "id": ids["d3"],
                "type": "b",
                "data": "d3",
                "bd": "bd1_upserted",
                "x": 1,
                "y": 2,
                "z": 33,
                "q": 44,
            },
            {
                "id": id32,
                "type": "b",
                "data": "d32",
                "bd": "bd 32",
                "x": 19,
                "y": 5,
                "z": 20,
                "q": 21,
            },
            {
                "id": ids["d4"],
                "type": "b",
                "bd": "bd2_upserted",
                "data": "d4",
                "x": 5,
                "y": 6,
                "z": 77,
                "q": 88,
            },
            {
                "id": id28,
                "type": "b",
                "data": "d28",
                "bd": "bd 28",
                "x": 9,
                "y": 15,
                "z": 10,
                "q": 11,
            },
        ]
        stmt = provision.upsert(
            config,
            B,
            (B,),
            set_lambda=lambda inserted: {
                "data": inserted.data + " upserted",
                "bd": inserted.bd + " upserted",
            },
            sort_by_parameter_order=bool(sort_by_parameter_order),
        )
        with self.assert_for_downgrade(
            sort_by_parameter_order=bool(sort_by_parameter_order)
        ):
            result = s.scalars(stmt, upsert_data)
        eq_(
            set(result),
            {
                B(
                    bd="bd1_upserted upserted",
                    data="d3 upserted",
                    id=ids["d3"],
                    q=4,
                    type="b",
                    x=1,
                    y=2,
                    z=3,
                ),
                B(
                    bd="bd 32",
                    data="d32",
                    id=32,
                    q=21,
                    type="b",
                    x=19,
                    y=5,
                    z=20,
                ),
                B(
                    bd="bd2_upserted upserted",
                    data="d4 upserted",
                    id=ids["d4"],
                    q=8,
                    type="b",
                    x=5,
                    y=6,
                    z=7,
                ),
                B(
                    bd="bd 28",
                    data="d28",
                    id=28,
                    q=11,
                    type="b",
                    x=9,
                    y=15,
                    z=10,
                ),
            },
        )
@testing.combinations(
(
"no_sentinel",
False,
),
(
"w_sentinel",
True,
),
argnames="use_sentinel",
id_="ia",
)
@testing.combinations(
(
"nonrandom",
False,
),
(
"random",
True,
),
argnames="randomize_returning",
id_="ia",
)
|
BulkDMLReturningInhTest
|
python
|
ray-project__ray
|
python/ray/train/v2/tests/test_metrics.py
|
{
"start": 628,
"end": 9002
}
|
class ____:
    """Mock class for ray.util.metrics.Gauge."""
    def __init__(self, name: str, description: str, tag_keys: tuple = ()):
        # name/description/tag_keys mirror the real Gauge API but are unused.
        # Keys are frozensets of tag (key, value) items — fixed from the
        # original `dict[set[str], float]` annotation (set is unhashable).
        self._values: dict[frozenset, float] = {}
    def set(self, value: float, tags: dict):
        # Record the latest value per unique tag combination.
        self._values[frozenset(tags.items())] = value
@pytest.fixture
def mock_gauge(monkeypatch):
    """Fixture that replaces ray.util.metrics.Gauge with MockGauge."""
    # Patch Gauge at the site where the metrics base module looks it up.
    monkeypatch.setattr(ray.train.v2._internal.metrics.base, "Gauge", MockGauge)
    return MockGauge
def mock_time_monotonic(monkeypatch, time_values: list[float]):
    """Patch the metrics callbacks' ``time_monotonic`` so each call replays
    the next element of ``time_values`` in order."""
    state = {"next": 0}

    def _scripted_time():
        position = state["next"]
        state["next"] = position + 1
        return time_values[position]

    monkeypatch.setattr(
        ray.train.v2._internal.callbacks.metrics, "time_monotonic", _scripted_time
    )
def mock_start_end_time(monkeypatch, time_values: list[tuple[float, float]]):
    """Mock the time_monotonic function to return the start and end times.

    This assumes that time_monotonic is called in the order of the start and
    end times.
    """
    interleaved = [t for pair in time_values for t in pair]
    mock_time_monotonic(monkeypatch, interleaved)
def test_time_metric(monkeypatch, mock_gauge):
    """TimeMetric accumulates recorded durations and resets to zero."""
    base_tags = {"run_name": "test_run"}
    metric = TimeMetric(
        name="test_time",
        description="Test time metric",
        base_tags=base_tags,
    )
    # Test recording values
    metric.record(1.0)
    assert metric.get_value() == 1.0
    # Test updating metric — values accumulate
    metric.record(2.0)
    assert metric.get_value() == 3.0
    # Test reset
    metric.reset()
    assert metric.get_value() == 0.0
def test_enum_metric(monkeypatch, mock_gauge):
    """EnumMetric sets the recorded variant to 1 and all others to 0."""
    class TestEnum(enum.Enum):
        A = "A"
        B = "B"
        C = "C"
    base_tags = {"run_name": "test_run"}
    metric = EnumMetric[TestEnum](
        name="test_enum",
        description="Test enum metric",
        base_tags=base_tags,
        enum_tag_key="state",
    )
    # Test recording values — exactly one variant is "active" at a time
    metric.record(TestEnum.A)
    assert metric.get_value(TestEnum.A) == 1
    assert metric.get_value(TestEnum.B) == 0
    assert metric.get_value(TestEnum.C) == 0
    metric.record(TestEnum.B)
    assert metric.get_value(TestEnum.A) == 0
    assert metric.get_value(TestEnum.B) == 1
    assert metric.get_value(TestEnum.C) == 0
    metric.record(TestEnum.C)
    assert metric.get_value(TestEnum.A) == 0
    assert metric.get_value(TestEnum.B) == 0
    assert metric.get_value(TestEnum.C) == 0
    # Test reset — all variants go to 0
    metric.reset()
    assert metric.get_value(TestEnum.A) == 0
    assert metric.get_value(TestEnum.B) == 0
    assert metric.get_value(TestEnum.C) == 0
def test_worker_metrics_callback(monkeypatch, mock_gauge):
    """Worker callback accumulates report-blocked time across on_report()
    spans and zeroes it on shutdown."""
    # Two on_report() spans: [t1, t2] and [t3, t4].
    t1 = 0.0
    t2 = 1.0
    t3 = 10.0
    t4 = 12.0
    mock_start_end_time(monkeypatch, [(t1, t2), (t3, t4)])
    mock_train_context = MagicMock()
    mock_train_context.get_world_rank.return_value = 1
    mock_train_context.train_run_context = create_dummy_run_context()
    monkeypatch.setattr(
        ray.train.v2._internal.callbacks.metrics,
        "get_train_context",
        lambda: mock_train_context,
    )
    callback = WorkerMetricsCallback(train_run_context=create_dummy_run_context())
    callback.after_init_train_context()
    # Check if the gauges is updated with the correct metrics
    with callback.on_report():
        pass
    assert (
        callback._metrics[WorkerMetrics.REPORT_TOTAL_BLOCKED_TIME_S].get_value()
        == t2 - t1
    )
    # Check if the gauges is updated with the correct metrics
    with callback.on_report():
        pass
    assert callback._metrics[WorkerMetrics.REPORT_TOTAL_BLOCKED_TIME_S].get_value() == (
        t2 - t1
    ) + (t4 - t3)
    # Shutdown resets the accumulated metric.
    callback.before_shutdown()
    assert (
        callback._metrics[WorkerMetrics.REPORT_TOTAL_BLOCKED_TIME_S].get_value() == 0.0
    )
def test_controller_metrics_callback(monkeypatch, mock_gauge):
    """Controller callback tracks worker-group start/shutdown durations and
    zeroes both on controller shutdown."""
    # Start span is [t1, t2]; shutdown span is [t3, t4].
    t1 = 0.0
    t2 = 1.0
    t3 = 10.0
    t4 = 12.0
    mock_start_end_time(monkeypatch, [(t1, t2), (t3, t4)])
    mock_train_context = MagicMock()
    mock_train_context.get_run_config.return_value = RunConfig(name="test_run_name")
    monkeypatch.setattr(
        ray.train.v2._internal.execution.context,
        "get_train_context",
        lambda: mock_train_context,
    )
    callback = ControllerMetricsCallback()
    callback.after_controller_start(train_run_context=create_dummy_run_context())
    # Check if the gauges is updated with the correct metrics
    with callback.on_worker_group_start():
        pass
    assert (
        callback._metrics[ControllerMetrics.WORKER_GROUP_START_TOTAL_TIME_S].get_value()
        == t2 - t1
    )
    assert (
        callback._metrics[
            ControllerMetrics.WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S
        ].get_value()
        == 0.0
    )
    # Check if the gauges is updated with the correct metrics
    with callback.on_worker_group_shutdown():
        pass
    assert (
        callback._metrics[ControllerMetrics.WORKER_GROUP_START_TOTAL_TIME_S].get_value()
        == t2 - t1
    )
    assert (
        callback._metrics[
            ControllerMetrics.WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S
        ].get_value()
        == t4 - t3
    )
    # Controller shutdown resets both accumulated metrics.
    callback.before_controller_shutdown()
    assert (
        callback._metrics[ControllerMetrics.WORKER_GROUP_START_TOTAL_TIME_S].get_value()
        == 0.0
    )
    assert (
        callback._metrics[
            ControllerMetrics.WORKER_GROUP_SHUTDOWN_TOTAL_TIME_S
        ].get_value()
        == 0.0
    )
def test_controller_state_metrics(monkeypatch, mock_gauge):
    """Test controller state transition metrics.

    The CONTROLLER_STATE gauge is a one-hot per state: exactly one state
    holds the value 1 at any time, and all values drop to 0 on shutdown.
    """
    mock_train_context = MagicMock()
    mock_train_context.get_run_config.return_value = RunConfig(name="test_run_name")
    monkeypatch.setattr(
        ray.train.v2._internal.execution.context,
        "get_train_context",
        lambda: mock_train_context,
    )
    callback = ControllerMetricsCallback()
    callback.after_controller_start(train_run_context=create_dummy_run_context())
    # Test initial state
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.INITIALIZING
        )
        == 1
    )
    # Test state transition
    previous_state = TrainControllerState(TrainControllerStateType.INITIALIZING)
    current_state = TrainControllerState(TrainControllerStateType.RUNNING)
    callback.after_controller_state_update(previous_state, current_state)
    # Verify state counts: the old state is cleared and the new one set.
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.INITIALIZING
        )
        == 0
    )
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.RUNNING
        )
        == 1
    )
    # Test another state transition
    previous_state = TrainControllerState(TrainControllerStateType.RUNNING)
    current_state = TrainControllerState(TrainControllerStateType.FINISHED)
    callback.after_controller_state_update(previous_state, current_state)
    # Verify updated state counts
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.INITIALIZING
        )
        == 0
    )
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.RUNNING
        )
        == 0
    )
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.FINISHED
        )
        == 1
    )
    # Shutdown clears every state count, including the current one.
    callback.before_controller_shutdown()
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.INITIALIZING
        )
        == 0
    )
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.RUNNING
        )
        == 0
    )
    assert (
        callback._metrics[ControllerMetrics.CONTROLLER_STATE].get_value(
            TrainControllerStateType.FINISHED
        )
        == 0
    )
if __name__ == "__main__":
    import sys

    # Allow running this test module directly: verbose output, stop on first failure.
    sys.exit(pytest.main(["-v", "-x", __file__]))
|
MockGauge
|
python
|
huggingface__transformers
|
examples/modular-transformers/modeling_dummy_bert.py
|
{
"start": 22448,
"end": 24453
}
|
class ____(PreTrainedModel):
config_class = DummyBertConfig
base_model_prefix = "dummy_bert"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": DummyBertLayer,
"attentions": DummyBertSelfAttention,
"cross_attentions": DummyBertCrossAttention,
}
def _init_weights(self, module):
    """Initialize the weights"""
    if isinstance(module, nn.Linear):
        # Truncated-free normal init scaled by the configured range.
        module.weight.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.normal_(mean=0.0, std=self.config.initializer_range)
        # The padding embedding must stay at zero so it contributes nothing.
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        # Identity-like LayerNorm: zero shift, unit scale.
        module.bias.zero_()
        module.weight.fill_(1.0)
    elif isinstance(module, DummyBertLMPredictionHead):
        # The prediction head keeps its own bias tensor; reset it explicitly.
        module.bias.zero_()
@auto_docstring(
custom_intro="""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
)
|
DummyBertPreTrainedModel
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py
|
{
"start": 1879,
"end": 3179
}
|
class ____(BoringModel):
    """Boring-model variant used to exercise StochasticWeightAveraging.

    Options:
        batchnorm: insert a BatchNorm1d layer (SWA must update running stats).
        interval: lr-scheduler interval, "epoch" or "step".
        iterable_dataset: use an iterable dataset instead of a map-style one.
        crash_on_epoch: raise once this epoch is reached (resume/restart tests).
    """

    def __init__(
        self, batchnorm: bool = True, interval: str = "epoch", iterable_dataset: bool = False, crash_on_epoch=None
    ):
        super().__init__()
        layers = [nn.Linear(32, 32)]
        if batchnorm:
            layers.append(nn.BatchNorm1d(32))
        layers += [nn.ReLU(), nn.Linear(32, 2)]
        self.layer = nn.Sequential(*layers)
        self.interval = interval
        self.iterable_dataset = iterable_dataset
        self.crash_on_epoch = crash_on_epoch

    def training_step(self, batch, batch_idx):
        # Deliberate failure once the target epoch is reached, used to test
        # SWA behavior across a crash/resume cycle.
        if self.crash_on_epoch and self.trainer.current_epoch >= self.crash_on_epoch:
            raise Exception("SWA crash test")
        return super().training_step(batch, batch_idx)

    def train_dataloader(self):
        dset_cls = RandomIterableDataset if self.iterable_dataset else RandomDataset
        dset = dset_cls(32, 64)
        return DataLoader(dset, batch_size=2)

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": torch.optim.lr_scheduler.StepLR(optimizer, step_size=1),
                "interval": self.interval,
            },
        }
|
SwaTestModel
|
python
|
pypa__pipenv
|
pipenv/utils/dependencies.py
|
{
"start": 3103,
"end": 39601
}
|
class ____:
    """A hack, which allows us to tell resolver which version of Python we're using."""

    def __init__(self, python_path):
        self.python_path = python_path

    def __enter__(self):
        # Only export the override when a concrete interpreter path was given.
        interpreter = self.python_path
        if interpreter:
            os.environ["PIP_PYTHON_PATH"] = str(interpreter)

    def __exit__(self, *args):
        # NOTE(review): the PIP_PYTHON_PATH override is deliberately left in
        # place on exit — later subprocesses appear to rely on it. Confirm.
        pass
def get_canonical_names(packages):
    """Canonicalize a list of packages and return a set of canonical names"""
    from pipenv.patched.pip._vendor.packaging.utils import canonicalize_name

    if not isinstance(packages, Sequence):
        # Non-sequence, non-string inputs are passed through unchanged.
        if not isinstance(packages, str):
            return packages
        packages = [packages]
    # NOTE(review): str is itself a Sequence, so a bare string skips the
    # wrapping above and is iterated character-by-character here — confirm
    # callers never pass a single name as a plain string.
    return {canonicalize_name(pkg) for pkg in packages if pkg}
def pep440_version(version):
    """Normalize version to PEP 440 standards"""
    normalized = parse(version)
    return str(normalized)
def pep423_name(name):
    """Normalize package name to PEP 423 style standard."""
    name = name.lower()
    # NOTE(review): this condition is true whenever at least one VCS/scheme
    # token is absent from the name — i.e. for virtually every input — so the
    # else branch (leave URLs untouched) is nearly unreachable. Presumably the
    # intent was "if the name is NOT a VCS/scheme URL"; confirm before changing.
    if any(i not in name for i in (VCS_LIST + SCHEME_LIST)):
        return name.replace("_", "-")
    else:
        return name
def translate_markers(pipfile_entry):
    """Fold a Pipfile entry's marker-like keys into a single "markers" key.

    Keys matching PEP 508 environment marker names (python_version, sys_platform,
    ...) are combined with any explicit "markers" string into one marker
    expression on a copied entry; the individual keys are removed.
    """
    from pipenv.patched.pip._vendor.packaging.markers import default_environment

    allowed_marker_keys = ["markers"] + list(default_environment().keys())
    provided_keys = list(pipfile_entry.keys()) if hasattr(pipfile_entry, "keys") else []
    pipfile_markers = set(provided_keys) & set(allowed_marker_keys)
    new_pipfile = dict(pipfile_entry).copy()
    marker_set = set()
    # NOTE(review): os_name_marker is never assigned below, so the
    # "(...) and os_name" combination branch can never fire — confirm whether
    # os_name handling was intentionally dropped.
    os_name_marker = None
    if "markers" in new_pipfile:
        marker_str = new_pipfile.pop("markers")
        if marker_str:
            marker = str(Marker(marker_str))
            # "extra" markers are an install-time artifact; skip them here.
            if "extra" not in marker:
                marker_set.add(marker)
    for m in pipfile_markers:
        entry = f"{pipfile_entry[m]}"
        if m != "markers":
            # os_name keys are popped but not added to the marker set.
            if m != "os_name":
                marker_set.add(str(Marker(f"{m} {entry}")))
            new_pipfile.pop(m)
    if marker_set:
        markers_str = " and ".join(
            f"{s}" if " and " in s else s for s in sorted(dict.fromkeys(marker_set))
        )
        if os_name_marker:
            markers_str = f"({markers_str}) and {os_name_marker}"
        # Lockfile convention uses single quotes inside marker strings.
        new_pipfile["markers"] = str(Marker(markers_str)).replace('"', "'")
    return new_pipfile
def unearth_hashes_for_dep(project, dep):
    """Fetch artifact hashes for *dep* from its configured package index.

    The index URL is resolved from the project's sources by matching the
    entry's "index" name (defaulting to PyPI). Returns a list of hash
    strings, or an empty list when nothing could be determined.
    """
    hashes = []
    index_url = "https://pypi.org/simple/"
    source = "pypi"
    # NOTE(review): when no source name matches, `source` is left at the last
    # source iterated (or the "pypi" default for an empty list) and is still
    # passed to the hash lookups below — confirm this fallback is intended.
    for source in project.sources:
        if source.get("name") == dep.get("index"):
            index_url = source.get("url")
            break
    # 1 Try to get hashes directly form index
    install_req, markers, _ = install_req_from_pipfile(dep["name"], dep)
    if not install_req or not install_req.req:
        return []
    if "https://pypi.org/simple/" in index_url:
        hashes = project.get_hashes_from_pypi(install_req, source)
    elif index_url:
        hashes = project.get_hashes_from_remote_index_urls(install_req, source)
    if hashes:
        return hashes
    return []
def extract_vcs_url(vcs_url):
    """Strip pip/Pipfile decorations from a VCS URL.

    Removes a leading ``name @`` prefix and the ``<vcs>+`` scheme prefix,
    then rebuilds the URL without params, query or fragment while keeping
    any credentials in the netloc.  ``file:`` URIs are returned untouched.
    """
    vcs_url = vcs_url.strip()
    # file URIs must keep the entire URL (including any fragment) intact.
    if urlparse(vcs_url).scheme == "file":
        return vcs_url
    # Drop a leading "pkgname@" unless the string already starts with a VCS scheme.
    has_vcs_prefix = vcs_url.startswith(tuple(f"{vcs}+" for vcs in VCS_LIST))
    if "@" in vcs_url and not has_vcs_prefix:
        vcs_url = vcs_url.split("@", 1)[1]
    # Strip the "<vcs>+" scheme prefix, if present.
    for vcs_name in VCS_LIST:
        marker = f"{vcs_name}+"
        if vcs_url.startswith(marker):
            vcs_url = vcs_url[len(marker) :]
            break
    # Rebuild without params/query/fragment; auth details in netloc survive.
    pieces = urlparse(vcs_url)
    return urlunparse((pieces.scheme, pieces.netloc, pieces.path, "", "", ""))
def clean_resolved_dep(project, dep, is_top_level=False, current_entry=None):
    """Convert a resolver result *dep* into a lockfile entry ``{name: {...}}``.

    Handles markers, version pinning, VCS/file/path sources, extras, hashes
    and index names. When the entry's markers do not evaluate for the current
    environment and a *current_entry* exists, the new fields are merged into
    it instead of replacing it.
    """
    from pipenv.patched.pip._vendor.packaging.requirements import (
        Requirement as PipRequirement,
    )

    name = dep["name"]
    lockfile = {}
    # Evaluate Markers
    if "markers" in dep and dep.get("markers", "").strip():
        if not is_top_level:
            translated = translate_markers(dep).get("markers", "").strip()
            if translated:
                try:
                    lockfile["markers"] = translated
                except TypeError:
                    pass
        else:
            try:
                pipfile_entry = translate_markers(dep)
                if pipfile_entry.get("markers"):
                    lockfile["markers"] = pipfile_entry.get("markers")
            except TypeError:
                pass
    version = dep.get("version", None)
    # Normalize bare versions to "==x.y" pins.
    if version and not version.startswith("=="):
        version = f"=={version}"
    # A wildcard pin falls back to the previously locked version, if any.
    if version == "==*":
        if current_entry:
            version = current_entry.get("version")
            dep["version"] = version
        else:
            version = None
    is_vcs_or_file = False
    for vcs_type in VCS_LIST:
        if vcs_type in dep:
            vcs_url = dep[vcs_type]
            # Extras may be embedded in the VCS URL as "pkg[extra1,extra2]".
            if "[" in vcs_url and "]" in vcs_url:
                extras_section = vcs_url.split("[").pop().replace("]", "")
                lockfile["extras"] = sorted(
                    [extra.strip() for extra in extras_section.split(",")]
                )
            lockfile[vcs_type] = vcs_url
            lockfile["ref"] = dep.get("ref")
            if "subdirectory" in dep:
                lockfile["subdirectory"] = dep["subdirectory"]
            is_vcs_or_file = True
            if "editable" in dep:
                lockfile["editable"] = dep["editable"]
    preferred_file_keys = ["path", "file"]
    dependency_file_key = next(iter(k for k in preferred_file_keys if k in dep), None)
    if dependency_file_key:
        lockfile[dependency_file_key] = dep[dependency_file_key]
        is_vcs_or_file = True
        if "editable" in dep:
            lockfile["editable"] = dep["editable"]
    # Versions only apply to named (non-VCS, non-file) requirements.
    if version and not is_vcs_or_file:
        if isinstance(version, PipRequirement):
            if version.specifier:
                lockfile["version"] = str(version.specifier)
            if version.extras:
                lockfile["extras"] = sorted(version.extras)
        elif version:
            lockfile["version"] = version
    if dep.get("hashes"):
        lockfile["hashes"] = dep["hashes"]
    elif is_top_level:
        # Only spend network round-trips on hashes for top-level entries.
        potential_hashes = unearth_hashes_for_dep(project, dep)
        if potential_hashes:
            lockfile["hashes"] = potential_hashes
    if dep.get("index"):
        lockfile["index"] = dep["index"]
    if dep.get("extras"):
        lockfile["extras"] = sorted(dep["extras"])
    # In case we lock a uri or a file when the user supplied a path
    # remove the uri or file keys from the entry and keep the path
    if dep and isinstance(dep, dict):
        for k in preferred_file_keys:
            if k in dep.keys():
                lockfile[k] = dep[k]
                break
    if "markers" in dep:
        markers = dep["markers"]
        if markers:
            markers = Marker(markers)
            # Entry doesn't apply to this environment: merge into the
            # existing locked entry rather than overwriting it.
            if not markers.evaluate() and current_entry:
                current_entry.update(lockfile)
                return {name: current_entry}
    return {name: lockfile}
def as_pipfile(dep: InstallRequirement) -> Dict[str, Any]:
    """Create a pipfile entry for the given InstallRequirement."""
    pipfile_dict = {}
    name = dep.name
    # NOTE(review): assumes dep.req is not None — confirm callers guarantee this.
    version = dep.req.specifier
    # Construct the pipfile entry
    pipfile_dict[name] = {
        "version": str(version),
        "editable": dep.editable,
        "extras": list(dep.extras),
    }
    if dep.link:
        # If it's a VCS link
        if dep.link.is_vcs:
            vcs = dep.link.scheme.split("+")[0]
            pipfile_dict[name][vcs] = dep.link.url_without_fragment
        # If it's a URL link
        elif dep.link.scheme.startswith("http"):
            pipfile_dict[name]["file"] = dep.link.url_without_fragment
        # If it's a local file
        elif dep.link.is_file:
            pipfile_dict[name]["path"] = dep.link.file_path
    # Convert any markers to their string representation
    if dep.markers:
        pipfile_dict[name]["markers"] = str(dep.markers)
    # If a hash is available, add it to the pipfile entry
    if dep.hash_options:
        pipfile_dict[name]["hashes"] = dep.hash_options
    return pipfile_dict
def is_star(val):
    """Return True when *val* is the Pipfile wildcard version string ``"*"``."""
    if not isinstance(val, str):
        return False
    return val == "*"
def is_pinned(val):
    """Return True when *val* (a version string or a Pipfile mapping with a
    "version" key) pins an exact ``==`` version."""
    version = val.get("version") if isinstance(val, Mapping) else val
    return isinstance(version, str) and version.startswith("==")
def is_pinned_requirement(ireq):
    """
    Returns whether an InstallRequirement is a "pinned" requirement.

    Pinned means: not editable, exactly one specifier, operator ``==`` or
    ``===``, and not a wildcard pin such as ``==1.*``.
    """
    if ireq.editable or ireq.req is None:
        return False
    if len(ireq.specifier) != 1:
        return False
    (spec,) = ireq.specifier
    if spec.operator not in {"==", "==="}:
        return False
    return not spec.version.endswith(".*")
def is_editable_path(path):
    """
    Determine if a path is editable by checking if it's a directory.

    :param path: A path as string or Path object
    :return: True if the path is a directory, False otherwise
    """
    candidate = Path(path)
    return candidate.is_dir()
def dependency_as_pip_install_line(
    dep_name: str,
    dep: Union[str, Mapping],
    include_hashes: bool,
    include_markers: bool,
    include_index: bool,
    indexes: list,
    constraint: bool = False,
):
    """Render one Pipfile dependency as a pip install line.

    *dep* may be a bare version string or a Pipfile mapping (named, file/path
    or VCS requirement). When *constraint* is True, only plain named
    requirements produce output; everything else yields "" because pip
    constraints files cannot contain URLs or editables.
    """
    if isinstance(dep, str):
        if is_star(dep):
            return dep_name
        elif not COMPARE_OP.match(dep):
            # A bare version like "1.2" becomes an exact pin.
            return f"{dep_name}=={dep}"
        return f"{dep_name}{dep}"
    line = []
    is_constraint = False
    vcs = next(iter([vcs for vcs in VCS_LIST if vcs in dep]), None)
    if not vcs:
        for k in ["file", "path"]:
            if k in dep:
                if dep.get("editable") and is_editable_path(dep[k]):
                    line.append("-e")
                extras = ""
                if "extras" in dep:
                    extras = f"[{','.join(dep['extras'])}]"
                location = dep["file"] if "file" in dep else dep["path"]
                if location.startswith(("http:", "https:")):
                    # Remote archive: PEP 508 direct reference "name @ url".
                    line.append(f"{dep_name}{extras} @ {location}")
                else:
                    line.append(f"{location}{extras}")
                break
        else:
            # Normal/Named Requirements
            is_constraint = True
            line.append(dep_name)
            if "extras" in dep:
                line[-1] += f"[{','.join(dep['extras'])}]"
            if "version" in dep:
                version = dep["version"]
                if version and not is_star(version):
                    if not COMPARE_OP.match(version):
                        version = f"=={version}"
                    line[-1] += version
        if include_markers and dep.get("markers"):
            line[-1] = f'{line[-1]}; {dep["markers"]}'
        if include_hashes and dep.get("hashes"):
            line.extend([f" --hash={hash}" for hash in dep["hashes"]])
        if include_index:
            # Restrict to the entry's named index, or fall back to the first.
            if dep.get("index"):
                indexes = [s for s in indexes if s.get("name") == dep["index"]]
            else:
                indexes = [indexes[0]] if indexes else []
            index_list = prepare_pip_source_args(indexes)
            line.extend(index_list)
    elif vcs and vcs in dep:  # VCS Requirements
        extras = ""
        ref = ""
        if dep.get("ref"):
            ref = f"@{dep['ref']}"
        if "extras" in dep:
            extras = f"[{','.join(dep['extras'])}]"
        include_vcs = "" if f"{vcs}+" in dep[vcs] else f"{vcs}+"
        vcs_url = dep[vcs]
        # legacy format is the only format supported for editable installs https://github.com/pypa/pip/issues/9106
        if is_editable_path(dep[vcs]) or "file://" in dep[vcs]:
            if "#egg=" not in dep[vcs]:
                git_req = f"-e {include_vcs}{dep[vcs]}{ref}#egg={dep_name}{extras}"
            else:
                git_req = f"-e {include_vcs}{dep[vcs]}{ref}"
            if "subdirectory" in dep:
                git_req += f"&subdirectory={dep['subdirectory']}"
        else:
            if "#egg=" in vcs_url:
                vcs_url = vcs_url.split("#egg=")[0]
            # Non-editable VCS requirements use the PEP 508 direct-reference form.
            git_req = f"{dep_name}{extras} @ {include_vcs}{vcs_url}{ref}"
            if "subdirectory" in dep:
                git_req += f"#subdirectory={dep['subdirectory']}"
        line.append(git_req)
    if constraint and not is_constraint:
        pip_line = ""
    else:
        pip_line = " ".join(line)
    return pip_line
def convert_deps_to_pip(
    deps,
    indexes=None,
    include_hashes=True,
    include_markers=True,
    include_index=False,
):
    """Convert Pipfile-formatted dependencies to pip-formatted install lines.

    Returns a mapping of dependency name -> pip requirement line.
    """
    if indexes is None:
        indexes = []
    return {
        dep_name: dependency_as_pip_install_line(
            dep_name, dep, include_hashes, include_markers, include_index, indexes
        )
        for dep_name, dep in deps.items()
    }
def parse_metadata_file(content: str):
    """
    Parse a METADATA file to get the package name.

    Parameters:
        content (str): Contents of the METADATA file.

    Returns:
        str: Name of the package or None if not found.
    """
    for line in content.splitlines():
        if line.startswith("Name:"):
            # partition() tolerates "Name:pkg" (no space after the colon),
            # where the previous split("Name: ")[1] raised IndexError.
            return line.partition("Name:")[2].strip()
    return None
def parse_pkginfo_file(content: str):
    """
    Parse a PKG-INFO file to get the package name.

    Parameters:
        content (str): Contents of the PKG-INFO file.

    Returns:
        str: Name of the package or None if not found.
    """
    for line in content.splitlines():
        if line.startswith("Name:"):
            # partition() tolerates "Name:pkg" (no space after the colon),
            # where the previous split("Name: ")[1] raised IndexError.
            return line.partition("Name:")[2].strip()
    return None
def parse_setup_file(content):
    """Statically extract the package name from setup.py source.

    Walks the AST (no code is executed) looking for a ``setup(...)`` call and
    resolves its ``name=`` keyword, following simple string-variable
    assignments. Returns the name, or None when it cannot be determined.
    """
    # A dictionary to store variable names and their values
    variables = {}
    try:
        tree = ast.parse(content)
        # NOTE(review): ast.parse raises SyntaxError (not ValueError) on
        # malformed source; only ValueError (e.g. NUL bytes) is swallowed
        # below — confirm whether SyntaxError should propagate.
        for node in ast.walk(tree):
            # Extract variable assignments and store them
            if isinstance(node, ast.Assign):
                for target in node.targets:
                    if isinstance(target, ast.Name):
                        if isinstance(node.value, ast.Str):  # for Python versions < 3.8
                            variables[target.id] = node.value.s
                        elif isinstance(node.value, ast.Constant) and isinstance(
                            node.value.value, str
                        ):
                            variables[target.id] = node.value.value
            # Check function calls to extract the 'name' attribute from the setup function
            if isinstance(node, ast.Call):
                if (
                    getattr(node.func, "id", "") == "setup"
                    or isinstance(node.func, ast.Attribute)
                    and node.func.attr == "setup"
                ):
                    for keyword in node.keywords:
                        if keyword.arg == "name":
                            # If it's a variable, retrieve its value
                            if (
                                isinstance(keyword.value, ast.Name)
                                and keyword.value.id in variables
                            ):
                                return variables[keyword.value.id]
                            # Otherwise, check if it's directly provided
                            elif isinstance(keyword.value, ast.Str):
                                return keyword.value.s
                            elif isinstance(keyword.value, ast.Constant) and isinstance(
                                keyword.value.value, str
                            ):
                                return keyword.value.value
                            # Additional handling for Python versions and specific ways of defining the name
                            elif sys.version_info < (3, 9) and isinstance(
                                keyword.value, ast.Subscript
                            ):
                                if (
                                    isinstance(keyword.value.value, ast.Name)
                                    and keyword.value.value.id == "about"
                                ):
                                    if isinstance(
                                        keyword.value.slice, ast.Index
                                    ) and isinstance(keyword.value.slice.value, ast.Str):
                                        return keyword.value.slice.value.s
                                    return keyword.value.s
                            elif sys.version_info >= (3, 9) and isinstance(
                                keyword.value, ast.Subscript
                            ):
                                # name=about["__title__"]-style lookups.
                                if (
                                    isinstance(keyword.value.value, ast.Name)
                                    and isinstance(keyword.value.slice, ast.Str)
                                    and keyword.value.value.id == "about"
                                ):
                                    return keyword.value.slice.s
    except ValueError:
        pass  # We will not exec unsafe code to determine the name pre-resolver
    return None
def parse_cfg_file(content):
    """Return the package name declared in a setup.cfg ``[metadata]`` section,
    or None when the section or key is absent."""
    parser = configparser.ConfigParser()
    parser.read_string(content)
    try:
        return parser["metadata"]["name"]
    except (configparser.NoSectionError, KeyError):
        return None
def parse_toml_file(content):
    """Return the project name from pyproject.toml content, checking the
    PEP 621 ``[project]`` table first, then ``[tool.poetry]``; None otherwise."""
    data = tomli.loads(content)
    project_table = data.get("project")
    if project_table and "name" in project_table:
        return project_table["name"]
    tool_table = data.get("tool")
    if tool_table and "poetry" in tool_table:
        return tool_table["poetry"]["name"]
    return None
def find_package_name_from_tarball(tarball_filepath):
    """Scan a source tarball for project metadata files and return the package name.

    Returns None implicitly when no member yields a name.
    """
    # Strip a file:// prefix on POSIX so tarfile gets a plain path.
    if tarball_filepath.startswith("file://") and os.name != "nt":
        tarball_filepath = tarball_filepath[7:]
    with tarfile.open(tarball_filepath, "r") as tar_ref:
        for filename in tar_ref.getnames():
            if filename.endswith(RELEVANT_PROJECT_FILES):
                with tar_ref.extractfile(filename) as file:
                    possible_name = find_package_name_from_filename(filename, file)
                    if possible_name:
                        return possible_name
def find_package_name_from_zipfile(zip_filepath):
    """Scan a wheel/zip archive for project metadata files and return the package name.

    Returns None implicitly when no member yields a name.
    """
    # Strip a file:// prefix on POSIX so zipfile gets a plain path.
    if zip_filepath.startswith("file://") and os.name != "nt":
        zip_filepath = zip_filepath[7:]
    with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
        for filename in zip_ref.namelist():
            if filename.endswith(RELEVANT_PROJECT_FILES):
                with zip_ref.open(filename) as file:
                    possible_name = find_package_name_from_filename(file.name, file)
                    if possible_name:
                        return possible_name
def find_package_name_from_directory(directory):
    """Recursively search *directory* for project metadata and return the package name.

    A ``#egg=<name>`` fragment in the path short-circuits the search. Files
    are visited before subdirectories so metadata in a parent directory wins
    over metadata in a leaf. Returns None when nothing is found.
    """
    parsed_url = urlparse(directory)
    directory_path = Path(parsed_url.path) if parsed_url.scheme else Path(directory)
    # Handle egg fragment for direct dependencies
    if "#egg=" in str(directory_path):
        expected_name = str(directory_path).split("#egg=")[1]
        return expected_name
    # Windows path normalization
    directory_str = str(directory_path)
    if os.name == "nt":
        # Drop a spurious leading backslash before a drive spec (e.g. "\C:\...").
        if directory_str.startswith("\\") and (
            ":\\" in directory_str or ":/" in directory_str
        ):
            directory_path = Path(directory_str[1:])
        if directory_str.startswith("\\\\"):
            directory_path = Path(directory_str[1:])
    try:
        # Sort contents - files first, then directories to search parent
        # directories before leaf directories.
        directory_contents = sorted(
            directory_path.iterdir(), key=lambda x: (x.is_dir(), x.name)
        )
        for path in directory_contents:
            if path.is_file():
                if path.name.endswith(RELEVANT_PROJECT_FILES):
                    with path.open("rb") as file:
                        possible_name = find_package_name_from_filename(path.name, file)
                        if possible_name:
                            return possible_name
            elif path.is_dir():
                possible_name = find_package_name_from_directory(str(path))
                if possible_name:
                    return possible_name
    except (FileNotFoundError, PermissionError):
        # Handle cases where the directory doesn't exist or isn't accessible
        pass
    return None
def ensure_path_is_relative(file_path):
    """Express *file_path* relative to the current working directory.

    Falls back to the absolute path when the target sits on a different
    drive (Windows), and hand-builds a ``..``-prefixed relative path when
    the target is not underneath the current directory.
    """
    absolute = Path(file_path).resolve()
    cwd = Path.cwd()
    # Paths on different drives cannot be related with "..": keep absolute.
    if absolute.drive != cwd.drive:
        return str(absolute)
    try:
        return str(absolute.relative_to(cwd))
    except ValueError:
        # Count the leading path components the two paths share.
        shared = 0
        for ours, theirs in zip(absolute.parts, cwd.parts):
            if ours != theirs:
                break
            shared += 1
        # Climb out of cwd's non-shared tail, then descend into the
        # target's non-shared tail.
        climb = [".."] * (len(cwd.parts) - shared)
        return str(Path(*climb, *absolute.parts[shared:]))
def determine_path_specifier(package: InstallRequirement):
    """Return the URL (http/https) or relative file path for a link-backed requirement.

    Returns None implicitly when the package has no link or uses another scheme.
    """
    if package.link:
        if package.link.scheme in ["http", "https"]:
            return package.link.url_without_fragment
        if package.link.scheme == "file":
            return ensure_path_is_relative(package.link.file_path)
def determine_vcs_specifier(package: InstallRequirement):
    """Return the VCS URL (without fragment) when the requirement's link uses
    a VCS scheme; None implicitly otherwise."""
    if package.link and package.link.scheme in VCS_SCHEMES:
        vcs_specifier = package.link.url_without_fragment
        return vcs_specifier
def get_vcs_backend(vcs_type):
    """Return pip's VCS backend implementation for *vcs_type* (e.g. "git")."""
    backend = VcsSupport().get_backend(vcs_type)
    return backend
def generate_temp_dir_path():
    """Return a filesystem path that is very likely unused.

    Reserves a unique name by creating a temporary directory, then removes
    the directory so only the path string remains.  NOTE(review): inherently
    racy — the name can be re-taken before the caller uses it; this matches
    the original behavior.
    """
    reserved = tempfile.mkdtemp()
    os.rmdir(reserved)
    return reserved
def determine_vcs_revision_hash(
    package: InstallRequirement, vcs_type: str, revision: str
):
    """Resolve a VCS ref to a concrete revision hash via a throwaway checkout.

    Falls back to the provided *revision* string (with a printed warning)
    when the checkout or revision lookup fails for any reason.
    """
    try:  # Windows python 3.7 will sometimes raise PermissionError cleaning up
        checkout_directory = generate_temp_dir_path()
        repo_backend = get_vcs_backend(vcs_type)
        # hide_url keeps credentials out of any logged output.
        repo_backend.obtain(checkout_directory, hide_url(package.link.url), verbosity=1)
        return repo_backend.get_revision(checkout_directory)
    except Exception as e:
        err.print(
            f"Error {e} obtaining {vcs_type} revision hash for {package}; falling back to {revision}."
        )
        return revision
@lru_cache(maxsize=None)
def determine_package_name(package: InstallRequirement):
    """Determine the distribution name for an InstallRequirement.

    Tries, in order: the requirement's own name, a ``#egg=`` fragment, a
    PEP 508 direct reference, downloading and inspecting remote archives,
    local VCS checkouts, and local files/directories. Raises ValueError
    when no name can be determined.

    NOTE(review): the cache is unbounded and keys on InstallRequirement
    identity/hash — confirm requirement objects are hashable and short-lived.
    """
    req_name = None
    if package.name:
        req_name = package.name
    elif "#egg=" in str(package):
        req_name = str(package).split("#egg=")[1]
        # Strip any extras suffix, e.g. "name[extra]".
        req_name = req_name.split("[")[0]
    elif " @ " in str(package):
        req_name = str(package).split("@ ")[0]
        req_name = req_name.split("[")[0]
    elif package.link and package.link.scheme in REMOTE_SCHEMES:
        try:  # Windows python 3.7 will sometimes raise PermissionError cleaning up
            with TemporaryDirectory() as td:
                cmd = get_pip_command()
                options, _ = cmd.parser.parse_args([])
                session = cmd._build_session(options)
                # Download the artifact and inspect it for metadata.
                local_file = unpack_url(
                    link=package.link,
                    location=td,
                    download=Downloader(session, "off", resume_retries=5),
                    verbosity=1,
                )
                if local_file.path.endswith(".whl") or local_file.path.endswith(".zip"):
                    req_name = find_package_name_from_zipfile(local_file.path)
                elif local_file.path.endswith(".tar.gz") or local_file.path.endswith(
                    ".tar.bz2"
                ):
                    req_name = find_package_name_from_tarball(local_file.path)
                else:
                    req_name = find_package_name_from_directory(local_file.path)
        except PermissionError:
            pass
    elif package.link and package.link.scheme in [
        "bzr+file",
        "git+file",
        "hg+file",
        "svn+file",
    ]:
        parsed_url = urlparse(package.link.url)
        repository_path = parsed_url.path
        repository_path = repository_path.rsplit("@", 1)[
            0
        ]  # extract the actual directory path
        repository_path = repository_path.split("#egg=")[0]
        req_name = find_package_name_from_directory(repository_path)
    elif package.link and package.link.scheme == "file":
        if package.link.file_path.endswith(".whl") or package.link.file_path.endswith(
            ".zip"
        ):
            req_name = find_package_name_from_zipfile(package.link.file_path)
        elif package.link.file_path.endswith(
            ".tar.gz"
        ) or package.link.file_path.endswith(".tar.bz2"):
            req_name = find_package_name_from_tarball(package.link.file_path)
        else:
            req_name = find_package_name_from_directory(package.link.file_path)
    if req_name:
        return req_name
    else:
        raise ValueError(f"Could not determine package name from {package}")
def find_package_name_from_filename(filename, file):
    """Dispatch to the metadata parser matching the file's basename.

    *filename* may be a full archive member path or a bare name; *file* is
    an open binary file object whose contents are decoded and parsed.
    Returns the package name, or None when the file declares none.
    """
    parsers = {
        "METADATA": parse_metadata_file,
        "PKG-INFO": parse_pkginfo_file,
        "setup.py": parse_setup_file,
        "setup.cfg": parse_cfg_file,
        "pyproject.toml": parse_toml_file,
    }
    # Basename handles both full paths (from archives) and bare names
    # (from directory walks).
    parser = parsers.get(Path(filename).name)
    if parser is None:
        return None
    return parser(file.read().decode()) or None
def create_link(link):
    # type: (AnyStr) -> Link
    """Build a pip ``Link`` from a URL string, rejecting non-string input."""
    if isinstance(link, str):
        return Link(link)
    raise TypeError("must provide a string to instantiate a new link")
def get_link_from_line(line):
    """Parse the given requirement line into a pip ``Link``.

    Local filesystem paths (no "://") are converted to ``file:`` URIs; for
    URIs, a VCS prefix in the scheme (e.g. ``git+https``) is preserved in
    the resulting Link's URL.

    NOTE(review): an earlier docstring promised a 6-tuple
    (vcs_type, prefer, relpath, path, uri, link); only the Link is
    returned — callers must rely on the Link alone.

    This function exists to deal with edge cases concerning URIs without a
    valid netloc. Those URIs are problematic to a straight ``urlsplit``
    call because they cannot be reliably reconstructed with ``urlunsplit``
    due to a bug in the standard library:

    >>> from urllib.parse import urlsplit, urlunsplit
    >>> urlunsplit(urlsplit('git+file:///this/breaks'))
    'git+file:/this/breaks'
    >>> urlunsplit(urlsplit('file:///this/works'))
    'file:///this/works'

    See `https://bugs.python.org/issue23505#msg277350`.
    """
    # Git allows `git@github.com...` lines that are not really URIs.
    # Add "ssh://" so we can parse correctly, and restore afterward.
    fixed_line = add_ssh_scheme_to_git_uri(line)  # type: str
    # We can assume a lot of things if this is a local filesystem path.
    if "://" not in fixed_line:
        p = Path(fixed_line).absolute()  # type: Path
        # NOTE(review): the as_posix() result is discarded — looks like a no-op.
        p.as_posix()  # type: Optional[str]
        uri = p.as_uri()  # type: str
        link = create_link(uri)  # type: Link
        return link
    # This is an URI. We'll need to perform some elaborated parsing.
    parsed_url = urlsplit(fixed_line)  # type: SplitResult
    # Split the VCS part out if needed.
    original_scheme = parsed_url.scheme  # type: str
    if "+" in original_scheme:
        # NOTE(review): vcs_type is computed but unused, and the scheme swap
        # below is immediately undone — the net effect is a Link built from
        # fixed_line unchanged. Confirm this round-trip is intentional.
        vcs_type, _, scheme = original_scheme.partition("+")
        parsed_url = parsed_url._replace(scheme=scheme)  # type: ignore
    else:
        pass
    # Re-attach VCS prefix to build a Link.
    link = create_link(
        urlunsplit(parsed_url._replace(scheme=original_scheme))  # type: ignore
    )
    return link
_NAME_WITH_EXTRAS_RE = re.compile(r"^([a-zA-Z0-9_-]+(\[[a-zA-Z0-9_-]+\])?) @ .*")


def has_name_with_extras(requirement):
    """Return True for PEP 508 direct-reference lines like ``name[extra] @ <url>``."""
    return _NAME_WITH_EXTRAS_RE.match(requirement) is not None
def expand_env_variables(line) -> AnyStr:
    """Expand the env vars in a line following pip's standard.
    https://pip.pypa.io/en/stable/reference/pip_install/#id10.

    Only ``${NAME}`` tokens whose name consists of uppercase letters,
    digits or underscores are expanded; unset variables are left verbatim.
    """

    def substitute(match):
        expanded = os.getenv(match.group(1))
        return expanded if expanded else match.group()

    return re.sub(r"\$\{([A-Z0-9_]+)\}", substitute, line)
def expansive_install_req_from_line(
    pip_line: str,
    comes_from: Optional[Union[str, InstallRequirement]] = None,
    *,
    use_pep517: Optional[bool] = None,
    isolated: bool = False,
    global_options: Optional[List[str]] = None,
    hash_options: Optional[Dict[str, List[str]]] = None,
    constraint: bool = False,
    line_source: Optional[str] = None,
    user_supplied: bool = False,
    config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
    expand_env: bool = False,
) -> (InstallRequirement, str):
    """Create an InstallRequirement from a pip-style requirement line.

    InstallRequirement is a pip internal construct that represents an installable requirement,
    and is used as an intermediary between the pip command and the resolver.

    :param pip_line: A pip-style requirement line.
    :param comes_from: The path to the requirements file the line was found in.
    :param use_pep517: Whether to use PEP 517/518 when installing the
        requirement.
    :param isolated: Whether to isolate the requirements when installing them. (likely unused)
    :param global_options: Extra global options to be used when installing the install req (likely unused)
    :param hash_options: Extra hash options to be used when installing the install req (likely unused)
    :param constraint: Whether the requirement is a constraint.
    :param line_source: The source of the line (e.g. "requirements.txt").
    :param user_supplied: Whether the requirement was directly provided by the user.
    :param config_settings: Configuration settings to be used when installing the install req (likely unused)
    :param expand_env: Whether to expand environment variables in the line. (definitely used)
    :return: A tuple of the InstallRequirement and the name of the package (if determined).
    """
    name = None
    pip_line = pip_line.strip("'").lstrip(" ")
    # Handle paths with escaped spaces
    if os.path.exists(os.path.expanduser(pip_line.replace("\\ ", " "))):
        pip_line = pip_line.replace("\\ ", " ")
    for new_req_symbol in ("@ ", " @ "):  # Check for new style pip lines
        if new_req_symbol in pip_line:
            pip_line_parts = pip_line.split(new_req_symbol, 1)
            name = pip_line_parts[0]
            pip_line = pip_line_parts[1]
    if pip_line.startswith("-e "):  # Editable requirements
        pip_line = pip_line.split("-e ")[1]
        return install_req_from_editable(pip_line, line_source), name
    if expand_env:
        pip_line = expand_env_variables(pip_line)
    vcs_part = pip_line
    for vcs in VCS_LIST:
        if vcs_part.startswith(f"{vcs}+"):
            # VCS requirement: carry the parsed link; the name (if any) was
            # already split off the "name @ url" form above.
            link = get_link_from_line(vcs_part)
            install_req = InstallRequirement(
                None,
                comes_from,
                link=link,
                use_pep517=use_pep517,
                isolated=isolated,
                global_options=global_options,
                hash_options=hash_options,
                constraint=constraint,
                user_supplied=user_supplied,
            )
            return install_req, name
    if urlparse(pip_line).scheme in ("http", "https", "file") or any(
        pip_line.endswith(s) for s in INSTALLABLE_EXTENSIONS
    ):
        parts = parse_req_from_line(pip_line, line_source)
    else:
        # It's a requirement
        if "--index" in pip_line:
            pip_line = pip_line.split("--index")[0]
        if " -i " in pip_line:
            pip_line = pip_line.split(" -i ")[0]
        # handle local version identifiers (like the ones torch uses in their public index)
        if "+" in pip_line:
            pip_line = pip_line.split("+")[0]
        parts = parse_req_from_line(pip_line, line_source)
    install_req = InstallRequirement(
        parts.requirement,
        comes_from,
        link=parts.link,
        markers=parts.markers,
        use_pep517=use_pep517,
        isolated=isolated,
        global_options=global_options,
        hash_options=hash_options,
        config_settings=config_settings,
        constraint=constraint,
        extras=parts.extras,
        user_supplied=user_supplied,
    )
    return install_req, name
def normalize_editable_path_for_pip(path_str):
    """Return *path_str* with OS-native separators rewritten as forward slashes.

    pip interprets backslashes in editable package paths as escape
    characters, so Windows-style separators must be converted to POSIX form
    before the path is handed to pip.
    """
    posix_path = path_str.replace(os.path.sep, "/")
    return posix_path
def file_path_from_pipfile(path_str, pipfile_entry):
    """Build a pip-installable requirement line from a Pipfile path entry.

    Handles local and remote paths, files and directories; supports the
    ``extras`` and ``editable`` keys of the entry. Returns a single string
    suitable for passing to pip.
    """
    remote_schemes = ("http:", "https:", "ftp:")
    if path_str.startswith(remote_schemes):
        # Remote URLs are passed through untouched.
        req_str = path_str
    else:
        req_str = ensure_path_is_relative(path_str)

    extras = pipfile_entry.get("extras")
    if extras:
        req_str = f"{req_str}[{','.join(extras)}]"
    if pipfile_entry.get("editable", False):
        req_str = f"-e {normalize_editable_path_for_pip(req_str)}"
    return req_str
def normalize_vcs_url(vcs_url):
    """Split a VCS URL into ``(url, ref)``.

    A ref (branch, tag or commit) may be appended to the path with ``@``,
    but a netloc may also contain ``@`` (``user:pass@host``) — only an
    ``@`` inside the *path* component marks a ref. Returns
    ``(vcs_url, "")`` when no ref is present.
    """
    vcs_ref = ""
    if "@" in vcs_url and "@" in urlparse(vcs_url).path:
        # Split on the last "@" so credentials earlier in the URL survive.
        vcs_url, vcs_ref = vcs_url.rsplit("@", 1)
    return vcs_url, vcs_ref
|
HackedPythonVersion
|
python
|
pypa__setuptools
|
pkg_resources/__init__.py
|
{
"start": 121840,
"end": 126232
}
|
class ____(Warning):
    """
    Base class for warnings about deprecations in ``pkg_resources``.

    This class is deliberately not derived from ``DeprecationWarning``, and
    as such is visible by default (``DeprecationWarning`` is hidden unless
    explicitly enabled by the interpreter or warning filters).
    """
# Ported from ``setuptools`` to avoid introducing an import inter-dependency:
# "locale" is only a valid open() encoding argument on Python >= 3.10.
_LOCALE_ENCODING = "locale" if sys.version_info >= (3, 10) else None
# This must go before calls to `_call_aside`. See https://github.com/pypa/setuptools/pull/4422
def _read_utf8_with_fallback(file: str, fallback_encoding=_LOCALE_ENCODING) -> str:
    """Read *file* as UTF-8, retrying with *fallback_encoding* on decode errors.

    Mirrors ``setuptools.unicode_utils._read_utf8_with_fallback``; the
    fallback path is deprecated and emits a ``PkgResourcesDeprecationWarning``.
    """
    try:
        with open(file, "r", encoding="utf-8") as f:
            return f.read()
    except UnicodeDecodeError: # pragma: no cover
        msg = f"""\
********************************************************************************
`encoding="utf-8"` fails with {file!r}, trying `encoding={fallback_encoding!r}`.
This fallback behaviour is considered **deprecated** and future versions of
`setuptools/pkg_resources` may not implement it.
Please encode {file!r} with "utf-8" to ensure future builds will succeed.
If this file was produced by `setuptools` itself, cleaning up the cached files
and re-building/re-installing the package with a newer version of `setuptools`
(e.g. by updating `build-system.requires` in its `pyproject.toml`)
might solve the problem.
********************************************************************************
"""
        # TODO: Add a deadline?
        # See comment in setuptools.unicode_utils._Utf8EncodingNeeded
        warnings.warn(msg, PkgResourcesDeprecationWarning, stacklevel=2)
        # Retry with the (possibly locale-dependent) fallback encoding.
        with open(file, "r", encoding=fallback_encoding) as f:
            return f.read()
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside  # executed immediately at module import time
def _initialize(g=globals()) -> None:
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    # Keep a module-level handle to the singleton manager.
    g['_manager'] = manager
    # Re-export every public ResourceManager method (resource_exists,
    # resource_filename, ...) as a module-level function.
    g.update(
        (name, getattr(manager, name))
        for name in dir(manager)
        if not name.startswith('_')
    )
@_call_aside
def _initialize_master_working_set() -> None:
    """
    Prepare the master working set and make the ``require()``
    API available.
    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.
    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = _declare_state('object', 'working_set', WorkingSet._build_master())
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path with replace=False and
    # ensure that all distributions added to the working set in the future
    # (e.g. by calling ``require()``) will get activated as well,
    # with higher priority (replace=True).
    tuple(dist.activate(replace=False) for dist in working_set)
    add_activation_listener(
        lambda dist: dist.activate(replace=True),
        existing=False,
    )
    working_set.entries = []
    # match order
    list(map(working_set.add_entry, sys.path))
    # Publish everything defined above (working_set, require, run_script, ...)
    # as module-level names.
    globals().update(locals())
if TYPE_CHECKING:
    # Static-only declarations mirroring the names injected at runtime by the
    # @_call_aside initializers above, so type checkers can resolve them.
    # All of these are set by the @_call_aside methods above
    __resource_manager = ResourceManager() # Won't exist at runtime
    resource_exists = __resource_manager.resource_exists
    resource_isdir = __resource_manager.resource_isdir
    resource_filename = __resource_manager.resource_filename
    resource_stream = __resource_manager.resource_stream
    resource_string = __resource_manager.resource_string
    resource_listdir = __resource_manager.resource_listdir
    set_extraction_path = __resource_manager.set_extraction_path
    cleanup_resources = __resource_manager.cleanup_resources
    working_set = WorkingSet()
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    run_main = run_script
|
PkgResourcesDeprecationWarning
|
python
|
kamyu104__LeetCode-Solutions
|
Python/furthest-point-from-origin.py
|
{
"start": 36,
"end": 401
}
|
class ____(object):
def furthestDistanceFromOrigin(self, moves):
"""
:type moves: str
:rtype: int
"""
curr = cnt = 0
for x in moves:
if x == 'L':
curr -= 1
elif x == 'R':
curr += 1
else:
cnt += 1
return abs(curr)+cnt
|
Solution
|
python
|
graphql-python__graphene
|
graphene/types/structures.py
|
{
"start": 1734,
"end": 3121
}
|
class ____(Structure):
    """
    Non-Null Modifier

    A non-null is a kind of type marker: a wrapping type which points to
    another type. Non-null types enforce that their values are never null and
    can ensure an error is raised if this ever occurs during a request. It is
    useful for fields on which you can make a strong non-nullability
    guarantee — for example, the id field of a database row.

    Note: the enforcement of non-nullability occurs within the executor.

    NonNull can also be indicated on all Mounted types with the keyword argument ``required``.

    .. code:: python

        from graphene import NonNull, String

        field_name = NonNull(String, description='This field will not be null')
        another_field = String(required=True, description='This is equivalent to the above')
    """

    def __init__(self, *args, **kwargs):
        super(NonNull, self).__init__(*args, **kwargs)
        # Wrapping a NonNull in another NonNull is redundant and disallowed.
        assert not isinstance(
            self._of_type, NonNull
        ), f"Can only create NonNull of a Nullable GraphQLType but got: {self._of_type}."

    def __str__(self):
        # GraphQL SDL renders non-null types with a trailing "!".
        return f"{self.of_type}!"

    def __eq__(self, other):
        # Equal only to other NonNull wrappers built around an equal type
        # with the same construction arguments.
        if not isinstance(other, NonNull):
            return False
        return (
            self.of_type == other.of_type
            and self.args == other.args
            and self.kwargs == other.kwargs
        )
|
NonNull
|
python
|
numba__numba
|
numba/core/typeinfer.py
|
{
"start": 29650,
"end": 30752
}
|
class ____(SetItemRefinement):
    """Typing constraint for a ``target[index] = value`` statement."""
    def __init__(self, target, index, value, loc):
        self.target = target
        self.index = index
        self.value = value
        # Source location, used for error reporting.
        self.loc = loc
    def __call__(self, typeinfer):
        with new_error_context("typing of setitem at {loc}", loc=self.loc):
            typevars = typeinfer.typevars
            # Defer until target, index and value all have inferred types;
            # the constraint will be re-evaluated on a later propagation pass.
            if not all(typevars[var.name].defined
                       for var in (self.target, self.index, self.value)):
                return
            targetty = typevars[self.target.name].getone()
            idxty = typevars[self.index.name].getone()
            valty = typevars[self.value.name].getone()
            # Ask the typing context for a setitem signature on these types.
            sig = typeinfer.context.resolve_setitem(targetty, idxty, valty)
            if sig is None:
                raise TypingError("Cannot resolve setitem: %s[%s] = %s" %
                                  (targetty, idxty, valty), loc=self.loc)
            self.signature = sig
            # Allow the refinement mixin to adjust the target's type if needed.
            self._refine_target_type(typeinfer, targetty, idxty, valty, sig)
    def get_call_signature(self):
        # Only valid after a successful __call__.
        return self.signature
|
SetItemConstraint
|
python
|
ray-project__ray
|
python/ray/dashboard/modules/reporter/gpu_providers.py
|
{
"start": 13522,
"end": 16900
}
|
class ____(GpuProvider):
    """AMD GPU provider using pyamdsmi."""
    def __init__(self):
        super().__init__()
        # Lazily-bound pyamdsmi module; set by _initialize().
        self._pyamdsmi = None
    def get_provider_name(self) -> GpuProviderType:
        return GpuProviderType.AMD
    def is_available(self) -> bool:
        """Check if AMD GPUs are available."""
        try:
            import ray._private.thirdparty.pyamdsmi as pyamdsmi
            # Probe by initializing and immediately shutting down the SMI lib.
            pyamdsmi.smi_initialize()
            pyamdsmi.smi_shutdown()
            return True
        except Exception as e:
            logger.debug(f"AMD GPU not available: {e}")
            return False
    def _initialize(self) -> bool:
        """Initialize the AMD GPU provider."""
        if self._initialized:
            return True
        try:
            import ray._private.thirdparty.pyamdsmi as pyamdsmi
            self._pyamdsmi = pyamdsmi
            self._pyamdsmi.smi_initialize()
            self._initialized = True
            return True
        except Exception as e:
            logger.debug(f"Failed to initialize AMD GPU provider: {e}")
            return False
    def _shutdown(self):
        """Shutdown the AMD GPU provider."""
        if self._initialized and self._pyamdsmi:
            try:
                self._pyamdsmi.smi_shutdown()
            except Exception as e:
                logger.debug(f"Error shutting down AMD GPU provider: {e}")
            finally:
                self._initialized = False
    def get_gpu_utilization(self) -> List[GpuUtilizationInfo]:
        """Get GPU utilization information for all AMD GPUs."""
        if not self._initialized:
            if not self._initialize():
                return []
        gpu_utilizations = []
        try:
            num_gpus = self._pyamdsmi.smi_get_device_count()
            processes = self._pyamdsmi.smi_get_device_compute_process()
            for i in range(num_gpus):
                utilization = self._pyamdsmi.smi_get_device_utilization(i)
                # NOTE(review): this branch is a no-op (reassigns -1 to -1);
                # presumably meant to normalize an "unavailable" reading to a
                # sentinel — confirm the intended value.
                if utilization == -1:
                    utilization = -1
                # Get running processes
                processes_pids = {}
                for process in self._pyamdsmi.smi_get_compute_process_info_by_device(
                    i, processes
                ):
                    if process.vram_usage:
                        processes_pids[int(process.process_id)] = ProcessGPUInfo(
                            pid=int(process.process_id),
                            # vram_usage is in bytes; convert to MiB.
                            gpu_memory_usage=int(process.vram_usage) // MB,
                            gpu_utilization=None,
                        )
                info = GpuUtilizationInfo(
                    index=i,
                    name=self._decode(self._pyamdsmi.smi_get_device_name(i)),
                    uuid=hex(self._pyamdsmi.smi_get_device_unique_id(i)),
                    utilization_gpu=utilization,
                    memory_used=int(self._pyamdsmi.smi_get_device_memory_used(i)) // MB,
                    memory_total=int(self._pyamdsmi.smi_get_device_memory_total(i))
                    // MB,
                    processes_pids=processes_pids,
                )
                gpu_utilizations.append(info)
        except Exception as e:
            logger.warning(f"Error getting AMD GPU utilization: {e}")
        finally:
            # Always release the SMI library, even on failure.
            self._shutdown()
        return gpu_utilizations
|
AmdGpuProvider
|
python
|
allegroai__clearml
|
clearml/storage/helper.py
|
{
"start": 42280,
"end": 58574
}
|
class ____(_Driver):
    """Storage driver for Azure Blob Storage.

    Supports both the legacy ``azure.storage.blob.BlockBlobService`` API
    (SDK < 12) and the modern ``BlobServiceClient`` API (SDK >= 12),
    dispatching on whichever package is importable at runtime.
    """
    scheme = "azure"
    # Cache of _Container wrappers, keyed by container name (shared class-wide).
    _containers = {}
    _max_connections = deferred_config("azure.storage.max_connections", 0)
    class _Container(object):
        """Wrapper over one Azure container, hiding the legacy/modern SDK split."""
        def __init__(
            self,
            name: str,
            config: AzureContainerConfigurations,
            account_url: str,
        ) -> None:
            self.MAX_SINGLE_PUT_SIZE = 4 * 1024 * 1024
            self.SOCKET_TIMEOUT = (300, 2000)
            self.name = name
            self.config = config
            self.account_url = account_url
            # Detect which Azure SDK generation is installed.
            try:
                from azure.storage.blob import BlobServiceClient  # noqa
                self.__legacy = False
            except ImportError:
                try:
                    from azure.storage.blob import BlockBlobService  # noqa
                    from azure.common import AzureHttpError  # noqa
                    self.__legacy = True
                except ImportError:
                    raise UsageError(
                        "Azure blob storage driver not found. "
                        "Please install driver using: 'pip install clearml[azure]' or "
                        "pip install '\"azure.storage.blob>=12.0.0\"'"
                    )
            if self.__legacy:
                self.__blob_service = BlockBlobService(
                    account_name=self.config.account_name,
                    account_key=self.config.account_key,
                )
                self.__blob_service.MAX_SINGLE_PUT_SIZE = self.MAX_SINGLE_PUT_SIZE
                self.__blob_service.socket_timeout = self.SOCKET_TIMEOUT
            else:
                credential = {
                    "account_name": self.config.account_name,
                    "account_key": self.config.account_key,
                }
                self.__blob_service = BlobServiceClient(
                    account_url=account_url,
                    credential=credential,
                    max_single_put_size=self.MAX_SINGLE_PUT_SIZE,
                )
        @staticmethod
        def _get_max_connections_dict(
            max_connections: Optional[Union[int, str]] = None,
            key: str = "max_connections",
        ) -> Dict[str, int]:
            """Build the kwargs dict carrying the connection limit, or {} if unset."""
            # must cast for deferred resolving
            try:
                max_connections = max_connections or int(_AzureBlobServiceStorageDriver._max_connections)
            except (AttributeError, TypeError):
                return {}
            return {key: int(max_connections)} if max_connections else {}
        def create_blob_from_data(
            self,
            container_name: str,
            object_name: str,
            blob_name: str,
            data: bytes,
            max_connections: Optional[int] = None,
            progress_callback: Optional[Callable] = None,
            content_settings: Optional["ContentSettings"] = None,
        ) -> None:
            """Upload in-memory data (or a readable stream) as a blob."""
            if self.__legacy:
                self.__blob_service.create_blob_from_bytes(
                    container_name,
                    object_name,
                    data,
                    progress_callback=progress_callback,
                    **self._get_max_connections_dict(max_connections),
                )
            else:
                client = self.__blob_service.get_blob_client(container_name, blob_name)
                client.upload_blob(
                    data,
                    overwrite=True,
                    content_settings=content_settings,
                    **self._get_max_connections_dict(max_connections, key="max_concurrency"),
                )
        def create_blob_from_path(
            self,
            container_name: str,
            blob_name: str,
            path: str,
            max_connections: Optional[int] = None,
            content_settings: Optional["ContentSettings"] = None,
            progress_callback: Optional[Callable[[int, int], None]] = None,
        ) -> None:
            """Upload a local file as a blob."""
            if self.__legacy:
                self.__blob_service.create_blob_from_path(
                    container_name,
                    blob_name,
                    path,
                    content_settings=content_settings,
                    progress_callback=progress_callback,
                    **self._get_max_connections_dict(max_connections),
                )
            else:
                # Modern SDK path streams the open file through upload_blob().
                with open(path, "rb") as f:
                    self.create_blob_from_data(
                        container_name,
                        None,
                        blob_name,
                        f,
                        content_settings=content_settings,
                        max_connections=max_connections,
                    )
        def delete_blob(self, container_name: str, blob_name: str) -> None:
            if self.__legacy:
                self.__blob_service.delete_blob(
                    container_name,
                    blob_name,
                )
            else:
                client = self.__blob_service.get_blob_client(container_name, blob_name)
                client.delete_blob()
        def exists(self, container_name: str, blob_name: str) -> bool:
            """Return True when the blob exists in the container."""
            if self.__legacy:
                # Fixed: previously returned `not ...exists(...)`, inverting the
                # result on the legacy SDK path. Callers (delete_object,
                # exists_file) rely on True meaning "blob is present".
                return self.__blob_service.exists(container_name, blob_name)
            else:
                client = self.__blob_service.get_blob_client(container_name, blob_name)
                return client.exists()
        def list_blobs(self, container_name: str, prefix: Optional[str] = None) -> Any:
            if self.__legacy:
                return self.__blob_service.list_blobs(container_name=container_name, prefix=prefix)
            else:
                client = self.__blob_service.get_container_client(container_name)
                return client.list_blobs(name_starts_with=prefix)
        def get_blob_properties(self, container_name: str, blob_name: str) -> Any:
            if self.__legacy:
                return self.__blob_service.get_blob_properties(container_name, blob_name)
            else:
                client = self.__blob_service.get_blob_client(container_name, blob_name)
                return client.get_blob_properties()
        def get_blob_to_bytes(
            self,
            container_name: str,
            blob_name: str,
            progress_callback: Optional[Callable[[int, int], None]] = None,
        ) -> bytes:
            """Download a blob into memory."""
            if self.__legacy:
                return self.__blob_service.get_blob_to_bytes(
                    container_name,
                    blob_name,
                    progress_callback=progress_callback,
                )
            else:
                client = self.__blob_service.get_blob_client(container_name, blob_name)
                return client.download_blob().content_as_bytes()
        def get_blob_to_path(
            self,
            container_name: str,
            blob_name: str,
            path: str,
            max_connections: Optional[int] = None,
            progress_callback: Optional[Callable] = None,
        ) -> None:
            """Download a blob to a local file path."""
            if self.__legacy:
                return self.__blob_service.get_blob_to_path(
                    container_name,
                    blob_name,
                    path,
                    progress_callback=progress_callback,
                    **self._get_max_connections_dict(max_connections),
                )
            else:
                client = self.__blob_service.get_blob_client(container_name, blob_name)
                with open(path, "wb") as file:
                    return client.download_blob(
                        **self._get_max_connections_dict(max_connections, key="max_concurrency")
                    ).download_to_stream(file)
        def is_legacy(self) -> bool:
            # True when running against the pre-12 Azure SDK.
            return self.__legacy
        @property
        def blob_service(self) -> Any:
            return self.__blob_service
    @attrs
    class _Object(object):
        # Lightweight handle: the owning container, the blob name and its size.
        container = attrib()
        blob_name = attrib()
        content_length = attrib()
    def get_container(
        self,
        container_name: Optional[str] = None,
        config: Optional[Any] = None,
        account_url: Optional[str] = None,
        **kwargs: Any,
    ) -> _Container:
        """Return (and cache) the _Container wrapper for *container_name*."""
        container_name = container_name or config.container_name
        if container_name not in self._containers:
            self._containers[container_name] = self._Container(
                name=container_name, config=config, account_url=account_url
            )
        return self._containers[container_name]
    def upload_object_via_stream(
        self,
        iterator: Any,
        container: Any,
        object_name: str,
        callback: Any = None,
        extra: dict = None,
        max_connections: int = None,
        **kwargs: Any,
    ) -> bool:
        """Upload a stream/iterator to *object_name*; returns True on success."""
        try:
            from azure.common import AzureHttpError  # noqa
        except ImportError:
            from azure.core.exceptions import HttpResponseError  # noqa
            AzureHttpError = HttpResponseError  # noqa
        blob_name = self._blob_name_from_object_path(object_name, container.name)  # noqa: F841
        try:
            container.create_blob_from_data(
                container.name,
                object_name,
                blob_name,
                iterator.read() if hasattr(iterator, "read") else bytes(iterator),
                max_connections=max_connections,
                progress_callback=callback,
            )
            return True
        except AzureHttpError as ex:
            self.get_logger().error("Failed uploading (Azure error): %s" % ex)
        except Exception as ex:
            self.get_logger().error("Failed uploading: %s" % ex)
        return False
    def upload_object(
        self,
        file_path: str,
        container: Any,
        object_name: str,
        callback: Any = None,
        extra: dict = None,
        max_connections: int = None,
        **kwargs: Any,
    ) -> bool:
        """Upload a local file to *object_name*; returns True on success."""
        try:
            from azure.common import AzureHttpError  # noqa
        except ImportError:
            from azure.core.exceptions import HttpResponseError  # noqa
            AzureHttpError = HttpResponseError  # noqa
        blob_name = self._blob_name_from_object_path(object_name, container.name)
        try:
            from azure.storage.blob import ContentSettings  # noqa
            container.create_blob_from_path(
                container.name,
                blob_name,
                file_path,
                max_connections=max_connections,
                content_settings=ContentSettings(content_type=get_file_mimetype(object_name or file_path)),
                progress_callback=callback,
            )
            return True
        except AzureHttpError as ex:
            self.get_logger().error("Failed uploading (Azure error): %s" % ex)
        except Exception as ex:
            self.get_logger().error("Failed uploading: %s" % ex)
        # Explicit False on failure, for consistency with upload_object_via_stream
        # (previously fell off the end and returned None).
        return False
    def list_container_objects(
        self,
        container: Any,
        ex_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Any]:
        return list(container.list_blobs(container_name=container.name, prefix=ex_prefix))
    def delete_object(self, object: Any, **kwargs: Any) -> bool:
        container = object.container
        container.delete_blob(
            container.name,
            object.blob_name,
        )
        # Deletion succeeded when the blob no longer exists.
        return not object.container.exists(container.name, object.blob_name)
    def get_object(
        self,
        container_name: str,
        object_name: str,
        *args: Any,
        **kwargs: Any,
    ) -> _Object:
        """Return an _Object handle for an existing blob (raises if container unknown)."""
        container = self._containers.get(container_name)
        if not container:
            raise StorageError("Container `{}` not found for object {}".format(container_name, object_name))
        # blob_name = self._blob_name_from_object_path(object_name, container_name)
        blob = container.get_blob_properties(container.name, object_name)
        # The two SDK generations expose the content length differently.
        if container.is_legacy():
            return self._Object(
                container=container,
                blob_name=blob.name,
                content_length=blob.properties.content_length,
            )
        else:
            return self._Object(container=container, blob_name=blob.name, content_length=blob.size)
    def download_object_as_stream(self, obj: Any, verbose: bool, *_: Any, **__: Any) -> bytes:
        """Download a blob into memory, reporting progress via the logger."""
        container = obj.container
        total_size_mb = obj.content_length / (1024.0 * 1024.0)
        remote_path = os.path.join(
            "{}://".format(self.scheme),
            container.config.account_name,
            container.name,
            obj.blob_name,
        )
        cb = DownloadProgressReport(total_size_mb, verbose, remote_path, self.get_logger())
        blob = container.get_blob_to_bytes(
            container.name,
            obj.blob_name,
            progress_callback=cb,
        )
        cb.close()
        if container.is_legacy():
            return blob.content
        else:
            return blob
    def download_object(
        self,
        obj: Any,
        local_path: str,
        overwrite_existing: bool = True,
        delete_on_failure: bool = True,
        callback: Callable[[int, int], None] = None,
        max_connections: Optional[int] = None,
        **_: Any,
    ) -> None:
        """Download a blob to *local_path*, optionally reporting progress."""
        p = Path(local_path)
        if not overwrite_existing and p.is_file():
            self.get_logger().warning("Failed saving after download: overwrite=False and file exists (%s)" % str(p))
            return
        download_done = SafeEvent()
        download_done.counter = 0
        def callback_func(current: int, total: int) -> None:
            # Forward only the newly-downloaded delta to the caller's callback.
            if callback:
                chunk = current - download_done.counter
                download_done.counter += chunk
                callback(chunk)
            if current >= total:
                download_done.set()
        container = obj.container
        container.blob_service.MAX_SINGLE_GET_SIZE = 5 * 1024 * 1024
        _ = container.get_blob_to_path(
            container.name,
            obj.blob_name,
            local_path,
            max_connections=max_connections,
            progress_callback=callback_func,
        )
        if container.is_legacy():
            # Legacy SDK downloads asynchronously; wait for the callback signal.
            download_done.wait()
    def test_upload(self, test_path: str, config: Any, **_: Any) -> bool:
        """Return True when the configured credentials can reach the container."""
        container = self.get_container(config=config)
        try:
            container.blob_service.get_container_properties(container.name)
        except Exception:
            return False
        else:
            # Using the account Key, we can always upload...
            return True
    @classmethod
    def _blob_name_from_object_path(cls, name: str, container_name: str) -> Union[Tuple[str, str], str]:
        """Validate an azure:// URL against *container_name* and extract the blob path.

        Returns ``(container, blob_path)`` for URLs, or *name* unchanged when it
        is already a plain blob path. Raises StorageError on mismatches.
        """
        scheme = urlparse(name).scheme
        if scheme:
            if scheme != cls.scheme:
                raise StorageError(
                    "When using a URL, only the `{}` scheme is supported for Azure storage: {}",
                    cls.scheme,
                    name,
                )
            f = furl(name)
            if not f.path.segments:
                raise StorageError(
                    "Missing container name in URL {}",
                    name,
                )
            parsed_container_name = f.path.segments[0]
            if parsed_container_name != container_name:
                raise StorageError(
                    "Container name mismatch (expected {}, found {}) in {}",
                    container_name,
                    parsed_container_name,
                    name,
                )
            if len(f.path.segments) == 1:
                raise StorageError(
                    "No path found following container name {} in {}",
                    container_name,
                    name,
                )
            return f.path.segments[0], os.path.join(*f.path.segments[1:])
        return name
    def get_direct_access(self, remote_path: str, **_: Any) -> None:
        # Azure blobs are never directly accessible as local files.
        return None
    def exists_file(self, container_name: str, object_name: str) -> bool:
        container = self.get_container(container_name)
        return container.exists(container_name, blob_name=object_name)
|
_AzureBlobServiceStorageDriver
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/triggers/bedrock.py
|
{
"start": 3964,
"end": 5423
}
|
class ____(AwsBaseWaiterTrigger):
    """
    Trigger when a provisioned throughput job is complete.

    :param provisioned_model_id: The ARN or name of the provisioned throughput.
    :param waiter_delay: The amount of time in seconds to wait between attempts. (default: 120)
    :param waiter_max_attempts: The maximum number of attempts to be made. (default: 75)
    :param aws_conn_id: The Airflow connection used for AWS credentials.
    """
    def __init__(
        self,
        *,
        provisioned_model_id: str,
        waiter_delay: int = 120,
        waiter_max_attempts: int = 75,
        aws_conn_id: str | None = None,
    ) -> None:
        super().__init__(
            # Fields required to re-create this trigger after (de)serialization.
            serialized_fields={"provisioned_model_id": provisioned_model_id},
            waiter_name="provisioned_model_throughput_complete",
            waiter_args={"provisionedModelId": provisioned_model_id},
            failure_message="Bedrock provisioned throughput job failed.",
            status_message="Status of Bedrock provisioned throughput job is",
            status_queries=["status"],
            # On success, the event payload carries the provisioned model id.
            return_key="provisioned_model_id",
            return_value=provisioned_model_id,
            waiter_delay=waiter_delay,
            waiter_max_attempts=waiter_max_attempts,
            aws_conn_id=aws_conn_id,
        )
    def hook(self) -> AwsGenericHook:
        # Hook instance the base trigger uses to run the boto3 waiter.
        return BedrockHook(aws_conn_id=self.aws_conn_id)
|
BedrockProvisionModelThroughputCompletedTrigger
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/batch/base.py
|
{
"start": 5834,
"end": 6003
}
|
class ____:
    # Number of concurrent batch-send operations to use, as dictated by the
    # server-side batching mode.
    concurrency: int
# Union of every supported batching strategy configuration.
_BatchMode: TypeAlias = Union[
    _DynamicBatching, _FixedSizeBatching, _RateLimitedBatching, _ServerSideBatching
]
|
_ServerSideBatching
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/series_methods.py
|
{
"start": 1278,
"end": 1731
}
|
class ____:
params = ["int", "datetime"]
param_names = ["dtype"]
def setup(self, dtype):
N = 10**6
data = {
"int": np.random.randint(1, 10, N),
"datetime": date_range("2000-01-01", freq="s", periods=N),
}
self.s = Series(data[dtype])
if dtype == "datetime":
self.s[np.random.randint(1, N, 100)] = NaT
def time_dropna(self, dtype):
self.s.dropna()
|
Dropna
|
python
|
walkccc__LeetCode
|
solutions/1672. Richest Customer Wealth/1672.py
|
{
"start": 0,
"end": 112
}
|
class ____:
def maximumWealth(self, accounts: list[list[int]]) -> int:
return max(map(sum, accounts))
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/project_rule_enable.py
|
{
"start": 708,
"end": 2705
}
|
class ____(ProjectEndpoint):
    """PUT endpoint that re-enables a disabled alert rule after validation."""
    publish_status = {
        "PUT": ApiPublishStatus.EXPERIMENTAL,
    }
    owner = ApiOwner.ISSUES
    permission_classes = (ProjectAlertRulePermission,)
    def put(self, request: Request, project, rule_id) -> Response:
        try:
            rule = Rule.objects.get(id=rule_id, project=project)
        except Rule.DoesNotExist:
            raise ResourceDoesNotExist
        # Only disabled rules may be re-enabled.
        if rule.status != ObjectStatus.DISABLED:
            return Response(
                {
                    "detail": "Rule is not disabled.",
                },
                status=status.HTTP_400_BAD_REQUEST,
            )
        # A rule with no actions would fire without doing anything.
        if not rule.data.get("actions", []):
            return Response(
                {
                    "detail": "Cannot enable a rule with no action.",
                },
                status=status.HTTP_400_BAD_REQUEST,
            )
        # Refuse to enable an exact duplicate of another rule in the project.
        duplicate_rule = find_duplicate_rule(project=project, rule_id=rule_id, rule=rule)
        if duplicate_rule:
            return Response(
                {
                    "detail": f"This rule is an exact duplicate of '{duplicate_rule.label}' in this project and may not be enabled unless it's edited."
                },
                status=status.HTTP_400_BAD_REQUEST,
            )
        rule.status = ObjectStatus.ACTIVE
        rule.save()
        # Record the change in the org audit log.
        self.create_audit_entry(
            request=request,
            organization=project.organization,
            target_object=rule.id,
            event=audit_log.get_event_id("RULE_EDIT"),
            data=rule.get_audit_log_data(),
        )
        # Analytics are best-effort; never fail the request over them.
        try:
            analytics.record(
                RuleReenableExplicit(
                    rule_id=rule.id,
                    user_id=request.user.id,
                    organization_id=project.organization.id,
                )
            )
        except Exception as e:
            sentry_sdk.capture_exception(e)
        return Response(status=202)
|
ProjectRuleEnableEndpoint
|
python
|
openai__openai-python
|
tests/api_resources/fine_tuning/test_jobs.py
|
{
"start": 482,
"end": 13404
}
|
class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.create(
model="gpt-4o-mini",
training_file="file-abc123",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.create(
model="gpt-4o-mini",
training_file="file-abc123",
hyperparameters={
"batch_size": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
},
integrations=[
{
"type": "wandb",
"wandb": {
"project": "my-wandb-project",
"entity": "entity",
"name": "name",
"tags": ["custom-tag"],
},
}
],
metadata={"foo": "string"},
method={
"type": "supervised",
"dpo": {
"hyperparameters": {
"batch_size": "auto",
"beta": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
}
},
"reinforcement": {
"grader": {
"input": "input",
"name": "name",
"operation": "eq",
"reference": "reference",
"type": "string_check",
},
"hyperparameters": {
"batch_size": "auto",
"compute_multiplier": "auto",
"eval_interval": "auto",
"eval_samples": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
"reasoning_effort": "default",
},
},
"supervised": {
"hyperparameters": {
"batch_size": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
}
},
},
seed=42,
suffix="x",
validation_file="file-abc123",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.create(
model="gpt-4o-mini",
training_file="file-abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.create(
model="gpt-4o-mini",
training_file="file-abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.retrieve(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.retrieve(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.retrieve(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
client.fine_tuning.jobs.with_raw_response.retrieve(
"",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.list()
assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.list(
after="string",
limit=0,
metadata={"foo": "string"},
)
assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_cancel(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.cancel(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_raw_response_cancel(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.cancel(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_streaming_response_cancel(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.cancel(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_cancel(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
client.fine_tuning.jobs.with_raw_response.cancel(
"",
)
@parametrize
def test_method_list_events(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"])
@parametrize
def test_method_list_events_with_all_params(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
after="string",
limit=0,
)
assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"])
@parametrize
def test_raw_response_list_events(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"])
@parametrize
def test_streaming_response_list_events(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list_events(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
client.fine_tuning.jobs.with_raw_response.list_events(
"",
)
@parametrize
def test_method_pause(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.pause(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_raw_response_pause(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.pause(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_streaming_response_pause(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.pause(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_pause(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
client.fine_tuning.jobs.with_raw_response.pause(
"",
)
@parametrize
def test_method_resume(self, client: OpenAI) -> None:
job = client.fine_tuning.jobs.resume(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_raw_response_resume(self, client: OpenAI) -> None:
response = client.fine_tuning.jobs.with_raw_response.resume(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
def test_streaming_response_resume(self, client: OpenAI) -> None:
with client.fine_tuning.jobs.with_streaming_response.resume(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_resume(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
client.fine_tuning.jobs.with_raw_response.resume(
"",
)
|
TestJobs
|
python
|
docker__docker-py
|
docker/api/swarm.py
|
{
"start": 198,
"end": 18089
}
|
class ____:
def create_swarm_spec(self, *args, **kwargs):
"""
Create a :py:class:`docker.types.SwarmSpec` instance that can be used
as the ``swarm_spec`` argument in
:py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
Args:
task_history_retention_limit (int): Maximum number of tasks
history stored.
snapshot_interval (int): Number of logs entries between snapshot.
keep_old_snapshots (int): Number of snapshots to keep beyond the
current snapshot.
log_entries_for_slow_followers (int): Number of log entries to
keep around to sync up slow followers after a snapshot is
created.
heartbeat_tick (int): Amount of ticks (in seconds) between each
heartbeat.
election_tick (int): Amount of ticks (in seconds) needed without a
leader to trigger a new election.
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
external_cas (:py:class:`list`): Configuration for forwarding
signing requests to an external certificate authority. Use
a list of :py:class:`docker.types.SwarmExternalCA`.
name (string): Swarm's name
labels (dict): User-defined key/value metadata.
signing_ca_cert (str): The desired signing CA certificate for all
swarm node TLS leaf certificates, in PEM format.
signing_ca_key (str): The desired signing CA key for all swarm
node TLS leaf certificates, in PEM format.
ca_force_rotate (int): An integer whose purpose is to force swarm
to generate a new signing CA certificate and key, if none have
been specified.
autolock_managers (boolean): If set, generate a key and use it to
lock data stored on the managers.
log_driver (DriverConfig): The default log driver to use for tasks
created in the orchestrator.
Returns:
:py:class:`docker.types.SwarmSpec`
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> spec = client.api.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
>>> client.api.init_swarm(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, swarm_spec=spec
)
"""
ext_ca = kwargs.pop('external_ca', None)
if ext_ca:
kwargs['external_cas'] = [ext_ca]
return types.SwarmSpec(self._version, *args, **kwargs)
@utils.minimum_version('1.24')
def get_unlock_key(self):
"""
Get the unlock key for this Swarm manager.
Returns:
A ``dict`` containing an ``UnlockKey`` member
"""
return self._result(self._get(self._url('/swarm/unlockkey')), True)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None,
default_addr_pool=None, subnet_size=None,
data_path_addr=None, data_path_port=None):
"""
Initialize a new Swarm using the current connected engine as the first
node.
Args:
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
``advertise_addr`` is not specified, it will be automatically
detected when possible. Default: None
listen_addr (string): Listen address used for inter-manager
communication, as well as determining the networking interface
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
an address/port combination in the form ``192.168.1.1:4567``,
or an interface followed by a port number, like ``eth0:4567``.
If the port number is omitted, the default swarm listening port
is used. Default: '0.0.0.0:2377'
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None
default_addr_pool (list of strings): Default Address Pool specifies
default subnet pools for global scope networks. Each pool
should be specified as a CIDR block, like '10.0.0.0/8'.
Default: None
subnet_size (int): SubnetSize specifies the subnet size of the
networks created from the default subnet pool. Default: None
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
data_path_port (int): Port number to use for data path traffic.
Acceptable port range is 1024 to 49151. If set to ``None`` or
0, the default port 4789 will be used. Default: None
Returns:
(str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
if default_addr_pool is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Address pool is only available for API version >= 1.39'
)
# subnet_size becomes 0 if not set with default_addr_pool
if subnet_size is None:
subnet_size = DEFAULT_SWARM_SUBNET_SIZE
if subnet_size is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Subnet size is only available for API version >= 1.39'
)
# subnet_size is ignored if set without default_addr_pool
if default_addr_pool is None:
default_addr_pool = DEFAULT_SWARM_ADDR_POOL
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
'DefaultAddrPool': default_addr_pool,
'SubnetSize': subnet_size,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data address path is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
if data_path_port is not None:
if utils.version_lt(self._version, '1.40'):
raise errors.InvalidVersion(
'Data path port is only available for '
'API version >= 1.40'
)
data['DataPathPort'] = data_path_port
response = self._post_json(url, data=data)
return self._result(response, json=True)
@utils.minimum_version('1.24')
def inspect_swarm(self):
"""
Retrieve low-level information about the current swarm.
Returns:
A dictionary containing data about the swarm.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm')
return self._result(self._get(url), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
"""
Retrieve low-level information about a swarm node
Args:
node_id (string): ID of the node to be inspected.
Returns:
A dictionary containing data about this node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
advertise_addr=None, data_path_addr=None):
"""
Make this Engine join a swarm that has already been created.
Args:
remote_addrs (:py:class:`list`): Addresses of one or more manager
nodes already participating in the Swarm to join.
join_token (string): Secret token for joining this Swarm.
listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel
Endpoint (VTEP). Default: ``'0.0.0.0:2377``
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
AdvertiseAddr is not specified, it will be automatically
detected when possible. Default: ``None``
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
data = {
'RemoteAddrs': remote_addrs,
'ListenAddr': listen_addr,
'JoinToken': join_token,
'AdvertiseAddr': advertise_addr,
}
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data address path is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
"""
Leave a swarm.
Args:
force (bool): Leave the swarm even if this node is a manager.
Default: ``False``
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
if force and response.status_code == http_client.NOT_ACCEPTABLE:
return True
# FIXME: Temporary workaround for 1.13.0-rc bug
# https://github.com/docker/docker/issues/29192
if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of dictionaries containing data about each swarm node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def remove_node(self, node_id, force=False):
"""
Remove a node from the swarm.
Args:
node_id (string): ID of the node to be removed.
force (bool): Force remove an active node. Default: `False`
Raises:
:py:class:`docker.errors.NotFound`
If the node referenced doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
"""
url = self._url('/nodes/{0}', node_id)
params = {
'force': force
}
res = self._delete(url, params=params)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def unlock_swarm(self, key):
"""
Unlock a locked swarm.
Args:
key (string): The unlock key as provided by
:py:meth:`get_unlock_key`
Raises:
:py:class:`docker.errors.InvalidArgument`
If the key argument is in an incompatible format
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
Example:
>>> key = client.api.get_unlock_key()
>>> client.unlock_swarm(key)
"""
if isinstance(key, dict):
if 'UnlockKey' not in key:
raise errors.InvalidArgument('Invalid unlock key format')
else:
key = {'UnlockKey': key}
url = self._url('/swarm/unlock')
res = self._post_json(url, data=key)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
"""
Update the node's configuration
Args:
node_id (string): ID of the node to be updated.
version (int): The version number of the node object being
updated. This is required to avoid conflicting writes.
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
node_spec=node_spec)
"""
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None,
rotate_worker_token=False,
rotate_manager_token=False,
rotate_manager_unlock_key=False):
"""
Update the Swarm's configuration
Args:
version (int): The version number of the swarm object being
updated. This is required to avoid conflicting writes.
swarm_spec (dict): Configuration settings to update. Use
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
generate a valid configuration. Default: ``None``.
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
rotate_manager_unlock_key (bool): Rotate the manager unlock key.
Default: ``False``.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/update')
params = {
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
}
if rotate_manager_unlock_key:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Rotate manager unlock key '
'is only available for API version >= 1.25'
)
params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
response = self._post_json(url, data=swarm_spec, params=params)
self._raise_for_status(response)
return True
|
SwarmApiMixin
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/dtype/npy/bytes.py
|
{
"start": 3218,
"end": 3992
}
|
class ____(DTypeConfig_V2[Literal["|O"], Literal["vlen-bytes"]]):
"""
A wrapper around the JSON representation of the ``VariableLengthBytes`` data type in Zarr V2.
The ``name`` field of this class contains the value that would appear under the
``dtype`` field in Zarr V2 array metadata. The ``object_codec_id`` field is always ``"vlen-bytes"``
References
----------
The structure of the ``name`` field is defined in the Zarr V2
[specification document](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding).
Examples
--------
```python
{
"name": "|O",
"object_codec_id": "vlen-bytes"
}
```
"""
@dataclass(frozen=True, kw_only=True)
|
VariableLengthBytesJSON_V2
|
python
|
huggingface__transformers
|
src/transformers/models/align/modeling_align.py
|
{
"start": 22832,
"end": 25311
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.attention_dropout = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText
|
AlignTextSelfAttention
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 16910,
"end": 22473
}
|
class ____(Trace):
pure = lift = lambda self, val: JVPTracer(self, val, zeros_like(val))
def process_primitive(self, primitive, tracers, params):
primals_in, tangents_in = unzip2((t.primal, t.tangent) for t in tracers)
jvp_rule = jvp_rules[primitive]
primal_outs, tangent_outs = jvp_rule(primals_in, tangents_in, **params)
return [JVPTracer(self, x, t) for x, t in zip(primal_outs, tangent_outs)]
jvp_rules = {}
# -
# Notice both `pure` and `lift` package a value into a `JVPTracer` with the
# minimal amount of context, which is a zero tangent value.
#
# Let's add some JVP rules for primitives:
# +
def add_jvp(primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
return [x + y], [x_dot + y_dot]
jvp_rules[add_p] = add_jvp
def mul_jvp(primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
return [x * y], [x_dot * y + x * y_dot]
jvp_rules[mul_p] = mul_jvp
def sin_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [sin(x)], [cos(x) * x_dot]
jvp_rules[sin_p] = sin_jvp
def cos_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [cos(x)], [-sin(x) * x_dot]
jvp_rules[cos_p] = cos_jvp
def neg_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [neg(x)], [neg(x_dot)]
jvp_rules[neg_p] = neg_jvp
def reduce_sum_jvp(primals, tangents, *, axis):
(x,), (x_dot,) = primals, tangents
return [reduce_sum(x, axis)], [reduce_sum(x_dot, axis)]
jvp_rules[reduce_sum_p] = reduce_sum_jvp
def greater_jvp(primals, tangents):
(x, y), _ = primals, tangents
out_primal = greater(x, y)
return [out_primal], [zeros_like(out_primal)]
jvp_rules[greater_p] = greater_jvp
def less_jvp(primals, tangents):
(x, y), _ = primals, tangents
out_primal = less(x, y)
return [out_primal], [zeros_like(out_primal)]
jvp_rules[less_p] = less_jvp
# -
# Finally, we add a transformation API to kick off the trace:
def jvp_v1(f, primals, tangents):
with new_main(JVPTrace) as main:
trace = JVPTrace(main)
tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)]
out = f(*tracers_in)
tracer_out = full_raise(trace, out)
primal_out, tangent_out = tracer_out.primal, tracer_out.tangent
return primal_out, tangent_out
# And with that, we can differentiate!
x = 3.0
y, sin_deriv_at_3 = jvp_v1(sin, (x,), (1.0,))
print(sin_deriv_at_3)
print(cos(3.0))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
x, xdot = 3., 1.
y, ydot = jvp_v1(f, (x,), (xdot,))
print(y)
print(ydot)
# +
def deriv(f):
return lambda x: jvp_v1(f, (x,), (1.,))[1]
print(deriv(sin)(3.))
print(deriv(deriv(sin))(3.))
print(deriv(deriv(deriv(sin)))(3.))
print(deriv(deriv(deriv(deriv(sin))))(3.))
# +
def f(x):
if x > 0.: # Python control flow
return 2. * x
else:
return x
print(deriv(f)(3.))
print(deriv(f)(-3.))
# -
# ## Pytrees and flattening user functions' inputs and outputs
# A limitation with `jvp_v1` is that it assumes the user function accepts arrays
# as positional arguments and produces a single array as output. What if it
# produced a list as output? Or accepted nested containers as inputs? It would
# be a pain to deal with all the possible containers in inputs and outputs at
# every layer of the stack. Instead, we can wrap the user function so that the
# wrapped version accepts arrays as inputs and returns a flat list of arrays as
# output. The wrapper just needs to unflatten its input, call the user function,
# and flatten the output.
#
# Here's how we'd like to write `jvp`, assuming the user always gives us
# functions that take arrays as inputs and produces a flat list of arrays as
# outputs:
def jvp_flat(f, primals, tangents):
with new_main(JVPTrace) as main:
trace = JVPTrace(main)
tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
primals_out, tangents_out = unzip2((t.primal, t.tangent) for t in tracers_out)
return primals_out, tangents_out
# To support user functions that have arbitrary containers in the inputs and
# outputs, here's how we'd write the user-facing `jvp` wrapper:
def jvp(f, primals, tangents):
primals_flat, in_tree = tree_flatten(primals)
tangents_flat, in_tree2 = tree_flatten(tangents)
if in_tree != in_tree2: raise TypeError
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, tangents_out_flat = jvp_flat(f, primals_flat, tangents_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
tangents_out = tree_unflatten(out_tree(), tangents_out_flat)
return primals_out, tangents_out
# Notice that we had to plumb the tree structure of the user function output
# back to the caller of `flatten_fun`. That information isn't available until we
# actually run the user function, so `flatten_fun` just returns a reference to a
# mutable cell, represented as a thunk. These side-effects are safe because we
# always run the user function exactly once. (This safe regime is the reason for
# the "linear" name in `linear_util.py`, in the sense of [linear
# types](https://en.wikipedia.org/wiki/Substructural_type_system).)
#
# All that remains is to write `tree_flatten`, `tree_unflatten`, and
# `flatten_fun`.
# + tags=["hide-input"]
def flatten_fun(f, in_tree):
store = Store()
def flat_fun(*args_flat):
pytree_args = tree_unflatten(in_tree, args_flat)
out = f(*pytree_args)
out_flat, out_tree = tree_flatten(out)
store.set_value(out_tree)
return out_flat
return flat_fun, store
|
JVPTrace
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/win32_types.py
|
{
"start": 5151,
"end": 5551
}
|
class ____(Structure):
"""
http://msdn.microsoft.com/en-us/library/windows/desktop/aa379560(v=vs.85).aspx
"""
if TYPE_CHECKING:
nLength: int
lpSecurityDescriptor: int
bInheritHandle: int # BOOL comes back as 'int'.
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", LPVOID),
("bInheritHandle", BOOL),
]
|
SECURITY_ATTRIBUTES
|
python
|
mlflow__mlflow
|
mlflow/telemetry/events.py
|
{
"start": 3662,
"end": 3927
}
|
class ____(Event):
name: str = "create_registered_model"
@classmethod
def parse(cls, arguments: dict[str, Any]) -> dict[str, Any] | None:
tags = arguments.get("tags") or {}
return {"is_prompt": _is_prompt(tags)}
|
CreateRegisteredModelEvent
|
python
|
walkccc__LeetCode
|
solutions/828. Count Unique Characters of All Substrings of a Given String/828-2.py
|
{
"start": 0,
"end": 628
}
|
class ____:
def uniqueLetterString(self, s: str) -> int:
ans = 0
# lastSeen[c] := the index of the last time ('a' + i) appeared
lastSeen = collections.defaultdict(lambda: -1)
# prevSeen[c] := the previous index of the last time ('a' + i) appeared
prevLastSeen = collections.defaultdict(lambda: -1)
for i, c in enumerate(s):
if c in lastSeen:
ans += (i - lastSeen[c]) * (lastSeen[c] - prevLastSeen[c])
prevLastSeen[c] = lastSeen[c]
lastSeen[c] = i
for c in string.ascii_uppercase:
ans += (len(s) - lastSeen[c]) * (lastSeen[c] - prevLastSeen[c])
return ans
|
Solution
|
python
|
ray-project__ray
|
python/ray/autoscaler/_private/gcp/node_provider.py
|
{
"start": 1818,
"end": 12623
}
|
class ____(NodeProvider):
def __init__(self, provider_config: dict, cluster_name: str):
NodeProvider.__init__(self, provider_config, cluster_name)
self.lock = RLock()
self._construct_clients()
self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes", False)
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes: Dict[str, GCPNode] = {}
def _construct_clients(self):
_, _, compute, tpu = construct_clients_from_provider_config(
self.provider_config
)
# Dict of different resources provided by GCP.
# At this moment - Compute and TPUs
self.resources: Dict[GCPNodeType, GCPResource] = {}
# Compute is always required
self.resources[GCPNodeType.COMPUTE] = GCPCompute(
compute,
self.provider_config["project_id"],
self.provider_config["availability_zone"],
self.cluster_name,
)
# if there are no TPU nodes defined in config, tpu will be None.
if tpu is not None:
self.resources[GCPNodeType.TPU] = GCPTPU(
tpu,
self.provider_config["project_id"],
self.provider_config["availability_zone"],
self.cluster_name,
)
def _get_resource_depending_on_node_name(self, node_name: str) -> GCPResource:
"""Return the resource responsible for the node, based on node_name.
This expects the name to be in format '[NAME]-[UUID]-[TYPE]',
where [TYPE] is either 'compute' or 'tpu' (see ``GCPNodeType``).
"""
return self.resources[GCPNodeType.name_to_type(node_name)]
@_retry
def non_terminated_nodes(self, tag_filters: dict):
with self.lock:
instances = []
for resource in self.resources.values():
node_instances = resource.list_instances(tag_filters)
instances += node_instances
# Note: All the operations use "name" as the unique instance id
self.cached_nodes = {i["name"]: i for i in instances}
return [i["name"] for i in instances]
def is_running(self, node_id: str):
with self.lock:
node = self._get_cached_node(node_id)
return node.is_running()
def is_terminated(self, node_id: str):
with self.lock:
node = self._get_cached_node(node_id)
return node.is_terminated()
def node_tags(self, node_id: str):
with self.lock:
node = self._get_cached_node(node_id)
return node.get_labels()
@_retry
def set_node_tags(self, node_id: str, tags: dict):
with self.lock:
labels = tags
node = self._get_node(node_id)
resource = self._get_resource_depending_on_node_name(node_id)
result = resource.set_labels(node=node, labels=labels)
return result
def external_ip(self, node_id: str):
with self.lock:
node = self._get_cached_node(node_id)
ip = node.get_external_ip()
if ip is None:
node = self._get_node(node_id)
ip = node.get_external_ip()
return ip
def internal_ip(self, node_id: str):
with self.lock:
node = self._get_cached_node(node_id)
ip = node.get_internal_ip()
if ip is None:
node = self._get_node(node_id)
ip = node.get_internal_ip()
return ip
@_retry
def create_node(self, base_config: dict, tags: dict, count: int) -> Dict[str, dict]:
"""Creates instances.
Returns dict mapping instance id to each create operation result for the created
instances.
"""
with self.lock:
labels = tags # gcp uses "labels" instead of aws "tags"
node_type = get_node_type(base_config)
resource = self.resources[node_type]
all_nodes = {}
if self.cache_stopped_nodes:
filters = {
"ray-node-name": labels["ray-node-name"],
"ray-node-type": labels["ray-node-type"],
"ray-user-node-type": labels["ray-user-node-type"],
}
reuse_nodes = resource.list_instances(filters, True)[:count]
if reuse_nodes:
reused_nodes_dict = {
n["name"]: resource.start_instance(n["name"])
for n in reuse_nodes
}
all_nodes.update(reused_nodes_dict)
count -= len(reuse_nodes)
if count > 0:
results: List[Tuple[dict, str]] = resource.create_instances(
base_config, labels, count
)
created_nodes_dict = {
instance_id: result for result, instance_id in results
}
all_nodes.update(created_nodes_dict)
return all_nodes
def _thread_unsafe_terminate_node(self, node_id: str):
# Assumes the global lock is held for the duration of this operation.
# The lock may be held by a different thread if in `terminate_nodes()` case.
logger.info("NodeProvider: {}: Terminating node".format(node_id))
resource = self._get_resource_depending_on_node_name(node_id)
try:
result = resource.delete_instance(
node_id=node_id,
)
except googleapiclient.errors.HttpError as http_error:
if http_error.resp.status == 404:
logger.warning(
f"Tried to delete the node with id {node_id} "
"but it was already gone."
)
result = None
else:
raise http_error from None
return result
@_retry
def terminate_node(self, node_id: str):
with self.lock:
resource = self._get_resource_depending_on_node_name(node_id)
try:
if self.cache_stopped_nodes:
node = self._get_cached_node(node_id)
if node.is_running():
result = resource.stop_instance(node_id=node_id)
else:
result = None
else:
result = resource.delete_instance(
node_id=node_id,
)
except googleapiclient.errors.HttpError as http_error:
if http_error.resp.status == 404:
logger.warning(
f"Tried to delete the node with id {node_id} "
"but it was already gone."
)
else:
raise http_error from None
return result
@_retry
def _get_node(self, node_id: str) -> GCPNode:
self.non_terminated_nodes({}) # Side effect: updates cache
with self.lock:
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
resource = self._get_resource_depending_on_node_name(node_id)
instance = resource.get_instance(node_id=node_id)
return instance
def _get_cached_node(self, node_id: str) -> GCPNode:
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_gcp(cluster_config)
@staticmethod
def fillout_available_node_types_resources(
cluster_config: Dict[str, Any]
) -> Dict[str, Any]:
"""Fill out TPU resources to the cluster config.
To enable TPU pod autoscaling, we provide the TPU accelerator
type as a resource that only exists on worker 0 of the pod slice.
For instance, a v4-16 should have the resource labels:
worker 0: resources = {"TPU": 4, "TPU-v4-16-head": 1}
worker 1: resources = {"TPU": 4}
For the autoscaler to correctly process the demands of
creating a new TPU pod, then the autoscaler must know what
a TPU pod is in the form of the TPU accelerator resource.
Therefore we fill out TPU pods appropriately by providing the
expected resource which we can deduce from the cluster config.
"""
if "available_node_types" not in cluster_config:
return cluster_config
cluster_config = copy.deepcopy(cluster_config)
available_node_types = cluster_config["available_node_types"]
for node_type in available_node_types:
node_config = available_node_types[node_type]["node_config"]
if get_node_type(node_config) == GCPNodeType.TPU:
autodetected_resources = {}
accelerator_type = ""
if "acceleratorType" in node_config:
accelerator_type = node_config["acceleratorType"]
elif "acceleratorConfig" in node_config:
accelerator_type = tpu_accelerator_config_to_type(
node_config["acceleratorConfig"]
)
if not accelerator_type:
continue
autodetected_resources[f"TPU-{accelerator_type}-head"] = 1
available_node_types[node_type]["resources"].update(
autodetected_resources
)
return cluster_config
def get_command_runner(
self,
log_prefix: str,
node_id: str,
auth_config: Dict[str, Any],
cluster_name: str,
process_runner: ModuleType,
use_internal_ip: bool,
docker_config: Optional[Dict[str, Any]] = None,
) -> CommandRunnerInterface:
"""Returns a TPU command runner as applicable."""
resource = self._get_resource_depending_on_node_name(node_id)
instance = resource.get_instance(node_id)
common_args = {
"docker_config": docker_config,
"log_prefix": log_prefix,
"node_id": node_id,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": use_internal_ip,
}
if (
GCPNodeType.TPU in self.resources
and resource == self.resources[GCPNodeType.TPU]
):
return TPUCommandRunner(instance=instance, provider=self, **common_args)
else:
return super().get_command_runner(**common_args)
|
GCPNodeProvider
|
python
|
lxml__lxml
|
src/lxml/tests/test_external_document.py
|
{
"start": 230,
"end": 4748
}
|
class ____(HelperTestCase):
def setUp(self):
try:
import ctypes
from ctypes import pythonapi
from ctypes.util import find_library
except ImportError:
raise unittest.SkipTest("ctypes support missing")
def wrap(func, restype, *argtypes):
func.restype = restype
func.argtypes = list(argtypes)
return func
self.get_capsule_name = wrap(pythonapi.PyCapsule_GetName,
ctypes.c_char_p, ctypes.py_object)
self.capsule_is_valid = wrap(pythonapi.PyCapsule_IsValid, ctypes.c_int,
ctypes.py_object, ctypes.c_char_p)
self.new_capsule = wrap(pythonapi.PyCapsule_New, ctypes.py_object,
ctypes.c_void_p, ctypes.c_char_p,
ctypes.c_void_p)
self.set_capsule_name = wrap(pythonapi.PyCapsule_SetName, ctypes.c_int,
ctypes.py_object, ctypes.c_char_p)
self.set_capsule_context = wrap(pythonapi.PyCapsule_SetContext,
ctypes.c_int, ctypes.py_object,
ctypes.c_char_p)
self.get_capsule_context = wrap(pythonapi.PyCapsule_GetContext,
ctypes.c_char_p, ctypes.py_object)
self.get_capsule_pointer = wrap(pythonapi.PyCapsule_GetPointer,
ctypes.c_void_p, ctypes.py_object,
ctypes.c_char_p)
self.set_capsule_pointer = wrap(pythonapi.PyCapsule_SetPointer,
ctypes.c_int, ctypes.py_object,
ctypes.c_void_p)
self.set_capsule_destructor = wrap(pythonapi.PyCapsule_SetDestructor,
ctypes.c_int, ctypes.py_object,
ctypes.c_void_p)
self.PyCapsule_Destructor = ctypes.CFUNCTYPE(None, ctypes.py_object)
libxml2 = ctypes.CDLL(find_library('xml2'))
self.create_doc = wrap(libxml2.xmlReadMemory, ctypes.c_void_p,
ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p,
ctypes.c_char_p, ctypes.c_int)
self.free_doc = wrap(libxml2.xmlFreeDoc, None, ctypes.c_void_p)
def as_capsule(self, text, capsule_name=DOC_NAME):
if not isinstance(text, bytes):
text = text.encode('utf-8')
doc = self.create_doc(text, len(text), b'base.xml', b'utf-8', 0)
ans = self.new_capsule(doc, capsule_name, None)
self.set_capsule_context(ans, DESTRUCTOR_NAME)
return ans
def test_external_document_adoption(self):
xml = '<r a="1">t</r>'
self.assertRaises(TypeError, etree.adopt_external_document, None)
capsule = self.as_capsule(xml)
self.assertTrue(self.capsule_is_valid(capsule, DOC_NAME))
self.assertEqual(DOC_NAME, self.get_capsule_name(capsule))
# Create an lxml tree from the capsule (this is a move not a copy)
root = etree.adopt_external_document(capsule).getroot()
self.assertIsNone(self.get_capsule_name(capsule))
self.assertEqual(root.text, 't')
root.text = 'new text'
# Now reset the capsule so we can copy it
self.assertEqual(0, self.set_capsule_name(capsule, DOC_NAME))
self.assertEqual(0, self.set_capsule_context(capsule, b'invalid'))
# Create an lxml tree from the capsule (this is a copy not a move)
root2 = etree.adopt_external_document(capsule).getroot()
self.assertEqual(self.get_capsule_context(capsule), b'invalid')
# Check that the modification to the tree using the transferred
# document was successful
self.assertEqual(root.text, root2.text)
# Check that further modifications do not show up in the copy (they are
# disjoint)
root.text = 'other text'
self.assertNotEqual(root.text, root2.text)
# delete root and ensure root2 survives
del root
self.assertEqual(root2.text, 'new text')
def test_suite():
suite = unittest.TestSuite()
if sys.platform != 'win32':
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ExternalDocumentTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
ExternalDocumentTestCase
|
python
|
keras-team__keras
|
keras/src/ops/nn.py
|
{
"start": 1305,
"end": 2055
}
|
class ____(Operation):
def call(self, x):
return backend.nn.relu6(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.relu6", "keras.ops.nn.relu6"])
def relu6(x):
"""Rectified linear unit activation function with upper bound of 6.
It is defined as `f(x) = np.clip(x, 0, 6)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0])
>>> keras.ops.relu6(x)
array([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Relu6().symbolic_call(x)
return backend.nn.relu6(x)
|
Relu6
|
python
|
django-haystack__django-haystack
|
test_haystack/test_utils.py
|
{
"start": 746,
"end": 2238
}
|
class ____(TestCase):
fixtures = ["base_data"]
def test_get_identifier(self):
self.assertEqual(get_identifier("core.mockmodel.1"), "core.mockmodel.1")
# Valid object.
mock = MockModel.objects.get(pk=1)
self.assertEqual(get_identifier(mock), "core.mockmodel.1")
@override_settings(
HAYSTACK_IDENTIFIER_METHOD="test_haystack.core.custom_identifier.get_identifier_method"
)
def test_haystack_identifier_method(self):
# The custom implementation returns the MD-5 hash of the key value by
# default:
get_identifier = _lookup_identifier_method()
self.assertEqual(get_identifier("a.b.c"), "553f764f7b436175c0387e22b4a19213")
# … but it also supports a custom override mechanism which would
# definitely fail with the default implementation:
class custom_id_class:
def get_custom_haystack_id(self):
return "CUSTOM"
self.assertEqual(get_identifier(custom_id_class()), "CUSTOM")
@override_settings(
HAYSTACK_IDENTIFIER_METHOD="test_haystack.core.custom_identifier.not_there"
)
def test_haystack_identifier_method_bad_path(self):
self.assertRaises(AttributeError, _lookup_identifier_method)
@override_settings(HAYSTACK_IDENTIFIER_METHOD="core.not_there.not_there")
def test_haystack_identifier_method_bad_module(self):
self.assertRaises(ImportError, _lookup_identifier_method)
|
GetFacetFieldNameTestCase
|
python
|
pydantic__pydantic
|
tests/test_forward_ref.py
|
{
"start": 7981,
"end": 10210
}
|
class ____(BaseModel):
name: str
subaccounts: list[Account] = []
"""
)
Account = module.Account
assert Account.model_json_schema() == {
'$ref': '#/$defs/Account',
'$defs': {
'Account': {
'title': 'Account',
'type': 'object',
'properties': {
'name': {'title': 'Name', 'type': 'string'},
'subaccounts': {
'title': 'Subaccounts',
'default': [],
'type': 'array',
'items': {'$ref': '#/$defs/Account'},
},
},
'required': ['name'],
}
},
}
def test_circular_reference_json_schema(create_module):
@create_module
def module():
from pydantic import BaseModel
class Owner(BaseModel):
account: 'Account'
class Account(BaseModel):
name: str
owner: 'Owner'
subaccounts: list['Account'] = []
Account = module.Account
assert Account.model_json_schema() == {
'$ref': '#/$defs/Account',
'$defs': {
'Account': {
'title': 'Account',
'type': 'object',
'properties': {
'name': {'title': 'Name', 'type': 'string'},
'owner': {'$ref': '#/$defs/Owner'},
'subaccounts': {
'title': 'Subaccounts',
'default': [],
'type': 'array',
'items': {'$ref': '#/$defs/Account'},
},
},
'required': ['name', 'owner'],
},
'Owner': {
'title': 'Owner',
'type': 'object',
'properties': {'account': {'$ref': '#/$defs/Account'}},
'required': ['account'],
},
},
}
def test_circular_reference_json_schema_with_future_annotations(create_module):
module = create_module(
# language=Python
"""
from __future__ import annotations
from pydantic import BaseModel
|
Account
|
python
|
kamyu104__LeetCode-Solutions
|
Python/detect-cycles-in-2d-grid.py
|
{
"start": 521,
"end": 1549
}
|
class ____(object):
def containsCycle(self, grid):
"""
:type grid: List[List[str]]
:rtype: bool
"""
def index(n, i, j):
return i*n + j
union_find = UnionFind(len(grid)*len(grid[0]))
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if i and j and grid[i][j] == grid[i-1][j] == grid[i][j-1] and \
union_find.find_set(index(len(grid[0]), i-1, j)) == \
union_find.find_set(index(len(grid[0]), i, j-1)):
return True
if i and grid[i][j] == grid[i-1][j]:
union_find.union_set(index(len(grid[0]), i-1, j),
index(len(grid[0]),i, j))
if j and grid[i][j] == grid[i][j-1]:
union_find.union_set(index(len(grid[0]), i, j-1),
index(len(grid[0]), i, j))
return False
# Time: O(m * n)
# Space: O(m * n)
|
Solution
|
python
|
Unity-Technologies__ml-agents
|
ml-agents/mlagents/trainers/settings.py
|
{
"start": 7955,
"end": 8626
}
|
class ____(Enum):
UNIFORM: str = "uniform"
GAUSSIAN: str = "gaussian"
MULTIRANGEUNIFORM: str = "multirangeuniform"
CONSTANT: str = "constant"
def to_settings(self) -> type:
_mapping = {
ParameterRandomizationType.UNIFORM: UniformSettings,
ParameterRandomizationType.GAUSSIAN: GaussianSettings,
ParameterRandomizationType.MULTIRANGEUNIFORM: MultiRangeUniformSettings,
ParameterRandomizationType.CONSTANT: ConstantSettings
# Constant type is handled if a float is provided instead of a config
}
return _mapping[self]
@attr.s(auto_attribs=True)
|
ParameterRandomizationType
|
python
|
dask__dask
|
dask/array/_array_expr/random.py
|
{
"start": 37059,
"end": 39911
}
|
class ____(RandomChoice):
_defaults = {}
@cached_property
def state_data(self):
return _spawn_bitgens(self._state, len(self.sizes))
def _layer(self) -> dict:
return {
k: (
_choice_rng,
bitgen,
self.array,
size,
self.replace,
self.p,
self.axis,
self.shuffle,
)
for k, bitgen, size in zip(
self.__dask_keys__(), self.state_data, self.sizes
)
}
"""
Lazy RNG-state machinery
Many of the RandomState methods are exported as functions in da.random for
backward compatibility reasons. Their usage is discouraged.
Use da.random.default_rng() to get a Generator based rng and use its
methods instead.
"""
_cached_states: dict[str, RandomState] = {}
_cached_states_lock = Lock()
def _make_api(attr):
def wrapper(*args, **kwargs):
key = array_creation_dispatch.backend
with _cached_states_lock:
try:
state = _cached_states[key]
except KeyError:
_cached_states[key] = state = RandomState()
return getattr(state, attr)(*args, **kwargs)
wrapper.__name__ = getattr(RandomState, attr).__name__
wrapper.__doc__ = getattr(RandomState, attr).__doc__
return wrapper
"""
RandomState only
"""
seed = _make_api("seed")
beta = _make_api("beta")
binomial = _make_api("binomial")
chisquare = _make_api("chisquare")
choice = _make_api("choice")
exponential = _make_api("exponential")
f = _make_api("f")
gamma = _make_api("gamma")
geometric = _make_api("geometric")
gumbel = _make_api("gumbel")
hypergeometric = _make_api("hypergeometric")
laplace = _make_api("laplace")
logistic = _make_api("logistic")
lognormal = _make_api("lognormal")
logseries = _make_api("logseries")
multinomial = _make_api("multinomial")
negative_binomial = _make_api("negative_binomial")
noncentral_chisquare = _make_api("noncentral_chisquare")
noncentral_f = _make_api("noncentral_f")
normal = _make_api("normal")
pareto = _make_api("pareto")
permutation = _make_api("permutation")
poisson = _make_api("poisson")
power = _make_api("power")
random_sample = _make_api("random_sample")
random = _make_api("random_sample")
randint = _make_api("randint")
random_integers = _make_api("random_integers")
rayleigh = _make_api("rayleigh")
standard_cauchy = _make_api("standard_cauchy")
standard_exponential = _make_api("standard_exponential")
standard_gamma = _make_api("standard_gamma")
standard_normal = _make_api("standard_normal")
standard_t = _make_api("standard_t")
triangular = _make_api("triangular")
uniform = _make_api("uniform")
vonmises = _make_api("vonmises")
wald = _make_api("wald")
weibull = _make_api("weibull")
zipf = _make_api("zipf")
|
RandomChoiceGenerator
|
python
|
walkccc__LeetCode
|
solutions/2182. Construct String With Repeat Limit/2182.py
|
{
"start": 0,
"end": 792
}
|
class ____:
def repeatLimitedString(self, s: str, repeatLimit: int) -> str:
ans = ''
count = collections.Counter(s)
while True:
addOne = ans and self._shouldAddOne(ans, count)
c = self._getLargestChar(ans, count)
if c == ' ':
break
repeats = 1 if addOne else min(count[c], repeatLimit)
ans += c * repeats
count[c] -= repeats
return ans
def _shouldAddOne(self, ans: str, count: collections.Counter) -> bool:
for c in reversed(string.ascii_lowercase):
if count[c]:
return ans[-1] == c
return False
def _getLargestChar(self, ans: str, count: collections.Counter) -> int:
for c in reversed(string.ascii_lowercase):
if count[c] and (not ans or ans[-1] != c):
return c
return ' '
|
Solution
|
python
|
django__django
|
tests/select_related_onetoone/models.py
|
{
"start": 880,
"end": 1023
}
|
class ____(models.Model):
name = models.CharField(max_length=100)
image = models.OneToOneField(Image, models.SET_NULL, null=True)
|
Product
|
python
|
PyCQA__pylint
|
tests/functional/u/used/used_before_assignment_typing.py
|
{
"start": 4844,
"end": 5044
}
|
class ____: # pylint: disable=too-few-public-methods
"""Conditional imports also guarded by TYPE_CHECKING when used."""
if TYPE_CHECKING:
print(urlopen)
|
ConditionalImportGuardedWhenUsed
|
python
|
pytorch__pytorch
|
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
|
{
"start": 48248,
"end": 50718
}
|
class ____(TestCase):
def setUp(self) -> None:
self._run_id = "dummy_run_id"
self._store = DummyStore()
self._backend = DummyRendezvousBackend()
self._min_nodes = 3
self._max_nodes = 6
self._timeout: Optional[RendezvousTimeout] = RendezvousTimeout()
def _create_handler(self) -> DynamicRendezvousHandler:
return DynamicRendezvousHandler.from_backend(
run_id=self._run_id,
store=self._store,
backend=self._backend,
min_nodes=self._min_nodes,
max_nodes=self._max_nodes,
timeout=self._timeout,
)
def test_init_initializes_handler(self) -> None:
handler = self._create_handler()
self.assertEqual(handler.get_backend(), self._backend.name)
self.assertEqual(handler.get_run_id(), self._run_id)
self.assertEqual(handler.settings.run_id, self._run_id)
self.assertEqual(handler.settings.min_nodes, self._min_nodes)
self.assertEqual(handler.settings.max_nodes, self._max_nodes)
if self._timeout is None:
self.assertIsNotNone(handler.settings.timeout)
else:
self.assertIs(handler.settings.timeout, self._timeout)
def test_init_initializes_handler_if_timeout_is_not_specified(self) -> None:
self._timeout = None
self.test_init_initializes_handler()
def test_init_initializes_handler_if_min_and_max_nodes_are_equal(self) -> None:
self._min_nodes = 3
self._max_nodes = 3
self.test_init_initializes_handler()
def test_init_raises_error_if_min_nodes_is_not_positive(self) -> None:
for num in [0, -10]:
with self.subTest(min_nodes=num):
self._min_nodes = num
with self.assertRaisesRegex(
ValueError,
rf"^The minimum number of nodes \({num}\) must be greater than zero.$",
):
self._create_handler()
def test_init_raises_error_if_max_nodes_is_less_than_min(self) -> None:
self._min_nodes = 3
self._max_nodes = 2
with self.assertRaisesRegex(
ValueError,
rf"^The maximum number of nodes \({self._max_nodes}\) must be greater than or equal to "
"the minimum number of nodes "
rf"\({self._min_nodes}\).$",
):
self._create_handler()
|
DynamicRendezvousHandlerFromBackendTest
|
python
|
pydata__xarray
|
xarray/backends/file_manager.py
|
{
"start": 16239,
"end": 16950
}
|
class ____(FileManager[T_File]):
"""FileManager that simply wraps an open file in the FileManager interface."""
def __init__(self, value: T_File, *, close: Callable[[], None] | None = None):
if close is None:
close = value.close
self._value = value
self._close = close
def acquire(self, needs_lock: bool = True) -> T_File:
del needs_lock # unused
return self._value
@contextmanager
def acquire_context(self, needs_lock: bool = True) -> Iterator[T_File]:
del needs_lock # unused
yield self._value
def close(self, needs_lock: bool = True) -> None:
del needs_lock # unused
self._close()
|
DummyFileManager
|
python
|
doocs__leetcode
|
solution/2600-2699/2671.Frequency Tracker/Solution.py
|
{
"start": 0,
"end": 750
}
|
class ____:
def __init__(self):
self.cnt = defaultdict(int)
self.freq = defaultdict(int)
def add(self, number: int) -> None:
self.freq[self.cnt[number]] -= 1
self.cnt[number] += 1
self.freq[self.cnt[number]] += 1
def deleteOne(self, number: int) -> None:
if self.cnt[number]:
self.freq[self.cnt[number]] -= 1
self.cnt[number] -= 1
self.freq[self.cnt[number]] += 1
def hasFrequency(self, frequency: int) -> bool:
return self.freq[frequency] > 0
# Your FrequencyTracker object will be instantiated and called as such:
# obj = FrequencyTracker()
# obj.add(number)
# obj.deleteOne(number)
# param_3 = obj.hasFrequency(frequency)
|
FrequencyTracker
|
python
|
getsentry__sentry
|
src/sentry/remote_subscriptions/apps.py
|
{
"start": 36,
"end": 145
}
|
class ____(AppConfig):
name = "sentry.remote_subscriptions"
def ready(self) -> None:
pass
|
Config
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.