language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | aio-libs__aiohttp | aiohttp/web_app.py | {
"start": 1841,
"end": 12458
} | class ____(MutableMapping[str | AppKey[Any], Any]):
__slots__ = (
"logger",
"_router",
"_loop",
"_handler_args",
"_middlewares",
"_middlewares_handlers",
"_run_middlewares",
"_state",
"_frozen",
"_pre_frozen",
"_subapps",
"_on_response_prepare",
"_on_startup",
"_on_shutdown",
"_on_cleanup",
"_client_max_size",
"_cleanup_ctx",
)
def __init__(
self,
*,
logger: logging.Logger = web_logger,
middlewares: Iterable[Middleware] = (),
handler_args: Mapping[str, Any] | None = None,
client_max_size: int = 1024**2,
debug: Any = ..., # mypy doesn't support ellipsis
) -> None:
if debug is not ...:
warnings.warn(
"debug argument is no-op since 4.0 and scheduled for removal in 5.0",
DeprecationWarning,
stacklevel=2,
)
self._router = UrlDispatcher()
self._handler_args = handler_args
self.logger = logger
self._middlewares: _Middlewares = FrozenList(middlewares)
# initialized on freezing
self._middlewares_handlers: _MiddlewaresHandlers = tuple()
# initialized on freezing
self._run_middlewares: bool | None = None
self._state: dict[AppKey[Any] | str, object] = {}
self._frozen = False
self._pre_frozen = False
self._subapps: _Subapps = []
self._on_response_prepare: _RespPrepareSignal = Signal(self)
self._on_startup: _AppSignal = Signal(self)
self._on_shutdown: _AppSignal = Signal(self)
self._on_cleanup: _AppSignal = Signal(self)
self._cleanup_ctx = CleanupContext()
self._on_startup.append(self._cleanup_ctx._on_startup)
self._on_cleanup.append(self._cleanup_ctx._on_cleanup)
self._client_max_size = client_max_size
def __init_subclass__(cls: type["Application"]) -> None:
raise TypeError(
f"Inheritance class {cls.__name__} from web.Application is forbidden"
)
# MutableMapping API
def __eq__(self, other: object) -> bool:
return self is other
@overload # type: ignore[override]
def __getitem__(self, key: AppKey[_T]) -> _T: ...
@overload
def __getitem__(self, key: str) -> Any: ...
def __getitem__(self, key: str | AppKey[_T]) -> Any:
return self._state[key]
def _check_frozen(self) -> None:
if self._frozen:
raise RuntimeError(
"Changing state of started or joined application is forbidden"
)
@overload # type: ignore[override]
def __setitem__(self, key: AppKey[_T], value: _T) -> None: ...
@overload
def __setitem__(self, key: str, value: Any) -> None: ...
def __setitem__(self, key: str | AppKey[_T], value: Any) -> None:
self._check_frozen()
if not isinstance(key, AppKey):
warnings.warn(
"It is recommended to use web.AppKey instances for keys.\n"
+ "https://docs.aiohttp.org/en/stable/web_advanced.html"
+ "#application-s-config",
category=NotAppKeyWarning,
stacklevel=2,
)
self._state[key] = value
def __delitem__(self, key: str | AppKey[_T]) -> None:
self._check_frozen()
del self._state[key]
def __len__(self) -> int:
return len(self._state)
def __iter__(self) -> Iterator[str | AppKey[Any]]:
return iter(self._state)
def __hash__(self) -> int:
return id(self)
@overload # type: ignore[override]
def get(self, key: AppKey[_T], default: None = ...) -> _T | None: ...
@overload
def get(self, key: AppKey[_T], default: _U) -> _T | _U: ...
@overload
def get(self, key: str, default: Any = ...) -> Any: ...
def get(self, key: str | AppKey[_T], default: Any = None) -> Any:
return self._state.get(key, default)
########
def _set_loop(self, loop: asyncio.AbstractEventLoop | None) -> None:
warnings.warn(
"_set_loop() is no-op since 4.0 and scheduled for removal in 5.0",
DeprecationWarning,
stacklevel=2,
)
@property
def pre_frozen(self) -> bool:
return self._pre_frozen
def pre_freeze(self) -> None:
if self._pre_frozen:
return
self._pre_frozen = True
self._middlewares.freeze()
self._router.freeze()
self._on_response_prepare.freeze()
self._cleanup_ctx.freeze()
self._on_startup.freeze()
self._on_shutdown.freeze()
self._on_cleanup.freeze()
self._middlewares_handlers = tuple(self._prepare_middleware())
# If current app and any subapp do not have middlewares avoid run all
# of the code footprint that it implies, which have a middleware
# hardcoded per app that sets up the current_app attribute. If no
# middlewares are configured the handler will receive the proper
# current_app without needing all of this code.
self._run_middlewares = True if self.middlewares else False
for subapp in self._subapps:
subapp.pre_freeze()
self._run_middlewares = self._run_middlewares or subapp._run_middlewares
@property
def frozen(self) -> bool:
return self._frozen
def freeze(self) -> None:
if self._frozen:
return
self.pre_freeze()
self._frozen = True
for subapp in self._subapps:
subapp.freeze()
@property
def debug(self) -> bool:
warnings.warn(
"debug property is deprecated since 4.0 and scheduled for removal in 5.0",
DeprecationWarning,
stacklevel=2,
)
return asyncio.get_event_loop().get_debug()
def _reg_subapp_signals(self, subapp: "Application") -> None:
def reg_handler(signame: str) -> None:
subsig = getattr(subapp, signame)
async def handler(app: "Application") -> None:
await subsig.send(subapp)
appsig = getattr(self, signame)
appsig.append(handler)
reg_handler("on_startup")
reg_handler("on_shutdown")
reg_handler("on_cleanup")
def add_subapp(self, prefix: str, subapp: "Application") -> PrefixedSubAppResource:
if not isinstance(prefix, str):
raise TypeError("Prefix must be str")
prefix = prefix.rstrip("/")
if not prefix:
raise ValueError("Prefix cannot be empty")
factory = partial(PrefixedSubAppResource, prefix, subapp)
return self._add_subapp(factory, subapp)
def _add_subapp(
self, resource_factory: Callable[[], _Resource], subapp: "Application"
) -> _Resource:
if self.frozen:
raise RuntimeError("Cannot add sub application to frozen application")
if subapp.frozen:
raise RuntimeError("Cannot add frozen application")
resource = resource_factory()
self.router.register_resource(resource)
self._reg_subapp_signals(subapp)
self._subapps.append(subapp)
subapp.pre_freeze()
return resource
def add_domain(self, domain: str, subapp: "Application") -> MatchedSubAppResource:
if not isinstance(domain, str):
raise TypeError("Domain must be str")
elif "*" in domain:
rule: Domain = MaskDomain(domain)
else:
rule = Domain(domain)
factory = partial(MatchedSubAppResource, rule, subapp)
return self._add_subapp(factory, subapp)
def add_routes(self, routes: Iterable[AbstractRouteDef]) -> list[AbstractRoute]:
return self.router.add_routes(routes)
@property
def on_response_prepare(self) -> _RespPrepareSignal:
return self._on_response_prepare
@property
def on_startup(self) -> _AppSignal:
return self._on_startup
@property
def on_shutdown(self) -> _AppSignal:
return self._on_shutdown
@property
def on_cleanup(self) -> _AppSignal:
return self._on_cleanup
@property
def cleanup_ctx(self) -> "CleanupContext":
return self._cleanup_ctx
@property
def router(self) -> UrlDispatcher:
return self._router
@property
def middlewares(self) -> _Middlewares:
return self._middlewares
async def startup(self) -> None:
"""Causes on_startup signal
Should be called in the event loop along with the request handler.
"""
await self.on_startup.send(self)
async def shutdown(self) -> None:
"""Causes on_shutdown signal
Should be called before cleanup()
"""
await self.on_shutdown.send(self)
async def cleanup(self) -> None:
"""Causes on_cleanup signal
Should be called after shutdown()
"""
if self.on_cleanup.frozen:
await self.on_cleanup.send(self)
else:
# If an exception occurs in startup, ensure cleanup contexts are completed.
await self._cleanup_ctx._on_cleanup(self)
def _prepare_middleware(self) -> Iterator[Middleware]:
yield from reversed(self._middlewares)
yield _fix_request_current_app(self)
async def _handle(self, request: Request) -> StreamResponse:
match_info = await self._router.resolve(request)
match_info.add_app(self)
match_info.freeze()
request._match_info = match_info
if request.headers.get(hdrs.EXPECT):
resp = await match_info.expect_handler(request)
await request.writer.drain()
if resp is not None:
return resp
handler = match_info.handler
if self._run_middlewares:
# If its a SystemRoute, don't cache building the middlewares since
# they are constructed for every MatchInfoError as a new handler
# is made each time.
if isinstance(match_info.route, SystemRoute):
handler = _build_middlewares(handler, match_info.apps)
else:
handler = _cached_build_middleware(handler, match_info.apps)
return await handler(request)
def __call__(self) -> "Application":
"""gunicorn compatibility"""
return self
def __repr__(self) -> str:
return f"<Application 0x{id(self):x}>"
def __bool__(self) -> bool:
return True
| Application |
python | spack__spack | lib/spack/spack/error.py | {
"start": 3580,
"end": 3684
} | class ____(SpackError):
"""Superclass for all errors that occur while constructing specs."""
| SpecError |
python | spack__spack | lib/spack/spack/tokenize.py | {
"start": 433,
"end": 797
} | class ____(enum.Enum):
"""Base class for an enum type with a regex value"""
def __new__(cls, *args, **kwargs):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __init__(self, regex):
self.regex = regex
def __str__(self):
return f"{self._name_}"
| TokenBase |
python | huggingface__transformers | src/transformers/models/sam2/processing_sam2.py | {
"start": 1080,
"end": 23047
} | class ____(ProcessorMixin):
r"""
Constructs a SAM2 processor which wraps a SAM2 image processor and an 2D points & Bounding boxes processor into a
single processor.
[`Sam2Processor`] offers all the functionalities of [`Sam2ImageProcessorFast`] and [`Sam2VideoProcessor`]. See the docstring of
[`~Sam2ImageProcessorFast.__call__`] and [`~Sam2VideoProcessor.__call__`] for more information.
Args:
image_processor (`Sam2ImageProcessorFast`):
An instance of [`Sam2ImageProcessorFast`].
target_size (`int`, *optional*):
The target size (target_size, target_size) to which the image will be resized.
point_pad_value (`int`, *optional*, defaults to -10):
The value used for padding input points.
"""
def __init__(self, image_processor, target_size: Optional[int] = None, point_pad_value: int = -10, **kwargs):
super().__init__(image_processor, **kwargs)
self.point_pad_value = point_pad_value
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
def __call__(
self,
images: Optional[ImageInput] = None,
segmentation_maps: Optional[ImageInput] = None,
input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None,
input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
original_sizes: Optional[Union[list[list[float]], torch.Tensor]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchEncoding:
r"""
This method uses [`Sam2ImageProcessorFast.__call__`] method to prepare image(s) for the model. It also prepares 2D
points and bounding boxes for the model if they are provided.
Args:
images (`ImageInput`, *optional*):
The image(s) to process.
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to process.
input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
The points to add to the frame.
input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
The labels for the points.
input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
The bounding boxes to add to the frame.
original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
The original sizes of the images.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
**kwargs:
Additional keyword arguments to pass to the image processor.
Returns:
A [`BatchEncoding`] with the following fields:
- `pixel_values` (`torch.Tensor`): The processed image(s).
- `original_sizes` (`list[list[float]]`): The original sizes of the images.
- `labels` (`torch.Tensor`): The processed segmentation maps (if provided).
- `input_points` (`torch.Tensor`): The processed points.
- `input_labels` (`torch.Tensor`): The processed labels.
- `input_boxes` (`torch.Tensor`): The processed bounding boxes.
"""
if images is not None:
encoding_image_processor = self.image_processor(
images,
segmentation_maps=segmentation_maps,
return_tensors=return_tensors,
**kwargs,
)
elif original_sizes is not None:
if isinstance(original_sizes, torch.Tensor):
original_sizes = original_sizes.cpu().tolist()
encoding_image_processor = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
else:
raise ValueError("Either images or original_sizes must be provided")
# pop arguments that are not used in the forward but used nevertheless
original_sizes = encoding_image_processor["original_sizes"]
# Check original_sizes is of length 1 or len(images)
if images is not None and len(original_sizes) != 1 and len(original_sizes) != len(images):
raise ValueError(
"original_sizes must be of length 1 or len(images). If you are passing a single image, you must pass a single original_size."
)
# Process input points, labels, and boxes if provided
if input_points is not None or input_labels is not None or input_boxes is not None:
# Validate and convert inputs to standardized format
processed_points = self._validate_single_input(
input_points,
expected_depth=4,
input_name="points",
expected_format="[image level, object level, point level, point coordinates]",
expected_coord_size=2,
)
processed_labels = self._validate_single_input(
input_labels,
expected_depth=3,
input_name="labels",
expected_format="[image level, object level, point level]",
)
processed_boxes = self._validate_single_input(
input_boxes,
expected_depth=3,
input_name="boxes",
expected_format="[image level, box level, box coordinates]",
expected_coord_size=4,
)
# Get padding requirements for all inputs
if processed_points is not None:
points_max_dims = self._get_nested_dimensions(processed_points)[:3]
if processed_labels is not None:
labels_max_dims = self._get_nested_dimensions(processed_labels)[:3]
if processed_boxes is not None:
boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2]
# Ensure points and labels have consistent dimensions
if processed_points is not None and processed_labels is not None:
if points_max_dims != labels_max_dims:
raise ValueError(
"Input points and labels have inconsistent dimensions. Please ensure they have the same dimensions."
)
# Check that boxes don't need padding (model limitation)
if processed_boxes is not None and len(processed_boxes) >= 2:
if any(len(img_boxes) < boxes_max_dims[1] for img_boxes in processed_boxes):
raise ValueError(
"Input boxes have inconsistent dimensions that would require padding, "
"but boxes cannot be padded due to model limitations. "
"Please ensure all images have the same number of boxes."
)
# Pad and normalize all inputs to final tensor format
if processed_points is not None:
padded_points = self._pad_nested_list(processed_points, points_max_dims + [2])
final_points = torch.tensor(padded_points, dtype=torch.float32)
self._normalize_tensor_coordinates(final_points, original_sizes, preserve_padding=True)
encoding_image_processor.update({"input_points": final_points})
if processed_labels is not None:
padded_labels = self._pad_nested_list(processed_labels, labels_max_dims)
final_labels = torch.tensor(padded_labels, dtype=torch.int64)
encoding_image_processor.update({"input_labels": final_labels})
if processed_boxes is not None:
final_boxes = torch.tensor(processed_boxes, dtype=torch.float32)
self._normalize_tensor_coordinates(final_boxes, original_sizes, is_bounding_box=True)
encoding_image_processor.update({"input_boxes": final_boxes})
return encoding_image_processor
def _normalize_coordinates(
self, target_size: int, coords: "torch.Tensor", original_size, is_bounding_box=False
) -> "torch.Tensor":
"""
Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
Args:
target_size (`int`):
The target size of the image.
coords (`torch.Tensor`):
The coordinates to be normalized.
original_size (`tuple`):
The original size of the image.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether the coordinates are bounding boxes.
"""
old_h, old_w = original_size
new_h, new_w = target_size, target_size
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
"""
Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists.
Args:
data: Input data in any format
expected_depth: Expected nesting depth
current_depth: Current depth in recursion
Returns:
Nested list representation of the data
"""
if data is None:
return None
# Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array
if isinstance(data, torch.Tensor): # PyTorch tensor
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small tensor
return data.numpy().tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, np.ndarray): # NumPy array
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small array
return data.tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, list):
if current_depth == expected_depth:
# We've reached the expected depth, return as is
return data
else:
# Continue recursion
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, (int, float)):
return data
else:
raise TypeError(f"Unsupported data type: {type(data)}")
def _get_nested_dimensions(self, nested_list, max_dims=None):
"""
Get the maximum dimensions at each level of nesting.
Args:
nested_list (`list`):
Nested list structure.
max_dims (`list`, *optional*):
Current maximum dimensions (for recursion).
Returns:
`list`: A list of maximum dimensions for each nesting level.
"""
if max_dims is None:
max_dims = []
if not isinstance(nested_list, list):
return max_dims
if len(max_dims) == 0:
max_dims.append(len(nested_list))
else:
max_dims[0] = max(max_dims[0], len(nested_list))
if len(nested_list) > 0:
for item in nested_list:
if isinstance(item, list):
sub_dims = self._get_nested_dimensions(item)
# Merge sub_dims into max_dims
for i, dim in enumerate(sub_dims):
if i + 1 >= len(max_dims):
max_dims.append(dim)
else:
max_dims[i + 1] = max(max_dims[i + 1], dim)
return max_dims
def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None):
"""
Recursively pad a nested list to match target dimensions.
Args:
nested_list (`list`):
Nested list to pad.
target_dims (`list`):
Target dimensions for each level.
current_level (`int`, *optional*, defaults to 0):
Current nesting level.
pad_value (`int`, *optional*):
Value to use for padding.
Returns:
`list`: The padded nested list.
"""
if pad_value is None:
pad_value = self.point_pad_value
if current_level >= len(target_dims):
return nested_list
# Ensure we have a list
if not isinstance(nested_list, list):
nested_list = [nested_list]
# Pad current level
current_size = len(nested_list)
target_size = target_dims[current_level]
# Pad with appropriate values
if current_level == len(target_dims) - 1:
# At the coordinate level, pad with pad_value
nested_list.extend([pad_value] * (target_size - current_size))
else:
# At higher levels, pad with nested structures
if current_size > 0:
# Create appropriately sized template
if current_level < len(target_dims) - 2:
# For non-coordinate levels, create empty nested structure
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
else:
# For coordinate level, create list of pad_values
template = [pad_value] * target_dims[current_level + 1]
nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)])
else:
# Create from scratch
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
nested_list.extend([deepcopy(template) for _ in range(target_size)])
# Recursively pad sublists
if current_level < len(target_dims) - 1:
for i in range(len(nested_list)):
if isinstance(nested_list[i], list):
nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value)
return nested_list
def _create_empty_nested_structure(self, dims, pad_value):
"""
Create an empty nested structure with given dimensions filled with pad_value.
Args:
dims (`list`):
The dimensions of the nested structure.
pad_value (`int`):
The value to fill the structure with.
"""
if len(dims) == 1:
return [pad_value] * dims[0]
else:
return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])]
def _get_nesting_level(self, input_list):
"""
Get the nesting level of a list structure.
Args:
input_list (`list`):
The list to get the nesting level of.
"""
if isinstance(input_list, list):
if len(input_list) == 0:
return 1
return 1 + self._get_nesting_level(input_list[0])
elif isinstance(input_list, (np.ndarray, torch.Tensor)):
# For arrays/tensors, the nesting level is the number of dimensions
return len(input_list.shape)
return 0
def _validate_single_input(
self,
data: Union[torch.Tensor, np.ndarray, list],
expected_depth: int,
input_name: str,
expected_format: str,
expected_coord_size: Optional[int] = None,
) -> list:
"""
Validate a single input by ensuring proper nesting and raising an error if the input is not valid.
Args:
data (`torch.Tensor`, `np.ndarray`, or `list`):
Input data to process.
expected_depth (`int`):
Expected nesting depth.
input_name (`str`):
Name of the input for error messages.
expected_format (`str`):
The expected format of the input.
expected_coord_size (`int`, *optional*):
Expected coordinate size (2 for points, 4 for boxes, None for labels).
.
"""
if data is None:
return None
# Handle tensors and numpy arrays first
if isinstance(data, (torch.Tensor, np.ndarray)):
# For tensors/arrays, we can directly check the number of dimensions
if data.ndim != expected_depth:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions."
)
elif expected_coord_size is not None:
if data.shape[-1] != expected_coord_size:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}."
)
return self._convert_to_nested_list(data, expected_depth)
# Handle nested lists
if isinstance(data, list):
current_depth = self._get_nesting_level(data)
if current_depth != expected_depth:
raise ValueError(
f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels."
)
return self._convert_to_nested_list(data, expected_depth)
def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False):
"""
Helper method to normalize coordinates in a tensor across multiple images.
Args:
tensor (`torch.Tensor`):
Input tensor with coordinates.
original_sizes (`list`):
Original image sizes.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether coordinates are bounding boxes.
preserve_padding (`bool`, *optional*, defaults to `False`):
Whether to preserve padding values (for points).
"""
if preserve_padding:
# For points: avoid normalizing pad values
mask = tensor != self.point_pad_value
coord_mask = mask.all(dim=-1, keepdim=True)
for img_idx in range(len(original_sizes)):
if img_idx < tensor.shape[0]:
original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0]
normalized_coords = self._normalize_coordinates(
self.target_size, tensor[img_idx], original_size, is_bounding_box=is_bounding_box
)
if preserve_padding:
# Only update non-padded values
img_mask = coord_mask[img_idx]
tensor[img_idx] = torch.where(
img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx]
)
else:
tensor[img_idx] = normalized_coords
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
(`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
return self.image_processor.post_process_masks(
masks,
original_sizes,
mask_threshold,
binarize,
max_hole_area,
max_sprinkle_area,
apply_non_overlapping_constraints,
**kwargs,
)
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
return list(image_processor_input_names + ["original_sizes"])
__all__ = ["Sam2Processor"]
| Sam2Processor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchLiteral2.py | {
"start": 171,
"end": 218
} | class ____:
tag: Literal["a"]
name: str
| A |
python | pdm-project__pdm | src/pdm/resolver/providers.py | {
"start": 16559,
"end": 19793
} | class ____(BaseProvider):
"""A provider that reuses preferred pins if possible.
This is used to implement "add", "remove", and "reuse upgrade",
where already-pinned candidates in lockfile should be preferred.
"""
def __init__(
self,
repository: BaseRepository,
allow_prereleases: bool | None = None,
overrides: dict[str, str] | None = None,
direct_minimal_versions: bool = False,
*,
locked_repository: LockedRepository | None = None,
tracked_names: Iterable[str],
) -> None:
super().__init__(
repository=repository,
allow_prereleases=allow_prereleases,
overrides=overrides,
direct_minimal_versions=direct_minimal_versions,
locked_repository=locked_repository,
)
self.tracked_names = set(tracked_names)
def iter_reuse_candidates(self, identifier: str, requirement: Requirement | None) -> Iterable[Candidate]:
bare_name = strip_extras(identifier)[0]
if bare_name in self.tracked_names or identifier not in self.locked_candidates:
return []
return sorted(self.locked_candidates[identifier], key=lambda c: c.version or "", reverse=True)
def get_reuse_candidate(self, identifier: str, requirement: Requirement | None) -> Candidate | None:
deprecation_warning(
"The get_reuse_candidate method is deprecated, use iter_reuse_candidates instead.", stacklevel=2
)
return next(iter(self.iter_reuse_candidates(identifier, requirement)), None)
def find_matches(
self,
identifier: str,
requirements: Mapping[str, Iterator[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
) -> Callable[[], Iterator[Candidate]]:
super_find = super().find_matches(identifier, requirements, incompatibilities)
def matches_gen() -> Iterator[Candidate]:
requested_req = next(filter(lambda r: r.is_named, requirements[identifier]), None)
for pin in self.iter_reuse_candidates(identifier, requested_req):
if identifier not in self.overrides and pin.req.is_named:
pin = pin.copy_with(min(requirements[identifier], key=self.requirement_preference))
incompat = list(incompatibilities[identifier])
pin._preferred = True # type: ignore[attr-defined]
if pin not in incompat and all(self.is_satisfied_by(r, pin) for r in requirements[identifier]):
yield pin
yield from super_find()
return matches_gen
def _get_dependencies_from_repository(self, candidate: Candidate) -> tuple[list[Requirement], PySpecSet, str]:
is_stable_metadata = candidate.req.is_named or (
isinstance(candidate.req, VcsRequirement) and candidate.req.revision
)
if self.locked_repository is not None and is_stable_metadata:
try:
return self.locked_repository.get_dependencies(candidate)
except CandidateNotFound:
pass
return super()._get_dependencies_from_repository(candidate)
@register_provider("eager")
| ReusePinProvider |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 14419,
"end": 14833
} | class ____(ChainedSource):
member: str = "grad"
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen(self.base)
codegen.extend_output(codegen.create_load_attrs(self.member))
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def name(self) -> str:
return f"{self.base.name()}.{self.member}"
@dataclasses.dataclass(frozen=True)
| GradSource |
python | viewflow__viewflow | tests/json/test_json__char.py | {
"start": 95,
"end": 302
} | class ____(models.Model):
data = models.JSONField(default=dict)
char_field = jsonstore.CharField(max_length=250, blank=True)
required_char_field = jsonstore.CharField(max_length=250)
| CharFieldModel |
python | scikit-learn__scikit-learn | sklearn/tree/_export.py | {
"start": 20397,
"end": 40933
} | class ____(_BaseTreeExporter):
def __init__(
self,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
impurity=True,
node_ids=False,
proportion=False,
rounded=False,
precision=3,
fontsize=None,
):
super().__init__(
max_depth=max_depth,
feature_names=feature_names,
class_names=class_names,
label=label,
filled=filled,
impurity=impurity,
node_ids=node_ids,
proportion=proportion,
rounded=rounded,
precision=precision,
)
self.fontsize = fontsize
# The depth of each node for plotting with 'leaf' option
self.ranks = {"leaves": []}
# The colors to render each node with
self.colors = {"bounds": None}
self.characters = ["#", "[", "]", "<=", "\n", "", ""]
self.bbox_args = dict()
if self.rounded:
self.bbox_args["boxstyle"] = "round"
self.arrow_args = dict(arrowstyle="<-")
def _make_tree(self, node_id, et, criterion, depth=0):
# traverses _tree.Tree recursively, builds intermediate
# "_reingold_tilford.Tree" object
name = self.node_to_str(et, node_id, criterion=criterion)
if et.children_left[node_id] != _tree.TREE_LEAF and (
self.max_depth is None or depth <= self.max_depth
):
children = [
self._make_tree(
et.children_left[node_id], et, criterion, depth=depth + 1
),
self._make_tree(
et.children_right[node_id], et, criterion, depth=depth + 1
),
]
else:
return Tree(name, node_id)
return Tree(name, node_id, *children)
def export(self, decision_tree, ax=None):
import matplotlib.pyplot as plt
from matplotlib.text import Annotation
if ax is None:
ax = plt.gca()
ax.clear()
ax.set_axis_off()
my_tree = self._make_tree(0, decision_tree.tree_, decision_tree.criterion)
draw_tree = buchheim(my_tree)
# important to make sure we're still
# inside the axis after drawing the box
# this makes sense because the width of a box
# is about the same as the distance between boxes
max_x, max_y = draw_tree.max_extents() + 1
ax_width = ax.get_window_extent().width
ax_height = ax.get_window_extent().height
scale_x = ax_width / max_x
scale_y = ax_height / max_y
self.recurse(draw_tree, decision_tree.tree_, ax, max_x, max_y)
anns = [ann for ann in ax.get_children() if isinstance(ann, Annotation)]
# update sizes of all bboxes
renderer = ax.figure.canvas.get_renderer()
for ann in anns:
ann.update_bbox_position_size(renderer)
if self.fontsize is None:
# get figure to data transform
# adjust fontsize to avoid overlap
# get max box width and height
extents = [
bbox_patch.get_window_extent()
for ann in anns
if (bbox_patch := ann.get_bbox_patch()) is not None
]
max_width = max([extent.width for extent in extents])
max_height = max([extent.height for extent in extents])
# width should be around scale_x in axis coordinates
size = anns[0].get_fontsize() * min(
scale_x / max_width, scale_y / max_height
)
for ann in anns:
ann.set_fontsize(size)
return anns
def recurse(self, node, tree, ax, max_x, max_y, depth=0):
import matplotlib.pyplot as plt
# kwargs for annotations without a bounding box
common_kwargs = dict(
zorder=100 - 10 * depth,
xycoords="axes fraction",
)
if self.fontsize is not None:
common_kwargs["fontsize"] = self.fontsize
# kwargs for annotations with a bounding box
kwargs = dict(
ha="center",
va="center",
bbox=self.bbox_args.copy(),
arrowprops=self.arrow_args.copy(),
**common_kwargs,
)
kwargs["arrowprops"]["edgecolor"] = plt.rcParams["text.color"]
# offset things by .5 to center them in plot
xy = ((node.x + 0.5) / max_x, (max_y - node.y - 0.5) / max_y)
if self.max_depth is None or depth <= self.max_depth:
if self.filled:
kwargs["bbox"]["fc"] = self.get_fill_color(tree, node.tree.node_id)
else:
kwargs["bbox"]["fc"] = ax.get_facecolor()
if node.parent is None:
# root
ax.annotate(node.tree.label, xy, **kwargs)
else:
xy_parent = (
(node.parent.x + 0.5) / max_x,
(max_y - node.parent.y - 0.5) / max_y,
)
ax.annotate(node.tree.label, xy_parent, xy, **kwargs)
# Draw True/False labels if parent is root node
if node.parent.parent is None:
# Adjust the position for the text to be slightly above the arrow
text_pos = (
(xy_parent[0] + xy[0]) / 2,
(xy_parent[1] + xy[1]) / 2,
)
# Annotate the arrow with the edge label to indicate the child
# where the sample-split condition is satisfied
if node.parent.left() == node:
label_text, label_ha = ("True ", "right")
else:
label_text, label_ha = (" False", "left")
ax.annotate(label_text, text_pos, ha=label_ha, **common_kwargs)
for child in node.children:
self.recurse(child, tree, ax, max_x, max_y, depth=depth + 1)
else:
xy_parent = (
(node.parent.x + 0.5) / max_x,
(max_y - node.parent.y - 0.5) / max_y,
)
kwargs["bbox"]["fc"] = "grey"
ax.annotate("\n (...) \n", xy_parent, xy, **kwargs)
@validate_params(
{
"decision_tree": "no_validation",
"out_file": [str, None, HasMethods("write")],
"max_depth": [Interval(Integral, 0, None, closed="left"), None],
"feature_names": ["array-like", None],
"class_names": ["array-like", "boolean", None],
"label": [StrOptions({"all", "root", "none"})],
"filled": ["boolean"],
"leaves_parallel": ["boolean"],
"impurity": ["boolean"],
"node_ids": ["boolean"],
"proportion": ["boolean"],
"rotate": ["boolean"],
"rounded": ["boolean"],
"special_characters": ["boolean"],
"precision": [Interval(Integral, 0, None, closed="left"), None],
"fontname": [str],
},
prefer_skip_nested_validation=True,
)
def export_graphviz(
decision_tree,
out_file=None,
*,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
leaves_parallel=False,
impurity=True,
node_ids=False,
proportion=False,
rotate=False,
rounded=False,
special_characters=False,
precision=3,
fontname="helvetica",
):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : object
The decision tree estimator to be exported to GraphViz.
out_file : object or str, default=None
Handle or name of the output file. If ``None``, the result is
returned as a string.
.. versionchanged:: 0.20
Default of out_file changed from "tree.dot" to None.
max_depth : int, default=None
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : array-like of shape (n_features,), default=None
An array containing the feature names.
If None, generic names will be used ("x[0]", "x[1]", ...).
class_names : array-like of shape (n_classes,) or bool, default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, default='all'
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, default=False
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, default=False
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, default=True
When set to ``True``, show the impurity at each node.
node_ids : bool, default=False
When set to ``True``, show the ID number on each node.
proportion : bool, default=False
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, default=False
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, default=False
When set to ``True``, draw node boxes with rounded corners.
special_characters : bool, default=False
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, default=3
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
fontname : str, default='helvetica'
Name of font used to render text.
Returns
-------
dot_data : str
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf)
'digraph Tree {...
"""
if feature_names is not None:
if any((not isinstance(name, str) for name in feature_names)):
raise ValueError("All feature names must be strings.")
feature_names = check_array(
feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
if class_names is not None and not isinstance(class_names, bool):
class_names = check_array(
class_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
check_is_fitted(decision_tree)
own_file = False
return_string = False
try:
if isinstance(out_file, str):
out_file = open(out_file, "w", encoding="utf-8")
own_file = True
if out_file is None:
return_string = True
out_file = StringIO()
exporter = _DOTTreeExporter(
out_file=out_file,
max_depth=max_depth,
feature_names=feature_names,
class_names=class_names,
label=label,
filled=filled,
leaves_parallel=leaves_parallel,
impurity=impurity,
node_ids=node_ids,
proportion=proportion,
rotate=rotate,
rounded=rounded,
special_characters=special_characters,
precision=precision,
fontname=fontname,
)
exporter.export(decision_tree)
if return_string:
return exporter.out_file.getvalue()
finally:
if own_file:
out_file.close()
def _compute_depth(tree, node):
"""
Returns the depth of the subtree rooted in node.
"""
def compute_depth_(
current_node, current_depth, children_left, children_right, depths
):
depths += [current_depth]
left = children_left[current_node]
right = children_right[current_node]
if left != -1 and right != -1:
compute_depth_(
left, current_depth + 1, children_left, children_right, depths
)
compute_depth_(
right, current_depth + 1, children_left, children_right, depths
)
depths = []
compute_depth_(node, 1, tree.children_left, tree.children_right, depths)
return max(depths)
@validate_params(
{
"decision_tree": [DecisionTreeClassifier, DecisionTreeRegressor],
"feature_names": ["array-like", None],
"class_names": ["array-like", None],
"max_depth": [Interval(Integral, 0, None, closed="left"), None],
"spacing": [Interval(Integral, 1, None, closed="left"), None],
"decimals": [Interval(Integral, 0, None, closed="left"), None],
"show_weights": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def export_text(
decision_tree,
*,
feature_names=None,
class_names=None,
max_depth=10,
spacing=3,
decimals=2,
show_weights=False,
):
"""Build a text report showing the rules of a decision tree.
Note that backwards compatibility may not be supported.
Parameters
----------
decision_tree : object
The decision tree estimator to be exported.
It can be an instance of
DecisionTreeClassifier or DecisionTreeRegressor.
feature_names : array-like of shape (n_features,), default=None
An array containing the feature names.
If None generic names will be used ("feature_0", "feature_1", ...).
class_names : array-like of shape (n_classes,), default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
- if `None`, the class names are delegated to `decision_tree.classes_`;
- otherwise, `class_names` will be used as class names instead of
`decision_tree.classes_`. The length of `class_names` must match
the length of `decision_tree.classes_`.
.. versionadded:: 1.3
max_depth : int, default=10
Only the first max_depth levels of the tree are exported.
Truncated branches will be marked with "...".
spacing : int, default=3
Number of spaces between edges. The higher it is, the wider the result.
decimals : int, default=2
Number of decimal digits to display.
show_weights : bool, default=False
If true the classification weights will be exported on each leaf.
The classification weights are the number of samples each class.
Returns
-------
report : str
Text summary of all the rules in the decision tree.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.tree import DecisionTreeClassifier
>>> from sklearn.tree import export_text
>>> iris = load_iris()
>>> X = iris['data']
>>> y = iris['target']
>>> decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2)
>>> decision_tree = decision_tree.fit(X, y)
>>> r = export_text(decision_tree, feature_names=iris['feature_names'])
>>> print(r)
|--- petal width (cm) <= 0.80
| |--- class: 0
|--- petal width (cm) > 0.80
| |--- petal width (cm) <= 1.75
| | |--- class: 1
| |--- petal width (cm) > 1.75
| | |--- class: 2
"""
if feature_names is not None:
feature_names = check_array(
feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
if class_names is not None:
class_names = check_array(
class_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
check_is_fitted(decision_tree)
tree_ = decision_tree.tree_
if is_classifier(decision_tree):
if class_names is None:
class_names = decision_tree.classes_
elif len(class_names) != len(decision_tree.classes_):
raise ValueError(
"When `class_names` is an array, it should contain as"
" many items as `decision_tree.classes_`. Got"
f" {len(class_names)} while the tree was fitted with"
f" {len(decision_tree.classes_)} classes."
)
right_child_fmt = "{} {} <= {}\n"
left_child_fmt = "{} {} > {}\n"
truncation_fmt = "{} {}\n"
if feature_names is not None and len(feature_names) != tree_.n_features:
raise ValueError(
"feature_names must contain %d elements, got %d"
% (tree_.n_features, len(feature_names))
)
if isinstance(decision_tree, DecisionTreeClassifier):
value_fmt = "{}{} weights: {}\n"
if not show_weights:
value_fmt = "{}{}{}\n"
else:
value_fmt = "{}{} value: {}\n"
if feature_names is not None:
feature_names_ = [
feature_names[i] if i != _tree.TREE_UNDEFINED else None
for i in tree_.feature
]
else:
feature_names_ = ["feature_{}".format(i) for i in tree_.feature]
report = StringIO()
def _add_leaf(value, weighted_n_node_samples, class_name, indent):
val = ""
if isinstance(decision_tree, DecisionTreeClassifier):
if show_weights:
val = [
"{1:.{0}f}, ".format(decimals, v * weighted_n_node_samples)
for v in value
]
val = "[" + "".join(val)[:-2] + "]"
weighted_n_node_samples
val += " class: " + str(class_name)
else:
val = ["{1:.{0}f}, ".format(decimals, v) for v in value]
val = "[" + "".join(val)[:-2] + "]"
report.write(value_fmt.format(indent, "", val))
def print_tree_recurse(report, node, depth):
indent = ("|" + (" " * spacing)) * depth
indent = indent[:-spacing] + "-" * spacing
value = None
if tree_.n_outputs == 1:
value = tree_.value[node][0]
else:
value = tree_.value[node].T[0]
class_name = np.argmax(value)
if tree_.n_classes[0] != 1 and tree_.n_outputs == 1:
class_name = class_names[class_name]
weighted_n_node_samples = tree_.weighted_n_node_samples[node]
if depth <= max_depth + 1:
info_fmt = ""
info_fmt_left = info_fmt
info_fmt_right = info_fmt
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_names_[node]
threshold = tree_.threshold[node]
threshold = "{1:.{0}f}".format(decimals, threshold)
report.write(right_child_fmt.format(indent, name, threshold))
report.write(info_fmt_left)
print_tree_recurse(report, tree_.children_left[node], depth + 1)
report.write(left_child_fmt.format(indent, name, threshold))
report.write(info_fmt_right)
print_tree_recurse(report, tree_.children_right[node], depth + 1)
else: # leaf
_add_leaf(value, weighted_n_node_samples, class_name, indent)
else:
subtree_depth = _compute_depth(tree_, node)
if subtree_depth == 1:
_add_leaf(value, weighted_n_node_samples, class_name, indent)
else:
trunc_report = "truncated branch of depth %d" % subtree_depth
report.write(truncation_fmt.format(indent, trunc_report))
print_tree_recurse(report, 0, 1)
return report.getvalue()
| _MPLTreeExporter |
python | ray-project__ray | python/ray/data/_internal/datasource/mongo_datasource.py | {
"start": 270,
"end": 4780
} | class ____(Datasource):
"""Datasource for reading from and writing to MongoDB."""
def __init__(
self,
uri: str,
database: str,
collection: str,
pipeline: Optional[List[Dict]] = None,
schema: Optional["pymongoarrow.api.Schema"] = None,
**mongo_args,
):
self._uri = uri
self._database = database
self._collection = collection
self._pipeline = pipeline
self._schema = schema
self._mongo_args = mongo_args
# If pipeline is unspecified, read the entire collection.
if not pipeline:
self._pipeline = [{"$match": {"_id": {"$exists": "true"}}}]
# Initialize Mongo client lazily later when creating read tasks.
self._client = None
def estimate_inmemory_data_size(self) -> Optional[int]:
# TODO(jian): Add memory size estimation to improve auto-tune of parallelism.
return None
def _get_match_query(self, pipeline: List[Dict]) -> Dict:
if len(pipeline) == 0 or "$match" not in pipeline[0]:
return {}
return pipeline[0]["$match"]
def _get_or_create_client(self):
import pymongo
if self._client is None:
self._client = pymongo.MongoClient(self._uri)
_validate_database_collection_exist(
self._client, self._database, self._collection
)
self._avg_obj_size = self._client[self._database].command(
"collStats", self._collection
)["avgObjSize"]
def get_read_tasks(
self, parallelism: int, per_task_row_limit: Optional[int] = None
) -> List[ReadTask]:
from bson.objectid import ObjectId
self._get_or_create_client()
coll = self._client[self._database][self._collection]
match_query = self._get_match_query(self._pipeline)
partitions_ids = list(
coll.aggregate(
[
{"$match": match_query},
{"$bucketAuto": {"groupBy": "$_id", "buckets": parallelism}},
],
allowDiskUse=True,
)
)
def make_block(
uri: str,
database: str,
collection: str,
pipeline: List[Dict],
min_id: ObjectId,
max_id: ObjectId,
right_closed: bool,
schema: "pymongoarrow.api.Schema",
kwargs: dict,
) -> Block:
import pymongo
from pymongoarrow.api import aggregate_arrow_all
# A range query over the partition.
match = [
{
"$match": {
"_id": {
"$gte": min_id,
"$lte" if right_closed else "$lt": max_id,
}
}
}
]
client = pymongo.MongoClient(uri)
return aggregate_arrow_all(
client[database][collection], match + pipeline, schema=schema, **kwargs
)
read_tasks: List[ReadTask] = []
for i, partition in enumerate(partitions_ids):
metadata = BlockMetadata(
num_rows=partition["count"],
size_bytes=partition["count"] * self._avg_obj_size,
input_files=None,
exec_stats=None,
)
make_block_args = (
self._uri,
self._database,
self._collection,
self._pipeline,
partition["_id"]["min"],
partition["_id"]["max"],
i == len(partitions_ids) - 1,
self._schema,
self._mongo_args,
)
read_task = ReadTask(
lambda args=make_block_args: [make_block(*args)],
metadata,
per_task_row_limit=per_task_row_limit,
)
read_tasks.append(read_task)
return read_tasks
def _validate_database_collection_exist(client, database: str, collection: str):
db_names = client.list_database_names()
if database not in db_names:
raise ValueError(f"The destination database {database} doesn't exist.")
collection_names = client[database].list_collection_names()
if collection not in collection_names:
raise ValueError(f"The destination collection {collection} doesn't exist.")
| MongoDatasource |
python | matplotlib__matplotlib | galleries/examples/text_labels_and_annotations/angle_annotation.py | {
"start": 3139,
"end": 13120
} | class ____(Arc):
"""
Draws an arc between two vectors which appears circular in display space.
"""
def __init__(self, xy, p1, p2, size=75, unit="points", ax=None,
text="", textposition="inside", text_kw=None, **kwargs):
"""
Parameters
----------
xy, p1, p2 : tuple or array of two floats
Center position and two points. Angle annotation is drawn between
the two vectors connecting *p1* and *p2* with *xy*, respectively.
Units are data coordinates.
size : float
Diameter of the angle annotation in units specified by *unit*.
unit : str
One of the following strings to specify the unit of *size*:
* "pixels": pixels
* "points": points, use points instead of pixels to not have a
dependence on the DPI
* "axes width", "axes height": relative units of Axes width, height
* "axes min", "axes max": minimum or maximum of relative Axes
width, height
ax : `matplotlib.axes.Axes`
The Axes to add the angle annotation to.
text : str
The text to mark the angle with.
textposition : {"inside", "outside", "edge"}
Whether to show the text in- or outside the arc. "edge" can be used
for custom positions anchored at the arc's edge.
text_kw : dict
Dictionary of arguments passed to the Annotation.
**kwargs
Further parameters are passed to `matplotlib.patches.Arc`. Use this
to specify, color, linewidth etc. of the arc.
"""
self.ax = ax or plt.gca()
self._xydata = xy # in data coordinates
self.vec1 = p1
self.vec2 = p2
self.size = size
self.unit = unit
self.textposition = textposition
super().__init__(self._xydata, size, size, angle=0.0,
theta1=self.theta1, theta2=self.theta2, **kwargs)
self.set_transform(IdentityTransform())
self.ax.add_patch(self)
self.kw = dict(ha="center", va="center",
xycoords=IdentityTransform(),
xytext=(0, 0), textcoords="offset points",
annotation_clip=True)
self.kw.update(text_kw or {})
self.text = ax.annotate(text, xy=self._center, **self.kw)
def get_size(self):
factor = 1.
if self.unit == "points":
factor = self.ax.figure.dpi / 72.
elif self.unit[:4] == "axes":
b = TransformedBbox(Bbox.unit(), self.ax.transAxes)
dic = {"max": max(b.width, b.height),
"min": min(b.width, b.height),
"width": b.width, "height": b.height}
factor = dic[self.unit[5:]]
return self.size * factor
def set_size(self, size):
self.size = size
def get_center_in_pixels(self):
"""return center in pixels"""
return self.ax.transData.transform(self._xydata)
def set_center(self, xy):
"""set center in data coordinates"""
self._xydata = xy
def get_theta(self, vec):
vec_in_pixels = self.ax.transData.transform(vec) - self._center
return np.rad2deg(np.arctan2(vec_in_pixels[1], vec_in_pixels[0]))
def get_theta1(self):
return self.get_theta(self.vec1)
def get_theta2(self):
return self.get_theta(self.vec2)
def set_theta(self, angle):
pass
# Redefine attributes of the Arc to always give values in pixel space
_center = property(get_center_in_pixels, set_center)
theta1 = property(get_theta1, set_theta)
theta2 = property(get_theta2, set_theta)
width = property(get_size, set_size)
height = property(get_size, set_size)
# The following two methods are needed to update the text position.
def draw(self, renderer):
self.update_text()
super().draw(renderer)
def update_text(self):
c = self._center
s = self.get_size()
angle_span = (self.theta2 - self.theta1) % 360
angle = np.deg2rad(self.theta1 + angle_span / 2)
r = s / 2
if self.textposition == "inside":
r = s / np.interp(angle_span, [60, 90, 135, 180],
[3.3, 3.5, 3.8, 4])
self.text.xy = c + r * np.array([np.cos(angle), np.sin(angle)])
if self.textposition == "outside":
def R90(a, r, w, h):
if a < np.arctan(h/2/(r+w/2)):
return np.sqrt((r+w/2)**2 + (np.tan(a)*(r+w/2))**2)
else:
c = np.sqrt((w/2)**2+(h/2)**2)
T = np.arcsin(c * np.cos(np.pi/2 - a + np.arcsin(h/2/c))/r)
xy = r * np.array([np.cos(a + T), np.sin(a + T)])
xy += np.array([w/2, h/2])
return np.sqrt(np.sum(xy**2))
def R(a, r, w, h):
aa = (a % (np.pi/4))*((a % (np.pi/2)) <= np.pi/4) + \
(np.pi/4 - (a % (np.pi/4)))*((a % (np.pi/2)) >= np.pi/4)
return R90(aa, r, *[w, h][::int(np.sign(np.cos(2*a)))])
bbox = self.text.get_window_extent()
X = R(angle, r, bbox.width, bbox.height)
trans = self.ax.figure.dpi_scale_trans.inverted()
offs = trans.transform(((X-s/2), 0))[0] * 72
self.text.set_position([offs*np.cos(angle), offs*np.sin(angle)])
# %%
# .. _angle-annotation-usage:
#
# Usage
# -----
#
# Required arguments to ``AngleAnnotation`` are the center of the arc, *xy*,
# and two points, such that the arc spans between the two vectors connecting
# *p1* and *p2* with *xy*, respectively. Those are given in data coordinates.
# Further arguments are the *size* of the arc and its *unit*. Additionally, a
# *text* can be specified, that will be drawn either in- or outside of the arc,
# according to the value of *textposition*. Usage of those arguments is shown
# below.
fig, ax = plt.subplots()
fig.canvas.draw() # Need to draw the figure to define renderer
ax.set_title("AngleLabel example")
# Plot two crossing lines and label each angle between them with the above
# ``AngleAnnotation`` tool.
center = (4.5, 650)
p1 = [(2.5, 710), (6.0, 605)]
p2 = [(3.0, 275), (5.5, 900)]
line1, = ax.plot(*zip(*p1))
line2, = ax.plot(*zip(*p2))
point, = ax.plot(*center, marker="o")
am1 = AngleAnnotation(center, p1[1], p2[1], ax=ax, size=75, text=r"$\alpha$")
am2 = AngleAnnotation(center, p2[1], p1[0], ax=ax, size=35, text=r"$\beta$")
am3 = AngleAnnotation(center, p1[0], p2[0], ax=ax, size=75, text=r"$\gamma$")
am4 = AngleAnnotation(center, p2[0], p1[1], ax=ax, size=35, text=r"$\theta$")
# Showcase some styling options for the angle arc, as well as the text.
p = [(6.0, 400), (5.3, 410), (5.6, 300)]
ax.plot(*zip(*p))
am5 = AngleAnnotation(p[1], p[0], p[2], ax=ax, size=40, text=r"$\Phi$",
linestyle="--", color="gray", textposition="outside",
text_kw=dict(fontsize=16, color="gray"))
# %%
# ``AngleLabel`` options
# ----------------------
#
# The *textposition* and *unit* keyword arguments may be used to modify the
# location of the text label, as shown below:
# Helper function to draw angle easily.
def plot_angle(ax, pos, angle, length=0.95, acol="C0", **kwargs):
vec2 = np.array([np.cos(np.deg2rad(angle)), np.sin(np.deg2rad(angle))])
xy = np.c_[[length, 0], [0, 0], vec2*length].T + np.array(pos)
ax.plot(*xy.T, color=acol)
return AngleAnnotation(pos, xy[0], xy[2], ax=ax, **kwargs)
fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
fig.suptitle("AngleLabel keyword arguments")
fig.canvas.draw() # Need to draw the figure to define renderer
# Showcase different text positions.
ax1.margins(y=0.4)
ax1.set_title("textposition")
kw = dict(size=75, unit="points", text=r"$60°$")
am6 = plot_angle(ax1, (2.0, 0), 60, textposition="inside", **kw)
am7 = plot_angle(ax1, (3.5, 0), 60, textposition="outside", **kw)
am8 = plot_angle(ax1, (5.0, 0), 60, textposition="edge",
text_kw=dict(bbox=dict(boxstyle="round", fc="w")), **kw)
am9 = plot_angle(ax1, (6.5, 0), 60, textposition="edge",
text_kw=dict(xytext=(30, 20), arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=-0.2")), **kw)
for x, text in zip([2.0, 3.5, 5.0, 6.5], ['"inside"', '"outside"', '"edge"',
'"edge", custom arrow']):
ax1.annotate(text, xy=(x, 0), xycoords=ax1.get_xaxis_transform(),
bbox=dict(boxstyle="round", fc="w"), ha="left", fontsize=8,
annotation_clip=True)
# Showcase different size units. The effect of this can best be observed
# by interactively changing the figure size
ax2.margins(y=0.4)
ax2.set_title("unit")
kw = dict(text=r"$60°$", textposition="outside")
am10 = plot_angle(ax2, (2.0, 0), 60, size=50, unit="pixels", **kw)
am11 = plot_angle(ax2, (3.5, 0), 60, size=50, unit="points", **kw)
am12 = plot_angle(ax2, (5.0, 0), 60, size=0.25, unit="axes min", **kw)
am13 = plot_angle(ax2, (6.5, 0), 60, size=0.25, unit="axes max", **kw)
for x, text in zip([2.0, 3.5, 5.0, 6.5], ['"pixels"', '"points"',
'"axes min"', '"axes max"']):
ax2.annotate(text, xy=(x, 0), xycoords=ax2.get_xaxis_transform(),
bbox=dict(boxstyle="round", fc="w"), ha="left", fontsize=8,
annotation_clip=True)
plt.show()
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.patches.Arc`
# - `matplotlib.axes.Axes.annotate` / `matplotlib.pyplot.annotate`
# - `matplotlib.text.Annotation`
# - `matplotlib.transforms.IdentityTransform`
# - `matplotlib.transforms.TransformedBbox`
# - `matplotlib.transforms.Bbox`
| AngleAnnotation |
python | numba__numba | numba/core/callconv.py | {
"start": 36473,
"end": 36635
} | class ____(ErrorModel):
"""
The Python error model. Any invalid FP input raises an exception.
"""
raise_on_fp_zero_division = True
| PythonErrorModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 11463,
"end": 13725
} | class ____(NamedTuple):
"""represents state to use when producing "expanded" and
"post compile" bound parameters for a statement.
"expanded" parameters are parameters that are generated at
statement execution time to suit a number of parameters passed, the most
prominent example being the individual elements inside of an IN expression.
"post compile" parameters are parameters where the SQL literal value
will be rendered into the SQL statement at execution time, rather than
being passed as separate parameters to the driver.
To create an :class:`.ExpandedState` instance, use the
:meth:`.SQLCompiler.construct_expanded_state` method on any
:class:`.SQLCompiler` instance.
"""
statement: str
"""String SQL statement with parameters fully expanded"""
parameters: _CoreSingleExecuteParams
"""Parameter dictionary with parameters fully expanded.
For a statement that uses named parameters, this dictionary will map
exactly to the names in the statement. For a statement that uses
positional parameters, the :attr:`.ExpandedState.positional_parameters`
will yield a tuple with the positional parameter set.
"""
processors: Mapping[str, _BindProcessorType[Any]]
"""mapping of bound value processors"""
positiontup: Optional[Sequence[str]]
"""Sequence of string names indicating the order of positional
parameters"""
parameter_expansion: Mapping[str, List[str]]
"""Mapping representing the intermediary link from original parameter
name to list of "expanded" parameter names, for those parameters that
were expanded."""
@property
def positional_parameters(self) -> Tuple[Any, ...]:
"""Tuple of positional parameters, for statements that were compiled
using a positional paramstyle.
"""
if self.positiontup is None:
raise exc.InvalidRequestError(
"statement does not use a positional paramstyle"
)
return tuple(self.parameters[key] for key in self.positiontup)
@property
def additional_parameters(self) -> _CoreSingleExecuteParams:
"""synonym for :attr:`.ExpandedState.parameters`."""
return self.parameters
| ExpandedState |
python | getsentry__sentry | src/sentry/services/organization/model.py | {
"start": 256,
"end": 487
} | class ____(pydantic.BaseModel):
sentry_options: Any | None = None # Placeholder for any sentry post-provisioning data
getsentry_options: Any | None = None # Reserved for getsentry post-provisioning data
| PostProvisionOptions |
python | bokeh__bokeh | src/bokeh/core/property/singletons.py | {
"start": 1837,
"end": 2479
} | class ____:
""" Indicates usage of the intrinsic default value of a property. """
def __copy__(self) -> IntrinsicType:
return self
def __str__(self) -> str:
return "Intrinsic"
def __repr__(self) -> str:
return "Intrinsic"
Intrinsic = IntrinsicType()
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| IntrinsicType |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-array-equal-to-target.py | {
"start": 46,
"end": 424
} | class ____(object):
def minimumOperations(self, nums, target):
"""
:type nums: List[int]
:type target: List[int]
:rtype: int
"""
for i in xrange(len(target)):
target[i] -= nums[i]
return sum(max((target[i] if i < len(target) else 0)-(target[i-1] if i-1 >= 0 else 0), 0) for i in xrange(len(target)+1))
| Solution |
python | ansible__ansible | lib/ansible/module_utils/facts/hardware/freebsd.py | {
"start": 978,
"end": 9910
} | class ____(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
- uptime_seconds
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
uptime_facts = self.get_uptime_facts()
dmi_facts = self.get_dmi_facts()
device_facts = self.get_device_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except TimeoutError:
pass
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(device_facts)
hardware_facts.update(mount_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
cpu_facts['processor'] = []
sysctl = self.module.get_bin_path('sysctl')
if sysctl:
rc, out, err = self.module.run_command("%s -n hw.ncpu" % sysctl, check_rc=False)
cpu_facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
try:
rc, dmesg_boot, err = self.module.run_command(self.module.get_bin_path("dmesg"), check_rc=False)
except Exception:
dmesg_boot = ''
for line in dmesg_boot.splitlines():
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
cpu_facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
cpu_facts['processor_cores'] = line.split()[4]
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
sysctl = self.module.get_bin_path('sysctl')
if sysctl:
rc, out, err = self.module.run_command("%s vm.stats" % sysctl, check_rc=False)
for line in out.splitlines():
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = int(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = int(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = int(data[1])
memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
swapinfo = self.module.get_bin_path('swapinfo')
if swapinfo:
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("%s -k" % swapinfo)
lines = out.splitlines()
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
memory_facts['swaptotal_mb'] = int(data[1]) // 1024
memory_facts['swapfree_mb'] = int(data[3]) // 1024
return memory_facts
def get_uptime_facts(self):
# On FreeBSD, the default format is annoying to parse.
# Use -b to get the raw value and decode it.
sysctl_cmd = self.module.get_bin_path('sysctl')
cmd = [sysctl_cmd, '-b', 'kern.boottime']
# We need to get raw bytes, not UTF-8.
rc, out, err = self.module.run_command(cmd, encoding=None)
# kern.boottime returns seconds and microseconds as two 64-bits
# fields, but we are only interested in the first field.
struct_format = '@L'
struct_size = struct.calcsize(struct_format)
if rc != 0 or len(out) < struct_size:
return {}
(kern_boottime, ) = struct.unpack(struct_format, out[:struct_size])
return {
'uptime_seconds': int(time.time() - kern_boottime),
}
@timeout()
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.splitlines():
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+', ' ', line).split()
mount_statvfs_info = get_mount_size(fields[1])
mount_info = {'mount': fields[1],
'device': fields[0],
'fstype': fields[2],
'options': fields[3]}
mount_info.update(mount_statvfs_info)
mount_facts['mounts'].append(mount_info)
return mount_facts
def get_device_facts(self):
device_facts = {}
sysdir = '/dev'
device_facts['devices'] = {}
# TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
drives = re.compile(
r"""(?x)(
(?:
ada? # ATA/SATA disk device
|da # SCSI disk device
|a?cd # SCSI CDROM drive
|amrd # AMI MegaRAID drive
|idad # Compaq RAID array
|ipsd # IBM ServeRAID RAID array
|md # md(4) disk device
|mfid # LSI MegaRAID SAS array
|mlxd # Mylex RAID disk
|twed # 3ware ATA RAID array
|vtbd # VirtIO Block Device
)\d+
)
"""
)
slices = re.compile(
r"""(?x)(
(?:
ada? # ATA/SATA disk device
|a?cd # SCSI CDROM drive
|amrd # AMI MegaRAID drive
|da # SCSI disk device
|idad # Compaq RAID array
|ipsd # IBM ServeRAID RAID array
|md # md(4) disk device
|mfid # LSI MegaRAID SAS array
|mlxd # Mylex RAID disk
|twed # 3ware ATA RAID array
|vtbd # VirtIO Block Device
)\d+[ps]\d+\w*
)
"""
)
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d and d.group(1) not in device_facts['devices']:
device_facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
device_facts['devices'][d.group(1)].append(s.group(1))
return device_facts
def get_dmi_facts(self):
""" learn dmi facts from system
Use dmidecode executable if available"""
dmi_facts = {}
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_vendor': 'bios-vendor',
'bios_version': 'bios-version',
'board_asset_tag': 'baseboard-asset-tag',
'board_name': 'baseboard-product-name',
'board_serial': 'baseboard-serial-number',
'board_vendor': 'baseboard-manufacturer',
'board_version': 'baseboard-version',
'chassis_asset_tag': 'chassis-asset-tag',
'chassis_serial': 'chassis-serial-number',
'chassis_vendor': 'chassis-manufacturer',
'chassis_version': 'chassis-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer',
}
if dmi_bin is None:
dmi_facts = dict.fromkeys(
DMI_DICT.keys(),
'NA'
)
return dmi_facts
for (k, v) in DMI_DICT.items():
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
# FIXME: why add the fact and then test if it is json?
dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
try:
json.dumps(dmi_facts[k])
except UnicodeDecodeError:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
return dmi_facts
| FreeBSDHardware |
python | redis__redis-py | tests/test_pubsub.py | {
"start": 22332,
"end": 23503
} | class ____:
def my_handler(self, message):
self.message = ["my handler", message]
def test_push_handler(self, r):
if is_resp2_connection(r):
return
p = r.pubsub(push_handler_func=self.my_handler)
p.subscribe("foo")
assert wait_for_message(p) is None
assert self.message == ["my handler", [b"subscribe", b"foo", 1]]
assert r.publish("foo", "test message") == 1
assert wait_for_message(p) is None
assert self.message == ["my handler", [b"message", b"foo", b"test message"]]
@skip_if_server_version_lt("7.0.0")
def test_push_handler_sharded_pubsub(self, r):
if is_resp2_connection(r):
return
p = r.pubsub(push_handler_func=self.my_handler)
p.ssubscribe("foo")
assert wait_for_message(p, func=p.get_sharded_message) is None
assert self.message == ["my handler", [b"ssubscribe", b"foo", 1]]
assert r.spublish("foo", "test message") == 1
assert wait_for_message(p, func=p.get_sharded_message) is None
assert self.message == ["my handler", [b"smessage", b"foo", b"test message"]]
| TestPubSubRESP3Handler |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_index.py | {
"start": 5828,
"end": 14491
} | class ____(OrganizationIndexTest, HybridCloudTestMixin):
method = "post"
def test_missing_params(self) -> None:
self.get_error_response(status_code=400)
def test_valid_params(self) -> None:
data = {"name": "hello world", "slug": "foobar"}
response = self.get_success_response(**data)
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.name == "hello world"
assert org.slug == "foobar"
team_qs = Team.objects.filter(organization_id=organization_id)
assert not team_qs.exists()
self.get_error_response(status_code=400, **data)
def test_org_ownership(self) -> None:
data = {"name": "hello world", "slug": "foobar"}
response = self.get_success_response(**data)
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.name == "hello world"
owners = [owner.id for owner in org.get_owners()]
assert [self.user.id] == owners
def test_with_default_team_false(self) -> None:
data = {"name": "hello world", "slug": "foobar", "defaultTeam": False}
response = self.get_success_response(**data)
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.name == "hello world"
assert org.slug == "foobar"
team_qs = Team.objects.filter(organization_id=organization_id)
assert not team_qs.exists()
def test_with_default_team_true(self) -> None:
data = {"name": "hello world", "slug": "foobar", "defaultTeam": True}
response = self.get_success_response(**data)
organization_id = response.data["id"]
Organization.objects.get(id=organization_id)
team = Team.objects.get(organization_id=organization_id)
assert team.name == "hello world"
org_member = OrganizationMember.objects.get(
organization_id=organization_id, user_id=self.user.id
)
OrganizationMemberTeam.objects.get(organizationmember_id=org_member.id, team_id=team.id)
@pytest.mark.skip("flaky: INFRENG-210")
def test_valid_slugs(self) -> None:
valid_slugs = ["santry", "downtown-canada", "1234-foo"]
for input_slug in valid_slugs:
self.organization.refresh_from_db()
response = self.get_success_response(name=input_slug, slug=input_slug)
org = Organization.objects.get(id=response.data["id"])
assert org.slug == input_slug.lower()
def test_invalid_slugs(self) -> None:
with self.options({"api.rate-limit.org-create": 9001}):
self.get_error_response(name="name", slug=" i have whitespace ", status_code=400)
self.get_error_response(name="name", slug="foo-bar ", status_code=400)
self.get_error_response(name="name", slug="bird-company!", status_code=400)
self.get_error_response(name="name", slug="downtown_canada", status_code=400)
self.get_error_response(name="name", slug="canada-", status_code=400)
self.get_error_response(name="name", slug="-canada", status_code=400)
self.get_error_response(name="name", slug="----", status_code=400)
self.get_error_response(name="name", slug="1234", status_code=400)
self.get_error_response(name="name", slug="I-contain-UPPERCASE", status_code=400)
def test_without_slug(self) -> None:
response = self.get_success_response(name="hello world")
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.slug == "hello-world"
def test_generated_slug_not_entirely_numeric(self) -> None:
response = self.get_success_response(name="1234")
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.slug.startswith("1234-")
assert not org.slug.isdecimal()
@patch(
"sentry.core.endpoints.organization_member_requests_join.ratelimiter.backend.is_limited",
return_value=False,
)
def test_name_slugify(self, is_limited: MagicMock) -> None:
response = self.get_success_response(name="---foo")
org = Organization.objects.get(id=response.data["id"])
assert org.slug == "foo"
org_slug_pattern = re.compile(ORG_SLUG_PATTERN)
response = self.get_success_response(name="---foo---")
org = Organization.objects.get(id=response.data["id"])
assert org.slug != "foo-"
assert org.slug.startswith("foo-")
assert org_slug_pattern.match(org.slug)
response = self.get_success_response(name="___foo___")
org = Organization.objects.get(id=response.data["id"])
assert org.slug != "foo-"
assert org.slug.startswith("foo-")
assert org_slug_pattern.match(org.slug)
response = self.get_success_response(name="foo_bar")
org = Organization.objects.get(id=response.data["id"])
assert org.slug == "foo-bar"
response = self.get_success_response(name="----")
org = Organization.objects.get(id=response.data["id"])
assert len(org.slug) > 0
assert org_slug_pattern.match(org.slug)
response = self.get_success_response(name="CaNaDa")
org = Organization.objects.get(id=response.data["id"])
assert org.slug == "canada"
assert org_slug_pattern.match(org.slug)
response = self.get_success_response(name="1234-foo")
org = Organization.objects.get(id=response.data["id"])
assert org.slug == "1234-foo"
assert org_slug_pattern.match(org.slug)
def test_required_terms_with_terms_url(self) -> None:
data: dict[str, Any] = {"name": "hello world"}
with self.settings(PRIVACY_URL=None, TERMS_URL="https://example.com/terms"):
self.get_success_response(**data)
with self.settings(TERMS_URL=None, PRIVACY_URL="https://example.com/privacy"):
self.get_success_response(**data)
with self.settings(
TERMS_URL="https://example.com/terms", PRIVACY_URL="https://example.com/privacy"
):
data = {"name": "hello world", "agreeTerms": False}
self.get_error_response(status_code=400, **data)
data = {"name": "hello world", "agreeTerms": True}
self.get_success_response(**data)
def test_organization_mapping(self) -> None:
data = {"slug": "santry", "name": "SaNtRy", "idempotencyKey": "1234"}
response = self.get_success_response(**data)
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.slug == data["slug"]
assert org.name == data["name"]
def test_slug_already_taken(self) -> None:
self.create_organization(slug="taken")
self.get_error_response(slug="taken", name="TaKeN", status_code=400)
def test_add_organization_member(self) -> None:
self.login_as(user=self.user)
response = self.get_success_response(name="org name")
org_member = OrganizationMember.objects.get(
organization_id=response.data["id"], user_id=self.user.id
)
self.assert_org_member_mapping(org_member=org_member)
def test_data_consent(self) -> None:
data = {"name": "hello world original", "agreeTerms": True}
response = self.get_success_response(**data)
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.name == data["name"]
assert not OrganizationOption.objects.get_value(org, "sentry:aggregated_data_consent")
data = {"name": "hello world", "agreeTerms": True, "aggregatedDataConsent": True}
response = self.get_success_response(**data)
organization_id = response.data["id"]
org = Organization.objects.get(id=organization_id)
assert org.name == data["name"]
assert OrganizationOption.objects.get_value(org, "sentry:aggregated_data_consent") is True
def test_streamline_only_is_true(self) -> None:
"""
All new organizations should never see the legacy UI.
"""
self.login_as(user=self.user)
response = self.get_success_response(name="acme")
organization = Organization.objects.get(id=response.data["id"])
assert OrganizationOption.objects.get_value(organization, "sentry:streamline_ui_only")
@region_silo_test(regions=create_test_regions("de", "us"))
| OrganizationsCreateTest |
python | dask__distributed | distributed/comm/ws.py | {
"start": 14737,
"end": 14903
} | class ____(BaseTCPBackend):
_connector_class = WSSConnector
_listener_class = WSSListener
backends["ws"] = WSBackend()
backends["wss"] = WSSBackend()
| WSSBackend |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 24022,
"end": 24206
} | class ____:
name: str
ResourceValueSnap: TypeAlias = Union[str, ResourceConfigEnvVarSnap]
UNKNOWN_RESOURCE_TYPE = "Unknown"
@whitelist_for_serdes
@record
| ResourceConfigEnvVarSnap |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_group_autofix_setup_check.py | {
"start": 638,
"end": 3304
} | class ____(TestCase):
def test_missing_integration(self) -> None:
result = get_autofix_integration_setup_problems(
organization=self.organization, project=self.project
)
assert result == "integration_missing"
def test_supported_github_integration(self) -> None:
self.create_integration(
organization=self.organization,
provider=IntegrationProviderSlug.GITHUB.value,
external_id="1",
)
result = get_autofix_integration_setup_problems(
organization=self.organization, project=self.project
)
assert result is None
def test_supported_github_integration_with_disabled_status(self) -> None:
integration = self.create_integration(
organization=self.organization,
provider=IntegrationProviderSlug.GITHUB.value,
external_id="1",
)
with assume_test_silo_mode(SiloMode.CONTROL):
integration.disable()
result = get_autofix_integration_setup_problems(
organization=self.organization, project=self.project
)
assert result == "integration_missing"
def test_supported_github_enterprise_integration(self) -> None:
self.create_integration(
organization=self.organization,
provider=IntegrationProviderSlug.GITHUB_ENTERPRISE.value,
external_id="1",
)
result = get_autofix_integration_setup_problems(
organization=self.organization, project=self.project
)
assert result is None
def test_supported_github_enterprise_integration_with_disabled_status(self) -> None:
integration = self.create_integration(
organization=self.organization,
provider=IntegrationProviderSlug.GITHUB_ENTERPRISE.value,
external_id="1",
)
with assume_test_silo_mode(SiloMode.CONTROL):
integration.disable()
result = get_autofix_integration_setup_problems(
organization=self.organization, project=self.project
)
assert result == "integration_missing"
def test_unsupported_gitlab_integration(self) -> None:
self.create_integration(
organization=self.organization,
provider=IntegrationProviderSlug.GITLAB.value,
external_id="1",
)
result = get_autofix_integration_setup_problems(
organization=self.organization, project=self.project
)
assert result == "integration_missing"
@with_feature("organizations:gen-ai-features")
| GetAutofixIntegrationSetupProblemsTestCase |
python | numba__numba | numba/np/ufunc/wrappers.py | {
"start": 21816,
"end": 24350
} | class ____(object):
def __init__(self, context, builder, args, steps, i, step_offset,
typ, syms, sym_dim):
self.context = context
self.builder = builder
offset = context.get_constant(types.intp, i)
data = builder.load(builder.gep(args, [offset], name="data.ptr"),
name="data")
self.data = data
core_step_ptr = builder.gep(steps, [offset], name="core.step.ptr")
core_step = builder.load(core_step_ptr)
if isinstance(typ, types.Array):
as_scalar = not syms
# number of symbol in the shape spec should match the dimension
# of the array type.
if len(syms) != typ.ndim:
if len(syms) == 0 and typ.ndim == 1:
# This is an exception for handling scalar argument.
# The type can be 1D array for scalar.
# In the future, we may deprecate this exception.
pass
else:
raise TypeError("type and shape signature mismatch for arg "
"#{0}".format(i + 1))
ndim = typ.ndim
shape = [sym_dim[s] for s in syms]
strides = []
for j in range(ndim):
stepptr = builder.gep(steps,
[context.get_constant(types.intp,
step_offset + j)],
name="step.ptr")
step = builder.load(stepptr)
strides.append(step)
ldcls = (_ArrayAsScalarArgLoader
if as_scalar
else _ArrayArgLoader)
self._loader = ldcls(dtype=typ.dtype,
ndim=ndim,
core_step=core_step,
as_scalar=as_scalar,
shape=shape,
strides=strides)
else:
# If typ is not an array
if syms:
raise TypeError("scalar type {0} given for non scalar "
"argument #{1}".format(typ, i + 1))
self._loader = _ScalarArgLoader(dtype=typ, stride=core_step)
def get_array_at_offset(self, ind):
return self._loader.load(context=self.context, builder=self.builder,
data=self.data, ind=ind)
| GUArrayArg |
python | python-openxml__python-docx | src/docx/opc/package.py | {
"start": 747,
"end": 6979
} | class ____:
"""Main API class for |python-opc|.
A new instance is constructed by calling the :meth:`open` class method with a path
to a package file or file-like object containing one.
"""
def after_unmarshal(self):
"""Entry point for any post-unmarshaling processing.
May be overridden by subclasses without forwarding call to super.
"""
# don't place any code here, just catch call if not overridden by
# subclass
pass
@property
def core_properties(self) -> CoreProperties:
"""|CoreProperties| object providing read/write access to the Dublin Core
properties for this document."""
return self._core_properties_part.core_properties
def iter_rels(self) -> Iterator[_Relationship]:
"""Generate exactly one reference to each relationship in the package by
performing a depth-first traversal of the rels graph."""
def walk_rels(
source: OpcPackage | Part, visited: list[Part] | None = None
) -> Iterator[_Relationship]:
visited = [] if visited is None else visited
for rel in source.rels.values():
yield rel
if rel.is_external:
continue
part = rel.target_part
if part in visited:
continue
visited.append(part)
new_source = part
for rel in walk_rels(new_source, visited):
yield rel
for rel in walk_rels(self):
yield rel
def iter_parts(self) -> Iterator[Part]:
"""Generate exactly one reference to each of the parts in the package by
performing a depth-first traversal of the rels graph."""
def walk_parts(source, visited=[]):
for rel in source.rels.values():
if rel.is_external:
continue
part = rel.target_part
if part in visited:
continue
visited.append(part)
yield part
new_source = part
for part in walk_parts(new_source, visited):
yield part
for part in walk_parts(self):
yield part
def load_rel(self, reltype: str, target: Part | str, rId: str, is_external: bool = False):
"""Return newly added |_Relationship| instance of `reltype` between this part
and `target` with key `rId`.
Target mode is set to ``RTM.EXTERNAL`` if `is_external` is |True|. Intended for
use during load from a serialized package, where the rId is well known. Other
methods exist for adding a new relationship to the package during processing.
"""
return self.rels.add_relationship(reltype, target, rId, is_external)
@property
def main_document_part(self):
"""Return a reference to the main document part for this package.
Examples include a document part for a WordprocessingML package, a presentation
part for a PresentationML package, or a workbook part for a SpreadsheetML
package.
"""
return self.part_related_by(RT.OFFICE_DOCUMENT)
def next_partname(self, template: str) -> PackURI:
"""Return a |PackURI| instance representing partname matching `template`.
The returned part-name has the next available numeric suffix to distinguish it
from other parts of its type. `template` is a printf (%)-style template string
containing a single replacement item, a '%d' to be used to insert the integer
portion of the partname. Example: "/word/header%d.xml"
"""
partnames = {part.partname for part in self.iter_parts()}
for n in range(1, len(partnames) + 2):
candidate_partname = template % n
if candidate_partname not in partnames:
return PackURI(candidate_partname)
@classmethod
def open(cls, pkg_file: str | IO[bytes]) -> Self:
"""Return an |OpcPackage| instance loaded with the contents of `pkg_file`."""
pkg_reader = PackageReader.from_file(pkg_file)
package = cls()
Unmarshaller.unmarshal(pkg_reader, package, PartFactory)
return package
def part_related_by(self, reltype: str) -> Part:
"""Return part to which this package has a relationship of `reltype`.
Raises |KeyError| if no such relationship is found and |ValueError| if more than
one such relationship is found.
"""
return self.rels.part_with_reltype(reltype)
@property
def parts(self) -> list[Part]:
"""Return a list containing a reference to each of the parts in this package."""
return list(self.iter_parts())
def relate_to(self, part: Part, reltype: str):
"""Return rId key of new or existing relationship to `part`.
If a relationship of `reltype` to `part` already exists, its rId is returned. Otherwise a
new relationship is created and that rId is returned.
"""
rel = self.rels.get_or_add(reltype, part)
return rel.rId
@lazyproperty
def rels(self):
"""Return a reference to the |Relationships| instance holding the collection of
relationships for this package."""
return Relationships(PACKAGE_URI.baseURI)
def save(self, pkg_file: str | IO[bytes]):
"""Save this package to `pkg_file`.
`pkg_file` can be either a file-path or a file-like object.
"""
for part in self.parts:
part.before_marshal()
PackageWriter.write(pkg_file, self.rels, self.parts)
@property
def _core_properties_part(self) -> CorePropertiesPart:
"""|CorePropertiesPart| object related to this package.
Creates a default core properties part if one is not present (not common).
"""
try:
return cast(CorePropertiesPart, self.part_related_by(RT.CORE_PROPERTIES))
except KeyError:
core_properties_part = CorePropertiesPart.default(self)
self.relate_to(core_properties_part, RT.CORE_PROPERTIES)
return core_properties_part
| OpcPackage |
python | gevent__gevent | src/gevent/resolver/ares.py | {
"start": 967,
"end": 12965
} | class ____(AbstractResolver):
"""
Implementation of the resolver API using the `c-ares`_ library.
This implementation uses the c-ares library to handle name
resolution. c-ares is natively asynchronous at the socket level
and so integrates well into gevent's event loop.
In comparison to :class:`gevent.resolver_thread.Resolver` (which
delegates to the native system resolver), the implementation is
much more complex. In addition, there have been reports of it not
properly honoring certain system configurations (for example, the
order in which IPv4 and IPv6 results are returned may not match
the threaded resolver). However, because it does not use threads,
it may scale better for applications that make many lookups.
There are some known differences from the system resolver.
- ``gethostbyname_ex`` and ``gethostbyaddr`` may return
different for the ``aliaslist`` tuple member. (Sometimes the
same, sometimes in a different order, sometimes a different
alias altogether.)
- ``gethostbyname_ex`` may return the ``ipaddrlist`` in a
different order.
- ``getaddrinfo`` does not return ``SOCK_RAW`` results.
- ``getaddrinfo`` may return results in a different order.
- Handling of ``.local`` (mDNS) names may be different, even
if they are listed in the hosts file.
- c-ares will not resolve ``broadcasthost``, even if listed in
the hosts file prior to 2020-04-30.
- This implementation may raise ``gaierror(4)`` where the
system implementation would raise ``herror(1)`` or vice versa,
with different error numbers. However, after 2020-04-30, this should be
much reduced.
- The results for ``localhost`` may be different. In
particular, some system resolvers will return more results
from ``getaddrinfo`` than c-ares does, such as SOCK_DGRAM
results, and c-ares may report more ips on a multi-homed
host.
- The system implementation may return some names fully qualified, where
this implementation returns only the host name. This appears to be
the case only with entries found in ``/etc/hosts``.
- c-ares supports a limited set of flags for ``getnameinfo`` and
``getaddrinfo``; unknown flags are ignored. System-specific flags
such as ``AI_V4MAPPED_CFG`` are not supported.
- ``getaddrinfo`` may return canonical names even without the ``AI_CANONNAME``
being set.
- ``getaddrinfo`` does not appear to support IPv6 symbolic scope IDs.
.. caution::
This module is considered extremely experimental on PyPy, and
due to its implementation in cython, it may be slower. It may also lead to
interpreter crashes.
.. versionchanged:: 1.5.0
This version of gevent typically embeds c-ares 1.15.0 or newer. In
that version of c-ares, domains ending in ``.onion`` `are never
resolved <https://github.com/c-ares/c-ares/issues/196>`_ or even
sent to the DNS server.
.. versionchanged:: 20.5.0
``getaddrinfo`` is now implemented using the native c-ares function
from c-ares 1.16 or newer.
.. versionchanged:: 20.5.0
Now ``herror`` and ``gaierror`` are raised more consistently with
the standard library resolver, and have more consistent errno values.
Handling of localhost and broadcast names is now more consistent.
.. versionchanged:: 22.10.1
Now has a ``__del__`` method that warns if the object is destroyed
without being properly closed.
.. _c-ares: http://c-ares.haxx.se
"""
cares_class = channel
def __init__(self, hub=None, use_environ=True, **kwargs):
AbstractResolver.__init__(self)
if hub is None:
hub = get_hub()
self.hub = hub
if use_environ:
for setting in config.settings.values():
if isinstance(setting, AresSettingMixin):
value = setting.get()
if value is not None:
kwargs.setdefault(setting.kwarg_name, value)
self.cares = self.cares_class(hub.loop, **kwargs)
self.pid = os.getpid()
self.params = kwargs
self.fork_watcher = hub.loop.fork(ref=False) # We shouldn't keep the loop alive
self.fork_watcher.start(self._on_fork)
def __repr__(self):
return '<gevent.resolver_ares.Resolver at 0x%x ares=%r>' % (id(self), self.cares)
def _on_fork(self):
# NOTE: See comment in gevent.hub.reinit.
pid = os.getpid()
if pid != self.pid:
self.hub.loop.run_callback(self.cares.destroy)
self.cares = self.cares_class(self.hub.loop, **self.params)
self.pid = pid
def close(self):
AbstractResolver.close(self)
if self.cares is not None:
self.hub.loop.run_callback(self.cares.destroy)
self.cares = None
self.fork_watcher.stop()
def __del__(self):
if self.cares is not None:
warnings.warn("cares Resolver destroyed while not closed",
ResourceWarning)
self.close()
def _gethostbyname_ex(self, hostname_bytes, family):
while True:
ares = self.cares
try:
waiter = Waiter(self.hub)
ares.gethostbyname(waiter, hostname_bytes, family)
result = waiter.get()
if not result[-1]:
raise herror(EAI_NONAME, self.EAI_NONAME_MSG)
return result
except herror as ex:
if ares is self.cares:
if ex.args[0] == 1:
# Somewhere along the line, the internal
# implementation of gethostbyname_ex changed to invoke
# getaddrinfo() as a first pass, much like we do for ``getnameinfo()``;
# this means it raises a different error for not-found hosts.
raise gaierror(EAI_NONAME, self.EAI_NONAME_MSG)
raise
# "self.cares is not ares" means channel was destroyed (because we were forked)
def _lookup_port(self, port, socktype):
return lookup_port(port, socktype)
def __getaddrinfo(
self, host, port,
family=0, socktype=0, proto=0, flags=0,
fill_in_type_proto=True
):
"""
Returns a list ``(family, socktype, proto, canonname, sockaddr)``
:raises gaierror: If no results are found.
"""
# pylint:disable=too-many-locals,too-many-branches
if isinstance(host, text_type):
host = host.encode('idna')
if isinstance(port, text_type):
port = port.encode('ascii')
elif isinstance(port, integer_types):
if port == 0:
port = None
else:
port = str(port).encode('ascii')
waiter = Waiter(self.hub)
self.cares.getaddrinfo(
waiter,
host,
port,
family,
socktype,
proto,
flags,
)
# Result is a list of:
# (family, socktype, proto, canonname, sockaddr)
# Where sockaddr depends on family; for INET it is
# (address, port)
# and INET6 is
# (address, port, flow info, scope id)
result = waiter.get()
if not result:
raise gaierror(EAI_NONAME, self.EAI_NONAME_MSG)
if fill_in_type_proto:
# c-ares 1.16 DOES NOT fill in socktype or proto in the results,
# ever. It's at least supposed to do that if they were given as
# hints, but it doesn't (https://github.com/c-ares/c-ares/issues/317)
# Sigh.
# The SOL_* constants are another (older?) name for IPPROTO_*
if socktype:
hard_type_proto = [
(socktype, SOL_TCP if socktype == SOCK_STREAM else SOL_UDP),
]
elif proto:
hard_type_proto = [
(SOCK_STREAM if proto == SOL_TCP else SOCK_DGRAM, proto),
]
else:
hard_type_proto = [
(SOCK_STREAM, SOL_TCP),
(SOCK_DGRAM, SOL_UDP),
]
# pylint:disable=not-an-iterable,unsubscriptable-object
result = [
(rfamily,
hard_type if not rtype else rtype,
hard_proto if not rproto else rproto,
rcanon,
raddr)
for rfamily, rtype, rproto, rcanon, raddr
in result
for hard_type, hard_proto
in hard_type_proto
]
return result
def _getaddrinfo(self, host_bytes, port, family, socktype, proto, flags):
while True:
ares = self.cares
try:
return self.__getaddrinfo(host_bytes, port, family, socktype, proto, flags)
except gaierror:
if ares is self.cares:
raise
def __gethostbyaddr(self, ip_address):
waiter = Waiter(self.hub)
try:
self.cares.gethostbyaddr(waiter, ip_address)
return waiter.get()
except InvalidIP:
result = self._getaddrinfo(ip_address, None,
family=AF_UNSPEC, socktype=SOCK_DGRAM,
proto=0, flags=0)
if not result:
raise
# pylint:disable=unsubscriptable-object
_ip_address = result[0][-1][0]
if isinstance(_ip_address, text_type):
_ip_address = _ip_address.encode('ascii')
if _ip_address == ip_address:
raise
waiter.clear()
self.cares.gethostbyaddr(waiter, _ip_address)
return waiter.get()
def _gethostbyaddr(self, ip_address_bytes):
while True:
ares = self.cares
try:
return self.__gethostbyaddr(ip_address_bytes)
except herror:
if ares is self.cares:
raise
def __getnameinfo(self, hostname, port, sockaddr, flags):
result = self.__getaddrinfo(
hostname, port,
family=AF_UNSPEC, socktype=SOCK_DGRAM,
proto=0, flags=0,
fill_in_type_proto=False)
if len(result) != 1:
raise error('sockaddr resolved to multiple addresses')
family, _socktype, _proto, _name, address = result[0]
if family == AF_INET:
if len(sockaddr) != 2:
raise error("IPv4 sockaddr must be 2 tuple")
elif family == AF_INET6:
address = address[:2] + sockaddr[2:]
waiter = Waiter(self.hub)
self.cares.getnameinfo(waiter, address, flags)
node, service = waiter.get()
if service is None:
# ares docs: "If the query did not complete
# successfully, or one of the values was not
# requested, node or service will be NULL ". Python 2
# allows that for the service, but Python 3 raises
# an error. This is tested by test_socket in py 3.4
err = gaierror(EAI_NONAME, self.EAI_NONAME_MSG)
err.errno = EAI_NONAME
raise err
return node, service or '0'
def _getnameinfo(self, address_bytes, port, sockaddr, flags):
while True:
ares = self.cares
try:
return self.__getnameinfo(address_bytes, port, sockaddr, flags)
except gaierror:
if ares is self.cares:
raise
# # Things that need proper error handling
# gethostbyaddr = AbstractResolver.convert_gaierror_to_herror(AbstractResolver.gethostbyaddr)
| Resolver |
python | matplotlib__matplotlib | lib/mpl_toolkits/mplot3d/axis3d.py | {
"start": 1084,
"end": 28516
} | class ____(maxis.XAxis):
"""An Axis class for the 3D plots."""
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1)},
}
def _old_init(self, adir, v_intervalx, d_intervalx, axes, *args,
rotate_label=None, **kwargs):
return locals()
def _new_init(self, axes, *, rotate_label=None, **kwargs):
return locals()
def __init__(self, *args, **kwargs):
params = _api.select_matching_signature(
[self._old_init, self._new_init], *args, **kwargs)
if "adir" in params:
_api.warn_deprecated(
"3.6", message=f"The signature of 3D Axis constructors has "
f"changed in %(since)s; the new signature is "
f"{inspect.signature(type(self).__init__)}", pending=True)
if params["adir"] != self.axis_name:
raise ValueError(f"Cannot instantiate {type(self).__name__} "
f"with adir={params['adir']!r}")
axes = params["axes"]
rotate_label = params["rotate_label"]
args = params.get("args", ())
kwargs = params["kwargs"]
name = self.axis_name
self._label_position = 'default'
self._tick_position = 'default'
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[name].copy()
# Common parts
self._axinfo.update({
'label': {'va': 'center', 'ha': 'center',
'rotation_mode': 'anchor'},
'color': mpl.rcParams[f'axes3d.{name}axis.panecolor'],
'tick': {
'inward_factor': 0.2,
'outward_factor': 0.1,
},
})
if mpl.rcParams['_internal.classic_mode']:
self._axinfo.update({
'axisline': {'linewidth': 0.75, 'color': (0, 0, 0, 1)},
'grid': {
'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0,
'linestyle': '-',
},
})
self._axinfo['tick'].update({
'linewidth': {
True: mpl.rcParams['lines.linewidth'], # major
False: mpl.rcParams['lines.linewidth'], # minor
}
})
else:
self._axinfo.update({
'axisline': {
'linewidth': mpl.rcParams['axes.linewidth'],
'color': mpl.rcParams['axes.edgecolor'],
},
'grid': {
'color': mpl.rcParams['grid.color'],
'linewidth': mpl.rcParams['grid.linewidth'],
'linestyle': mpl.rcParams['grid.linestyle'],
},
})
self._axinfo['tick'].update({
'linewidth': {
True: ( # major
mpl.rcParams['xtick.major.width'] if name in 'xz'
else mpl.rcParams['ytick.major.width']),
False: ( # minor
mpl.rcParams['xtick.minor.width'] if name in 'xz'
else mpl.rcParams['ytick.minor.width']),
}
})
super().__init__(axes, *args, **kwargs)
# data and viewing intervals for this direction
if "d_intervalx" in params:
self.set_data_interval(*params["d_intervalx"])
if "v_intervalx" in params:
self.set_view_interval(*params["v_intervalx"])
self.set_rotate_label(rotate_label)
self._init3d() # Inline after init3d deprecation elapses.
__init__.__signature__ = inspect.signature(_new_init)
adir = _api.deprecated("3.6", pending=True)(
property(lambda self: self.axis_name))
def _init3d(self):
self.line = mlines.Line2D(
xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon([[0, 0], [0, 1]], closed=False)
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([])
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
@_api.deprecated("3.6", pending=True)
def init3d(self): # After deprecation elapses, inline _init3d to __init__.
self._init3d()
def get_major_ticks(self, numticks=None):
ticks = super().get_major_ticks(numticks)
for t in ticks:
for obj in [
t.tick1line, t.tick2line, t.gridline, t.label1, t.label2]:
obj.set_transform(self.axes.transData)
return ticks
def get_minor_ticks(self, numticks=None):
ticks = super().get_minor_ticks(numticks)
for t in ticks:
for obj in [
t.tick1line, t.tick2line, t.gridline, t.label1, t.label2]:
obj.set_transform(self.axes.transData)
return ticks
def set_ticks_position(self, position):
"""
Set the ticks position.
Parameters
----------
position : {'lower', 'upper', 'both', 'default', 'none'}
The position of the bolded axis lines, ticks, and tick labels.
"""
_api.check_in_list(['lower', 'upper', 'both', 'default', 'none'],
position=position)
self._tick_position = position
def get_ticks_position(self):
"""
Get the ticks position.
Returns
-------
str : {'lower', 'upper', 'both', 'default', 'none'}
The position of the bolded axis lines, ticks, and tick labels.
"""
return self._tick_position
def set_label_position(self, position):
"""
Set the label position.
Parameters
----------
position : {'lower', 'upper', 'both', 'default', 'none'}
The position of the axis label.
"""
_api.check_in_list(['lower', 'upper', 'both', 'default', 'none'],
position=position)
self._label_position = position
def get_label_position(self):
"""
Get the label position.
Returns
-------
str : {'lower', 'upper', 'both', 'default', 'none'}
The position of the axis label.
"""
return self._label_position
def set_pane_color(self, color, alpha=None):
"""
Set pane color.
Parameters
----------
color : :mpltype:`color`
Color for axis pane.
alpha : float, optional
Alpha value for axis pane. If None, base it on *color*.
"""
color = mcolors.to_rgba(color, alpha)
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
self.stale = True
def set_rotate_label(self, val):
"""
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
"""
self._rotate_label = val
self.stale = True
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self):
mins, maxs = np.array([
self.axes.get_xbound(),
self.axes.get_ybound(),
self.axes.get_zbound(),
]).T
# Project the bounds along the current position of the cube:
bounds = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
bounds_proj = self.axes._transformed_cube(bounds)
# Determine which one of the parallel planes are higher up:
means_z0 = np.zeros(3)
means_z1 = np.zeros(3)
for i in range(3):
means_z0[i] = np.mean(bounds_proj[self._PLANES[2 * i], 2])
means_z1[i] = np.mean(bounds_proj[self._PLANES[2 * i + 1], 2])
highs = means_z0 < means_z1
# Special handling for edge-on views
equals = np.abs(means_z0 - means_z1) <= np.finfo(float).eps
if np.sum(equals) == 2:
vertical = np.where(~equals)[0][0]
if vertical == 2: # looking at XY plane
highs = np.array([True, True, highs[2]])
elif vertical == 1: # looking at XZ plane
highs = np.array([True, highs[1], False])
elif vertical == 0: # looking at YZ plane
highs = np.array([highs[0], False, False])
return mins, maxs, bounds_proj, highs
def _calc_centers_deltas(self, maxs, mins):
centers = 0.5 * (maxs + mins)
# In mpl3.8, the scale factor was 1/12. mpl3.9 changes this to
# 1/12 * 24/25 = 0.08 to compensate for the change in automargin
# behavior and keep appearance the same. The 24/25 factor is from the
# 1/48 padding added to each side of the axis in mpl3.8.
scale = 0.08
deltas = (maxs - mins) * scale
return centers, deltas
def _get_axis_line_edge_points(self, minmax, maxmin, position=None):
"""Get the edge points for the black bolded axis line."""
# When changing vertical axis some of the axes has to be
# moved to the other plane so it looks the same as if the z-axis
# was the vertical axis.
mb = [minmax, maxmin] # line from origin to nearest corner to camera
mb_rev = mb[::-1]
mm = [[mb, mb_rev, mb_rev], [mb_rev, mb_rev, mb], [mb, mb, mb]]
mm = mm[self.axes._vertical_axis][self._axinfo["i"]]
juggled = self._axinfo["juggled"]
edge_point_0 = mm[0].copy() # origin point
if ((position == 'lower' and mm[1][juggled[-1]] < mm[0][juggled[-1]]) or
(position == 'upper' and mm[1][juggled[-1]] > mm[0][juggled[-1]])):
edge_point_0[juggled[-1]] = mm[1][juggled[-1]]
else:
edge_point_0[juggled[0]] = mm[1][juggled[0]]
edge_point_1 = edge_point_0.copy()
edge_point_1[juggled[1]] = mm[1][juggled[1]]
return edge_point_0, edge_point_1
def _get_all_axis_line_edge_points(self, minmax, maxmin, axis_position=None):
# Determine edge points for the axis lines
edgep1s = []
edgep2s = []
position = []
if axis_position in (None, 'default'):
edgep1, edgep2 = self._get_axis_line_edge_points(minmax, maxmin)
edgep1s = [edgep1]
edgep2s = [edgep2]
position = ['default']
else:
edgep1_l, edgep2_l = self._get_axis_line_edge_points(minmax, maxmin,
position='lower')
edgep1_u, edgep2_u = self._get_axis_line_edge_points(minmax, maxmin,
position='upper')
if axis_position in ('lower', 'both'):
edgep1s.append(edgep1_l)
edgep2s.append(edgep2_l)
position.append('lower')
if axis_position in ('upper', 'both'):
edgep1s.append(edgep1_u)
edgep2s.append(edgep2_u)
position.append('upper')
return edgep1s, edgep2s, position
def _get_tickdir(self, position):
"""
Get the direction of the tick.
Parameters
----------
position : str, optional : {'upper', 'lower', 'default'}
The position of the axis.
Returns
-------
tickdir : int
Index which indicates which coordinate the tick line will
align with.
"""
_api.check_in_list(('upper', 'lower', 'default'), position=position)
# TODO: Move somewhere else where it's triggered less:
tickdirs_base = [v["tickdir"] for v in self._AXINFO.values()] # default
elev_mod = np.mod(self.axes.elev + 180, 360) - 180
azim_mod = np.mod(self.axes.azim, 360)
if position == 'upper':
if elev_mod >= 0:
tickdirs_base = [2, 2, 0]
else:
tickdirs_base = [1, 0, 0]
if 0 <= azim_mod < 180:
tickdirs_base[2] = 1
elif position == 'lower':
if elev_mod >= 0:
tickdirs_base = [1, 0, 1]
else:
tickdirs_base = [2, 2, 1]
if 0 <= azim_mod < 180:
tickdirs_base[2] = 0
info_i = [v["i"] for v in self._AXINFO.values()]
i = self._axinfo["i"]
vert_ax = self.axes._vertical_axis
j = vert_ax - 2
# default: tickdir = [[1, 2, 1], [2, 2, 0], [1, 0, 0]][vert_ax][i]
tickdir = np.roll(info_i, -j)[np.roll(tickdirs_base, j)][i]
return tickdir
def active_pane(self):
mins, maxs, tc, highs = self._get_coord_info()
info = self._axinfo
index = info['i']
if not highs[index]:
loc = mins[index]
plane = self._PLANES[2 * index]
else:
loc = maxs[index]
plane = self._PLANES[2 * index + 1]
xys = np.array([tc[p] for p in plane])
return xys, loc
def draw_pane(self, renderer):
"""
Draw pane.
Parameters
----------
renderer : `~matplotlib.backend_bases.RendererBase` subclass
"""
renderer.open_group('pane3d', gid=self.get_gid())
xys, loc = self.active_pane()
self.pane.xy = xys[:, :2]
self.pane.draw(renderer)
renderer.close_group('pane3d')
def _axmask(self):
axmask = [True, True, True]
axmask[self._axinfo["i"]] = False
return axmask
def _draw_ticks(self, renderer, edgep1, centers, deltas, highs,
deltas_per_point, pos):
ticks = self._update_ticks()
info = self._axinfo
index = info["i"]
juggled = info["juggled"]
mins, maxs, tc, highs = self._get_coord_info()
centers, deltas = self._calc_centers_deltas(maxs, mins)
# Draw ticks:
tickdir = self._get_tickdir(pos)
tickdelta = deltas[tickdir] if highs[tickdir] else -deltas[tickdir]
tick_info = info['tick']
tick_out = tick_info['outward_factor'] * tickdelta
tick_in = tick_info['inward_factor'] * tickdelta
tick_lw = tick_info['linewidth']
edgep1_tickdir = edgep1[tickdir]
out_tickdir = edgep1_tickdir + tick_out
in_tickdir = edgep1_tickdir - tick_in
default_label_offset = 8. # A rough estimate
points = deltas_per_point * deltas
for tick in ticks:
# Get tick line positions
pos = edgep1.copy()
pos[index] = tick.get_loc()
pos[tickdir] = out_tickdir
x1, y1, z1 = proj3d.proj_transform(*pos, self.axes.M)
pos[tickdir] = in_tickdir
x2, y2, z2 = proj3d.proj_transform(*pos, self.axes.M)
# Get position of label
labeldeltas = (tick.get_pad() + default_label_offset) * points
pos[tickdir] = edgep1_tickdir
pos = _move_from_center(pos, centers, labeldeltas, self._axmask())
lx, ly, lz = proj3d.proj_transform(*pos, self.axes.M)
_tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.tick1line.set_linewidth(tick_lw[tick._major])
tick.draw(renderer)
def _draw_offset_text(self, renderer, edgep1, edgep2, labeldeltas, centers,
highs, pep, dx, dy):
# Get general axis information:
info = self._axinfo
index = info["i"]
juggled = info["juggled"]
tickdir = info["tickdir"]
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2:
outeredgep = edgep1
outerindex = 0
else:
outeredgep = edgep2
outerindex = 1
pos = _move_from_center(outeredgep, centers, labeldeltas,
self._axmask())
olx, oly, olz = proj3d.proj_transform(*pos, self.axes.M)
self.offsetText.set_text(self.major.formatter.get_offset())
self.offsetText.set_position((olx, oly))
angle = art3d._norm_text_angle(np.rad2deg(np.arctan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
# ----------------------------------------------------------------------
# Note: the following statement for determining the proper alignment of
# the offset text. This was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but this
# seems to be more of a geometry issue (in other words, I might be
# using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[tickdir] <= pep[tickdir, outerindex],
# centpt[index] <= pep[index, outerindex])
#
# Three-letters (e.g., TFT, FTT) are short-hand for the array of bools
# from the variable 'highs'.
# ---------------------------------------------------------------------
centpt = proj3d.proj_transform(*centers, self.axes.M)
if centpt[tickdir] > pep[tickdir, outerindex]:
# if FT and if highs has an even number of Trues
if (centpt[index] <= pep[index, outerindex]
and np.count_nonzero(highs) % 2 == 0):
# Usually, this means align right, except for the FTT case,
# in which offset for axis 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2):
align = 'left'
else:
align = 'right'
else:
# The FF case
align = 'left'
else:
# if TF and if highs has an even number of Trues
if (centpt[index] > pep[index, outerindex]
and np.count_nonzero(highs) % 2 == 0):
# Usually mean align left, except if it is axis 2
align = 'right' if index == 2 else 'left'
else:
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
def _draw_labels(self, renderer, edgep1, edgep2, labeldeltas, centers, dx, dy):
label = self._axinfo["label"]
# Draw labels
lxyz = 0.5 * (edgep1 + edgep2)
lxyz = _move_from_center(lxyz, centers, labeldeltas, self._axmask())
tlx, tly, tlz = proj3d.proj_transform(*lxyz, self.axes.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d._norm_text_angle(np.rad2deg(np.arctan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(label['va'])
self.label.set_ha(label['ha'])
self.label.set_rotation_mode(label['rotation_mode'])
self.label.draw(renderer)
@artist.allow_rasterization
def draw(self, renderer):
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
renderer.open_group("axis3d", gid=self.get_gid())
# Get general axis information:
mins, maxs, tc, highs = self._get_coord_info()
centers, deltas = self._calc_centers_deltas(maxs, mins)
# Calculate offset distances
# A rough estimate; points are ambiguous since 3D plots rotate
reltoinches = self.get_figure(root=False).dpi_scale_trans.inverted()
ax_inches = reltoinches.transform(self.axes.bbox.size)
ax_points_estimate = sum(72. * ax_inches)
deltas_per_point = 48 / ax_points_estimate
default_offset = 21.
labeldeltas = (self.labelpad + default_offset) * deltas_per_point * deltas
# Determine edge points for the axis lines
minmax = np.where(highs, maxs, mins) # "origin" point
maxmin = np.where(~highs, maxs, mins) # "opposite" corner near camera
for edgep1, edgep2, pos in zip(*self._get_all_axis_line_edge_points(
minmax, maxmin, self._tick_position)):
# Project the edge points along the current position
pep = proj3d._proj_trans_points([edgep1, edgep2], self.axes.M)
pep = np.asarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform([pep[0:2, 1]]) -
self.axes.transAxes.transform([pep[0:2, 0]]))[0]
# Draw the lines
self.line.set_data(pep[0], pep[1])
self.line.draw(renderer)
# Draw ticks
self._draw_ticks(renderer, edgep1, centers, deltas, highs,
deltas_per_point, pos)
# Draw Offset text
self._draw_offset_text(renderer, edgep1, edgep2, labeldeltas,
centers, highs, pep, dx, dy)
for edgep1, edgep2, pos in zip(*self._get_all_axis_line_edge_points(
minmax, maxmin, self._label_position)):
# See comments above
pep = proj3d._proj_trans_points([edgep1, edgep2], self.axes.M)
pep = np.asarray(pep)
dx, dy = (self.axes.transAxes.transform([pep[0:2, 1]]) -
self.axes.transAxes.transform([pep[0:2, 0]]))[0]
# Draw labels
self._draw_labels(renderer, edgep1, edgep2, labeldeltas, centers, dx, dy)
renderer.close_group('axis3d')
self.stale = False
@artist.allow_rasterization
def draw_grid(self, renderer):
if not self.axes._draw_grid:
return
renderer.open_group("grid3d", gid=self.get_gid())
ticks = self._update_ticks()
if len(ticks):
# Get general axis information:
info = self._axinfo
index = info["i"]
mins, maxs, tc, highs = self._get_coord_info()
minmax = np.where(highs, maxs, mins)
maxmin = np.where(~highs, maxs, mins)
# Grid points where the planes meet
xyz0 = np.tile(minmax, (len(ticks), 1))
xyz0[:, index] = [tick.get_loc() for tick in ticks]
# Grid lines go from the end of one plane through the plane
# intersection (at xyz0) to the end of the other plane. The first
# point (0) differs along dimension index-2 and the last (2) along
# dimension index-1.
lines = np.stack([xyz0, xyz0, xyz0], axis=1)
lines[:, 0, index - 2] = maxmin[index - 2]
lines[:, 2, index - 1] = maxmin[index - 1]
self.gridlines.set_segments(lines)
gridinfo = info['grid']
self.gridlines.set_color(gridinfo['color'])
self.gridlines.set_linewidth(gridinfo['linewidth'])
self.gridlines.set_linestyle(gridinfo['linestyle'])
self.gridlines.do_3d_projection()
self.gridlines.draw(renderer)
renderer.close_group('grid3d')
# TODO: Get this to work (more) properly when mplot3d supports the
# transforms framework.
def get_tightbbox(self, renderer=None, *, for_layout_only=False):
# docstring inherited
if not self.get_visible():
return
# We have to directly access the internal data structures
# (and hope they are up to date) because at draw time we
# shift the ticks and their labels around in (x, y) space
# based on the projection, the current view port, and their
# position in 3D space. If we extend the transforms framework
# into 3D we would not need to do this different book keeping
# than we do in the normal axis
major_locs = self.get_majorticklocs()
minor_locs = self.get_minorticklocs()
ticks = [*self.get_minor_ticks(len(minor_locs)),
*self.get_major_ticks(len(major_locs))]
view_low, view_high = self.get_view_interval()
if view_low > view_high:
view_low, view_high = view_high, view_low
interval_t = self.get_transform().transform([view_low, view_high])
ticks_to_draw = []
for tick in ticks:
try:
loc_t = self.get_transform().transform(tick.get_loc())
except AssertionError:
# Transform.transform doesn't allow masked values but
# some scales might make them, so we need this try/except.
pass
else:
if mtransforms._interval_contains_close(interval_t, loc_t):
ticks_to_draw.append(tick)
ticks = ticks_to_draw
bb_1, bb_2 = self._get_ticklabel_bboxes(ticks, renderer)
other = []
if self.line.get_visible():
other.append(self.line.get_window_extent(renderer))
if (self.label.get_visible() and not for_layout_only and
self.label.get_text()):
other.append(self.label.get_window_extent(renderer))
return mtransforms.Bbox.union([*bb_1, *bb_2, *other])
d_interval = _api.deprecated(
"3.6", alternative="get_data_interval", pending=True)(
property(lambda self: self.get_data_interval(),
lambda self, minmax: self.set_data_interval(*minmax)))
v_interval = _api.deprecated(
"3.6", alternative="get_view_interval", pending=True)(
property(lambda self: self.get_view_interval(),
lambda self, minmax: self.set_view_interval(*minmax)))
| Axis |
python | pypa__virtualenv | src/virtualenv/app_data/via_tempdir.py | {
"start": 214,
"end": 811
} | class ____(AppDataDiskFolder):
transient = True
can_update = False
def __init__(self) -> None:
super().__init__(folder=mkdtemp())
LOGGER.debug("created temporary app data folder %s", self.lock.path)
def reset(self):
"""This is a temporary folder, is already empty to start with."""
def close(self):
LOGGER.debug("remove temporary app data folder %s", self.lock.path)
safe_delete(self.lock.path)
def embed_update_log(self, distribution, for_py_version):
raise NotImplementedError
__all__ = [
"TempAppData",
]
| TempAppData |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol7.py | {
"start": 419,
"end": 500
} | class ____(Protocol):
def __call__(self, x: int, /, *args: *tuple[int]): ...
| P3 |
python | kubernetes-client__python | kubernetes/client/models/v1_ip_block.py | {
"start": 383,
"end": 4726
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'cidr': 'str',
'_except': 'list[str]'
}
attribute_map = {
'cidr': 'cidr',
'_except': 'except'
}
def __init__(self, cidr=None, _except=None, local_vars_configuration=None): # noqa: E501
"""V1IPBlock - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._cidr = None
self.__except = None
self.discriminator = None
self.cidr = cidr
if _except is not None:
self._except = _except
@property
def cidr(self):
"""Gets the cidr of this V1IPBlock. # noqa: E501
cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" # noqa: E501
:return: The cidr of this V1IPBlock. # noqa: E501
:rtype: str
"""
return self._cidr
@cidr.setter
def cidr(self, cidr):
"""Sets the cidr of this V1IPBlock.
cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" # noqa: E501
:param cidr: The cidr of this V1IPBlock. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and cidr is None: # noqa: E501
raise ValueError("Invalid value for `cidr`, must not be `None`") # noqa: E501
self._cidr = cidr
@property
def _except(self):
"""Gets the _except of this V1IPBlock. # noqa: E501
except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range # noqa: E501
:return: The _except of this V1IPBlock. # noqa: E501
:rtype: list[str]
"""
return self.__except
@_except.setter
def _except(self, _except):
"""Sets the _except of this V1IPBlock.
except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range # noqa: E501
:param _except: The _except of this V1IPBlock. # noqa: E501
:type: list[str]
"""
self.__except = _except
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IPBlock):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IPBlock):
return True
return self.to_dict() != other.to_dict()
| V1IPBlock |
python | pytorch__pytorch | benchmarks/tensorexpr/conv.py | {
"start": 2487,
"end": 2651
} | class ____(ConvImplBench):
def __init__(self, *args):
super().__init__("conv", *args)
@staticmethod
def module():
return "conv"
| ConvBench |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_boolean_trap/FBT.py | {
"start": 3745,
"end": 3929
} | class ____:
def __or__(self, other: Self | bool) -> Self: ...
def __ror__(self, other: Self | bool) -> Self: ...
def __ior__(self, other: Self | bool) -> Self: ...
| BooleanArray |
python | realpython__materials | python-class/animals.py | {
"start": 460,
"end": 537
} | class ____(Bird):
def fly(self):
print("The eagle is flying")
| Eagle |
python | django__django | tests/composite_pk/models/tenant.py | {
"start": 1388,
"end": 1593
} | class ____(models.Model):
pk = models.CompositePrimaryKey("tenant_id", "id")
tenant = models.ForeignKey(Tenant, on_delete=models.CASCADE, default=1)
id = models.UUIDField(default=uuid.uuid4)
| Post |
python | bokeh__bokeh | tests/unit/bokeh/core/property/_util_property.py | {
"start": 1921,
"end": 2259
} | class ____(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, default=[1, 2, 3])
zz = Dict(String, Int)
s = Nullable(String, default=None)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| _TestModel2 |
python | pennersr__django-allauth | allauth/socialaccount/providers/saml/views.py | {
"start": 1436,
"end": 2002
} | class ____(SAMLViewMixin, View):
def dispatch(self, request, organization_slug):
url = reverse(
"saml_finish_acs",
kwargs={"organization_slug": organization_slug},
)
response = HttpResponseRedirect(url)
acs_session = LoginSession(request, "saml_acs_session", "saml-acs-session")
acs_session.store.update({"request": httpkit.serialize_request(request)})
acs_session.save(response)
return response
acs = ACSView.as_view()
@method_decorator(login_not_required, name="dispatch")
| ACSView |
python | plotly__plotly.py | plotly/graph_objs/heatmap/colorbar/_tickfont.py | {
"start": 233,
"end": 9918
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "heatmap.colorbar"
_path_str = "heatmap.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmap.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.heatmap.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | django__django | django/core/checks/registry.py | {
"start": 613,
"end": 3880
} | class ____:
def __init__(self):
self.registered_checks = set()
self.deployment_checks = set()
def register(self, check=None, *tags, **kwargs):
"""
Can be used as a function or a decorator. Register given function
`f` labeled with given `tags`. The function should receive **kwargs
and return list of Errors and Warnings.
Example::
registry = CheckRegistry()
@registry.register('mytag', 'anothertag')
def my_check(app_configs, **kwargs):
# ... perform checks and collect `errors` ...
return errors
# or
registry.register(my_check, 'mytag', 'anothertag')
"""
def inner(check):
if not func_accepts_kwargs(check):
raise TypeError(
"Check functions must accept keyword arguments (**kwargs)."
)
check.tags = tags
checks = (
self.deployment_checks
if kwargs.get("deploy")
else self.registered_checks
)
checks.add(check)
return check
if callable(check):
return inner(check)
else:
if check:
tags += (check,)
return inner
def run_checks(
self,
app_configs=None,
tags=None,
include_deployment_checks=False,
databases=None,
):
"""
Run all registered checks and return list of Errors and Warnings.
"""
errors = []
checks = self.get_checks(include_deployment_checks)
if tags is not None:
checks = [check for check in checks if not set(check.tags).isdisjoint(tags)]
elif not databases:
# By default, 'database'-tagged checks are not run if an alias
# is not explicitly specified as they do more than mere static
# code analysis.
checks = [check for check in checks if Tags.database not in check.tags]
if databases is None:
databases = list(connections)
for check in checks:
new_errors = check(app_configs=app_configs, databases=databases)
if not isinstance(new_errors, Iterable):
raise TypeError(
"The function %r did not return a list. All functions "
"registered with the checks registry must return a list." % check,
)
errors.extend(new_errors)
return errors
def tag_exists(self, tag, include_deployment_checks=False):
return tag in self.tags_available(include_deployment_checks)
def tags_available(self, deployment_checks=False):
return set(
chain.from_iterable(
check.tags for check in self.get_checks(deployment_checks)
)
)
def get_checks(self, include_deployment_checks=False):
checks = list(self.registered_checks)
if include_deployment_checks:
checks.extend(self.deployment_checks)
return checks
registry = CheckRegistry()
register = registry.register
run_checks = registry.run_checks
tag_exists = registry.tag_exists
| CheckRegistry |
python | ray-project__ray | doc/source/rllib/doc_code/replay_buffer_demo.py | {
"start": 2187,
"end": 5573
} | class ____(ReplayBuffer):
@override(ReplayBuffer)
def sample(
self, num_items: int, evict_sampled_more_then: int = 30, **kwargs
) -> Optional[SampleBatchType]:
"""Evicts experiences that have been sampled > evict_sampled_more_then times."""
idxes = [random.randint(0, len(self) - 1) for _ in range(num_items)]
often_sampled_idxes = list(
filter(lambda x: self._hit_count[x] >= evict_sampled_more_then, set(idxes))
)
sample = self._encode_sample(idxes)
self._num_timesteps_sampled += sample.count
for idx in often_sampled_idxes:
del self._storage[idx]
self._hit_count = np.append(
self._hit_count[:idx], self._hit_count[idx + 1 :]
)
return sample
config = (
DQNConfig()
.api_stack(
enable_env_runner_and_connector_v2=False, enable_rl_module_and_learner=False
)
.environment(env="CartPole-v1")
.training(replay_buffer_config={"type": LessSampledReplayBuffer})
)
tune.Tuner(
"DQN",
param_space=config,
run_config=tune.RunConfig(
stop={"training_iteration": 1},
),
).fit()
# __sphinx_doc_replay_buffer_own_buffer__end__
# __sphinx_doc_replay_buffer_advanced_usage_storage_unit__begin__
# This line will make our buffer store only complete episodes found in a batch
config.training(replay_buffer_config={"storage_unit": StorageUnit.EPISODES})
less_sampled_buffer = LessSampledReplayBuffer(**config.replay_buffer_config)
# Gather some random experiences
env = RandomEnv()
terminated = truncated = False
batch = SampleBatch({})
t = 0
while not terminated and not truncated:
obs, reward, terminated, truncated, info = env.step([0, 0])
# Note that in order for RLlib to find out about start and end of an episode,
# "t" and "terminateds" have to properly mark an episode's trajectory
one_step_batch = SampleBatch(
{
"obs": [obs],
"t": [t],
"reward": [reward],
"terminateds": [terminated],
"truncateds": [truncated],
}
)
batch = concat_samples([batch, one_step_batch])
t += 1
less_sampled_buffer.add(batch)
for i in range(10):
assert len(less_sampled_buffer._storage) == 1
less_sampled_buffer.sample(num_items=1, evict_sampled_more_then=9)
assert len(less_sampled_buffer._storage) == 0
# __sphinx_doc_replay_buffer_advanced_usage_storage_unit__end__
# __sphinx_doc_replay_buffer_advanced_usage_underlying_buffers__begin__
config = (
DQNConfig()
.api_stack(
enable_env_runner_and_connector_v2=False, enable_rl_module_and_learner=False
)
.training(
replay_buffer_config={
"type": "MultiAgentReplayBuffer",
"underlying_replay_buffer_config": {
"type": LessSampledReplayBuffer,
# We can specify the default call argument
# for the sample method of the underlying buffer method here.
"evict_sampled_more_then": 20,
},
}
)
.environment(env="CartPole-v1")
)
tune.Tuner(
"DQN",
param_space=config.to_dict(),
run_config=tune.RunConfig(
stop={"env_runners/episode_return_mean": 40, "training_iteration": 7},
),
).fit()
# __sphinx_doc_replay_buffer_advanced_usage_underlying_buffers__end__
| LessSampledReplayBuffer |
python | huggingface__transformers | src/transformers/models/swinv2/configuration_swinv2.py | {
"start": 895,
"end": 7547
} | class ____(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Swinv2Model`]. It is used to instantiate a Swin
Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
[microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
pretrained_window_sizes (`list(int)`, *optional*, defaults to `[0, 0, 0, 0]`):
Size of windows during pretraining.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage.
Example:
```python
>>> from transformers import Swinv2Config, Swinv2Model
>>> # Initializing a Swinv2 microsoft/swinv2-tiny-patch4-window8-256 style configuration
>>> configuration = Swinv2Config()
>>> # Initializing a model (with random weights) from the microsoft/swinv2-tiny-patch4-window8-256 style configuration
>>> model = Swinv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "swinv2"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(
self,
image_size=224,
patch_size=4,
num_channels=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
pretrained_window_sizes=[0, 0, 0, 0],
mlp_ratio=4.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
initializer_range=0.02,
layer_norm_eps=1e-5,
encoder_stride=32,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.pretrained_window_sizes = pretrained_window_sizes
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
__all__ = ["Swinv2Config"]
| Swinv2Config |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_solr_backend.py | {
"start": 1459,
"end": 1990
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(
model_attr="author", faceted=True, index_fieldname="name_s"
)
pub_date = indexes.DateField(model_attr="pub_date", index_fieldname="pub_date_dt")
today = indexes.IntegerField(index_fieldname="today_i")
def prepare_today(self, obj):
return datetime.datetime.now().day
def get_model(self):
return MockModel
| SolrMockOverriddenFieldNameSearchIndex |
python | keras-team__keras | keras/src/metrics/confusion_metrics.py | {
"start": 15874,
"end": 21548
} | class ____(Metric):
"""Computes the recall of the predictions with respect to the labels.
This metric creates two local variables, `true_positives` and
`false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
If `top_k` is set, recall will be computed as how often on average a class
among the labels of a batch entry is in the top-k predictions.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing the
fraction of them for which `class_id` is above the threshold and/or in the
top-k predictions.
Args:
thresholds: (Optional) A float value, or a Python list/tuple of float
threshold values in `[0, 1]`. A threshold is compared with
prediction values to determine the truth value of predictions (i.e.,
above the threshold is `True`, below is `False`). If used with a
loss function that sets `from_logits=True` (i.e. no sigmoid
applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
If neither `thresholds` nor `top_k` are set,
the default is to calculate recall with `thresholds=0.5`.
top_k: (Optional) Unset by default. An int value specifying the top-k
predictions to consider when calculating recall.
class_id: (Optional) Integer class ID for which we want binary metrics.
This must be in the half-open interval `[0, num_classes)`, where
`num_classes` is the last dimension of predictions.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.Recall()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
>>> m.result()
0.6666667
>>> m.reset_state()
>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=[keras.metrics.Recall()])
```
Usage with a loss with `from_logits=True`:
```python
model.compile(optimizer='adam',
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.Recall(thresholds=0)])
```
"""
def __init__(
self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
):
super().__init__(name=name, dtype=dtype)
# Metric should be maximized during optimization.
self._direction = "up"
self.init_thresholds = thresholds
self.top_k = top_k
self.class_id = class_id
default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
self.thresholds = metrics_utils.parse_init_thresholds(
thresholds, default_threshold=default_threshold
)
self._thresholds_distributed_evenly = (
metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
)
self.true_positives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="true_positives",
)
self.false_negatives = self.add_variable(
shape=(len(self.thresholds),),
initializer=initializers.Zeros(),
name="false_negatives",
)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates true positive and false negative statistics.
Args:
y_true: The ground truth values, with the same dimensions as
`y_pred`. Will be cast to `bool`.
y_pred: The predicted values. Each element must be in the range
`[0, 1]`.
sample_weight: Optional weighting of each example. Defaults to `1`.
Can be a tensor whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
"""
metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, # noqa: E501
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives, # noqa: E501
},
y_true,
y_pred,
thresholds=self.thresholds,
thresholds_distributed_evenly=self._thresholds_distributed_evenly,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight,
)
def result(self):
result = ops.divide_no_nan(
self.true_positives,
ops.add(self.true_positives, self.false_negatives),
)
return result[0] if len(self.thresholds) == 1 else result
def reset_state(self):
num_thresholds = len(to_list(self.thresholds))
self.true_positives.assign(ops.zeros((num_thresholds,)))
self.false_negatives.assign(ops.zeros((num_thresholds,)))
def get_config(self):
config = {
"thresholds": self.init_thresholds,
"top_k": self.top_k,
"class_id": self.class_id,
}
base_config = super().get_config()
return {**base_config, **config}
| Recall |
python | sqlalchemy__sqlalchemy | test/sql/test_insert_exec.py | {
"start": 1672,
"end": 15264
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
@testing.requires.multivalues_inserts
@testing.combinations("string", "column", "expect", argnames="keytype")
def test_multivalues_insert(self, connection, keytype):
users = self.tables.users
if keytype == "string":
user_id, user_name = "user_id", "user_name"
elif keytype == "column":
user_id, user_name = users.c.user_id, users.c.user_name
elif keytype == "expect":
user_id, user_name = ExpectExpr(users.c.user_id), ExpectExpr(
users.c.user_name
)
else:
assert False
connection.execute(
users.insert().values(
[
{user_id: 7, user_name: "jack"},
{user_id: 8, user_name: "ed"},
]
)
)
rows = connection.execute(
users.select().order_by(users.c.user_id)
).all()
eq_(rows[0], (7, "jack"))
eq_(rows[1], (8, "ed"))
connection.execute(users.insert().values([(9, "jack"), (10, "ed")]))
rows = connection.execute(
users.select().order_by(users.c.user_id)
).all()
eq_(rows[2], (9, "jack"))
eq_(rows[3], (10, "ed"))
def test_insert_heterogeneous_params(self, connection):
"""test that executemany parameters are asserted to match the
parameter set of the first."""
users = self.tables.users
assert_raises_message(
exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
"bind parameter 'user_name', in "
"parameter group 2\n"
r"\[SQL: u?INSERT INTO users",
connection.execute,
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9},
],
)
# this succeeds however. We aren't yet doing
# a length check on all subsequent parameters.
connection.execute(
users.insert(),
[
{"user_id": 7},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9},
],
)
def _test_lastrow_accessor(self, connection, table_, values, assertvalues):
"""Tests the inserted_primary_key and lastrow_has_id() functions."""
def insert_values(table_, values):
"""
Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches.
"""
# verify implicit_returning is working
if (
connection.dialect.insert_returning
and table_.implicit_returning
and not connection.dialect.postfetch_lastrowid
):
ins = table_.insert()
comp = ins.compile(connection, column_keys=list(values))
if not set(values).issuperset(
c.key for c in table_.primary_key
):
is_(bool(comp.returning), True)
result = connection.execute(table_.insert(), values)
ret = values.copy()
ipk = result.inserted_primary_key
for col, id_ in zip(table_.primary_key, ipk):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(
*[
col == id_
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
)
]
)
row = connection.execute(
table_.select().where(criterion)
).first()
for c in table_.c:
ret[c.key] = row._mapping[c]
return ret, ipk
table_.create(connection, checkfirst=True)
i, ipk = insert_values(table_, values)
eq_(i, assertvalues)
# named tuple tests
for col in table_.primary_key:
eq_(getattr(ipk, col.key), assertvalues[col.key])
eq_(ipk._mapping[col.key], assertvalues[col.key])
eq_(ipk._fields, tuple([col.key for col in table_.primary_key]))
@testing.requires.supports_autoincrement_w_composite_pk
@testing.combinations(
(True, testing.requires.insert_returning),
(False,),
argnames="implicit_returning",
)
def test_lastrow_accessor_one(
self, metadata, connection, implicit_returning
):
self._test_lastrow_accessor(
connection,
Table(
"t1",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
implicit_returning=implicit_returning,
),
{"foo": "hi"},
{"id": 1, "foo": "hi"},
)
@testing.requires.supports_autoincrement_w_composite_pk
@testing.combinations(
(True, testing.requires.insert_returning),
(False,),
argnames="implicit_returning",
)
def test_lastrow_accessor_two(
self, metadata, connection, implicit_returning
):
self._test_lastrow_accessor(
connection,
Table(
"t2",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
implicit_returning=implicit_returning,
),
{"foo": "hi"},
{"id": 1, "foo": "hi", "bar": "hi"},
)
@testing.combinations(
(True, testing.requires.insert_returning),
(False,),
argnames="implicit_returning",
)
def test_lastrow_accessor_three(
self, metadata, connection, implicit_returning
):
self._test_lastrow_accessor(
connection,
Table(
"t3",
metadata,
Column("id", String(40), primary_key=True),
Column("foo", String(30), primary_key=True),
Column("bar", String(30)),
implicit_returning=implicit_returning,
),
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
)
@testing.requires.sequences
@testing.combinations(
(True, testing.requires.insert_returning),
(False,),
argnames="implicit_returning",
)
def test_lastrow_accessor_four(
self, metadata, connection, implicit_returning
):
self._test_lastrow_accessor(
connection,
Table(
"t4",
metadata,
Column(
"id",
Integer,
normalize_sequence(
config, Sequence("t4_id_seq", optional=True)
),
primary_key=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
implicit_returning=implicit_returning,
),
{"foo": "hi", "id": 1},
{"id": 1, "foo": "hi", "bar": "hi"},
)
@testing.requires.sequences
@testing.combinations(
(True, testing.requires.insert_returning),
(False,),
argnames="implicit_returning",
)
def test_lastrow_accessor_four_a(
self, metadata, connection, implicit_returning
):
self._test_lastrow_accessor(
connection,
Table(
"t4",
metadata,
Column(
"id",
Integer,
normalize_sequence(config, Sequence("t4_id_seq")),
primary_key=True,
),
Column("foo", String(30)),
implicit_returning=implicit_returning,
),
{"foo": "hi"},
{"id": 1, "foo": "hi"},
)
@testing.combinations(
(True, testing.requires.insert_returning),
(False,),
argnames="implicit_returning",
)
def test_lastrow_accessor_five(
self, metadata, connection, implicit_returning
):
self._test_lastrow_accessor(
connection,
Table(
"t5",
metadata,
Column("id", String(10), primary_key=True),
Column("bar", String(30), server_default="hi"),
implicit_returning=implicit_returning,
),
{"id": "id1"},
{"id": "id1", "bar": "hi"},
)
@testing.requires.supports_autoincrement_w_composite_pk
@testing.combinations(
(True, testing.requires.insert_returning),
(False,),
argnames="implicit_returning",
)
def test_lastrow_accessor_six(
self, metadata, connection, implicit_returning
):
self._test_lastrow_accessor(
connection,
Table(
"t6",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("bar", Integer, primary_key=True),
implicit_returning=implicit_returning,
),
{"bar": 0},
{"id": 1, "bar": 0},
)
# TODO: why not in the sqlite suite?
@testing.only_on("sqlite+pysqlite")
def test_lastrowid_zero(self, metadata, connection):
from sqlalchemy.dialects import sqlite
class ExcCtx(sqlite.base.SQLiteExecutionContext):
def get_lastrowid(self):
return 0
t = Table(
"t",
self.metadata,
Column("x", Integer, primary_key=True),
Column("y", Integer),
implicit_returning=False,
)
t.create(connection)
with mock.patch.object(
connection.dialect, "execution_ctx_cls", ExcCtx
):
r = connection.execute(t.insert().values(y=5))
eq_(r.inserted_primary_key, (0,))
@testing.requires.supports_autoincrement_w_composite_pk
def test_misordered_lastrow(self, connection, metadata):
related = Table(
"related",
metadata,
Column("id", Integer, primary_key=True),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
t6 = Table(
"t6",
metadata,
Column(
"manual_id",
Integer,
ForeignKey("related.id"),
primary_key=True,
),
Column(
"auto_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
metadata.create_all(connection)
r = connection.execute(related.insert().values(id=12))
id_ = r.inserted_primary_key[0]
eq_(id_, 12)
r = connection.execute(t6.insert().values(manual_id=id_))
eq_(r.inserted_primary_key, (12, 1))
def test_implicit_id_insert_select_columns(self, connection):
users = self.tables.users
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20),
)
r = connection.execute(stmt)
eq_(r.inserted_primary_key, (None,))
def test_implicit_id_insert_select_keys(self, connection):
users = self.tables.users
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20),
)
r = connection.execute(stmt)
eq_(r.inserted_primary_key, (None,))
@testing.requires.empty_inserts
@testing.requires.insert_returning
def test_no_inserted_pk_on_returning(
self, connection, close_result_when_finished
):
users = self.tables.users
result = connection.execute(
users.insert().returning(users.c.user_id, users.c.user_name)
)
close_result_when_finished(result)
assert_raises_message(
exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr,
result,
"inserted_primary_key",
)
| InsertExecTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/types.py | {
"start": 4982,
"end": 5718
} | class ____:
"""User-defined Airbyte source bound to actual created Airbyte source."""
def __init__(self, source: AirbyteSource, source_id: str, source_definition_id: Optional[str]):
self.source = source
self.source_id = source_id
self.source_definition_id = source_definition_id
@classmethod
def from_api_json(cls, api_json: Mapping[str, Any]):
return cls(
source=AirbyteSource(
name=api_json["name"],
source_type=api_json["sourceName"],
source_configuration=api_json["connectionConfiguration"],
),
source_id=api_json["sourceId"],
source_definition_id=None,
)
| InitializedAirbyteSource |
python | getsentry__sentry | tests/sentry/integrations/tasks/test_sync_status_outbound.py | {
"start": 989,
"end": 7409
} | class ____(TestCase):
def setUp(self) -> None:
self.example_integration = self.create_integration(
organization=self.group.organization,
external_id="123456",
provider="example",
oi_params={
"config": {
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
},
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch.object(ExampleIntegration, "sync_status_outbound")
def test_successful_outbound_sync(
self, mock_sync_status: mock.MagicMock, mock_record_event: mock.MagicMock
) -> None:
external_issue: ExternalIssue = self.create_integration_external_issue(
group=self.group, key="foo_integration", integration=self.example_integration
)
sync_status_outbound(self.group.id, external_issue_id=external_issue.id)
mock_sync_status.assert_called_once_with(external_issue, False, self.group.project_id)
mock_record_event.assert_any_call(EventLifecycleOutcome.SUCCESS, None, False, None)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@mock.patch.object(ExampleIntegration, "sync_status_outbound")
@mock.patch.object(ExampleIntegration, "should_sync")
def test_should_not_sync(
self,
mock_should_sync: mock.MagicMock,
mock_sync_status: mock.MagicMock,
mock_record_event: mock.MagicMock,
) -> None:
mock_should_sync.return_value = False
external_issue: ExternalIssue = self.create_integration_external_issue(
group=self.group, key="foo_integration", integration=self.example_integration
)
sync_status_outbound(self.group.id, external_issue_id=external_issue.id)
mock_sync_status.assert_not_called()
assert mock_record_event.call_count == 2
start, success = mock_record_event.mock_calls
assert start.args == (EventLifecycleOutcome.STARTED,)
assert success.args == (EventLifecycleOutcome.SUCCESS, None, False, None)
    @mock.patch.object(ExampleIntegration, "sync_status_outbound")
    def test_missing_external_issue(self, mock_sync_status: mock.MagicMock) -> None:
        """A nonexistent external_issue_id makes the task a silent no-op."""
        # This shouldn't be an issue, but just verify that there's no external
        # issue with this ID
        assert not ExternalIssue.objects.filter(id=5432).exists()
        sync_status_outbound(self.group.id, external_issue_id=5432)
        mock_sync_status.assert_not_called()
    @mock.patch.object(ExampleIntegration, "sync_status_outbound")
    def test_missing_integration(self, mock_sync_status: mock.MagicMock) -> None:
        """Deleting the Integration row turns the task into a no-op even though the ExternalIssue still exists."""
        external_issue: ExternalIssue = self.create_integration_external_issue(
            group=self.group, key="foo_integration", integration=self.example_integration
        )
        with assume_test_silo_mode_of(Integration):
            Integration.objects.filter().delete()
        assert ExternalIssue.objects.filter(id=external_issue.id).exists()
        sync_status_outbound(self.group.id, external_issue_id=external_issue.id)
        mock_sync_status.assert_not_called()
    @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_failure")
    @mock.patch.object(ExampleIntegration, "sync_status_outbound")
    def test_failed_sync(
        self, mock_sync_status: mock.MagicMock, mock_record_failure: mock.MagicMock
    ) -> None:
        """An unexpected exception triggers a task retry and records exactly one lifecycle failure carrying that exception."""
        mock_sync_status.side_effect = raise_exception
        external_issue: ExternalIssue = self.create_integration_external_issue(
            group=self.group, key="foo_integration", integration=self.example_integration
        )
        with pytest.raises(RetryTaskError):
            sync_status_outbound(self.group.id, external_issue_id=external_issue.id)
        assert mock_record_failure.call_count == 1
        # First positional arg of record_failure is the exception raised by the sync.
        mock_record_event_args = mock_record_failure.call_args_list[0][0]
        assert mock_record_event_args[0] is not None
        metric_exception = mock_record_event_args[0]
        assert isinstance(metric_exception, Exception)
        assert metric_exception.args[0] == "Something went wrong"
    @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    @mock.patch.object(ExampleIntegration, "sync_status_outbound")
    def test_integration_form_error(
        self, mock_sync_status: mock.MagicMock, mock_record: mock.MagicMock
    ) -> None:
        """IntegrationFormError halts the lifecycle (STARTED then HALTED) rather than failing it."""
        mock_sync_status.side_effect = raise_integration_form_error
        external_issue: ExternalIssue = self.create_integration_external_issue(
            group=self.group, key="foo_integration", integration=self.example_integration
        )
        sync_status_outbound(self.group.id, external_issue_id=external_issue.id)
        # SLOs SYNC_STATUS_OUTBOUND (halt)
        assert_count_of_metric(
            mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
        )
        assert_count_of_metric(
            mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1
        )
        assert_halt_metric(
            mock_record=mock_record, error_msg=IntegrationFormError({"foo": "Invalid foo provided"})
        )
    @mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
    @mock.patch.object(ExampleIntegration, "sync_status_outbound")
    def test_api_unauthorized_error_halts(
        self, mock_sync_status: mock.MagicMock, mock_record: mock.MagicMock
    ) -> None:
        """ApiUnauthorized likewise halts the lifecycle (STARTED then HALTED) instead of recording a failure."""
        mock_sync_status.side_effect = raise_api_unauthorized_error
        external_issue: ExternalIssue = self.create_integration_external_issue(
            group=self.group, key="foo_integration", integration=self.example_integration
        )
        sync_status_outbound(self.group.id, external_issue_id=external_issue.id)
        assert_count_of_metric(
            mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
        )
        assert_count_of_metric(
            mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1
        )
        assert_halt_metric(mock_record=mock_record, error_msg=ApiUnauthorized("auth failed"))
| TestSyncStatusOutbound |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/function7.py | {
"start": 191,
"end": 279
} | class ____(Protocol):
    def write(self, a: str, b: str) -> object:
        """Protocol member: accept two string arguments; any return value satisfies the interface."""
        pass
| _Writer1 |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/mirrored_strategy_test.py | {
"start": 22337,
"end": 30039
} | class ____(test_util.DTensorBaseTest):
  def setUp(self):
    """Build a 1-D 'batch' mesh over 2 local devices and a small repeated image/label dataset."""
    super().setUp()
    global_ids = test_util.create_device_ids_array((2,))
    local_ids = np.ravel(global_ids).tolist()
    # One candidate mesh per device kind; configTestMesh picks the available one.
    mesh_dict = {
        device: layout.Mesh(['batch'], global_ids, local_ids,
                            test_util.create_device_list((2,), device))
        for device in ['TPU', 'GPU', 'CPU']
    }
    self.mesh = self.configTestMesh(mesh_dict)
    self.images = stateless_random_ops.stateless_random_uniform(
        [8, 8, 3], seed=(1, 2), minval=0, maxval=255)
    self.labels = stateless_random_ops.stateless_random_uniform(
        [1], seed=(1, 2), minval=0, maxval=10)
    self.dataset = dataset_ops.Dataset.from_tensors(
        (self.images, self.labels)).repeat()
  def test_create_batched_dataset(self):
    """Distributed elements keep the global batch size and unpack into one shard per local device."""
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    global_batch_size = 8
    dataset = self.dataset.batch(global_batch_size).prefetch(2)
    distributed_dataset = strategy.experimental_distribute_dataset(dataset)
    element = next(iter(distributed_dataset))
    batched_image, batched_label = element
    self.assertEqual(batched_image.shape, [global_batch_size, 8, 8, 3])
    self.assertEqual(batched_label.shape, [global_batch_size, 1])
    # Make sure when unpack the tensor, each of them has enough shards.
    self.assertLen(d_api.unpack(batched_image), self.mesh.num_local_devices())
    self.assertLen(d_api.unpack(batched_label), self.mesh.num_local_devices())
  def test_uneven_batched_dataset(self):
    """Datasets without a static batch size are rejected with a ValueError."""
    elements = [[1, 2, 3], [1, 2], [1, 2, 3, 4]]
    dataset = dataset_ops.Dataset.from_generator(
        lambda: elements, dtypes.int64).repeat()
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    with self.assertRaisesRegex(ValueError, 'requires a static batch size'):
      strategy.experimental_distribute_dataset(dataset)
  def test_create_partial_batched_dataset(self):
    """Last-partial-batch distribution; currently skipped (b/210887657)."""
    # TODO(b/210887657): Support last partial batch.
    self.skipTest('Test failed due to last partial batch')
    dataset = dataset_ops.Dataset.from_tensors(
        (self.images, self.labels)).repeat(30)  # There is a last partial batch
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    global_batch_size = 8
    dataset = dataset.batch(global_batch_size).prefetch(2)
    distributed_dataset = strategy.experimental_distribute_dataset(dataset)
    expected_element_batch_size = [8, 8, 8, 6]
    # The last batch with 6 element will fail to produce with StopIteration.
    iterator = iter(distributed_dataset)
    for batch_size in expected_element_batch_size:
      element = next(iterator)
      batched_image, batched_label = element
      self.assertEqual(batched_image.shape, [batch_size, 8, 8, 3])
      self.assertEqual(batched_label.shape, [batch_size, 1])
      # Make sure when unpack the tensor, each of them has enough shards.
      self.assertLen(d_api.unpack(batched_image), self.mesh.num_local_devices())
      self.assertLen(d_api.unpack(batched_label), self.mesh.num_local_devices())
  def test_deprecated_strategy_methods(self):
    """V1-only iterator factory methods raise NotImplementedError."""
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    with self.assertRaisesRegex(
        NotImplementedError, 'only available in the V1 API'):
      strategy.make_dataset_iterator(self.dataset)
    with self.assertRaisesRegex(
        NotImplementedError, 'only available in the V1 API'):
      strategy.make_input_fn_iterator(lambda _: self.dataset)
  def test_distribute_dataset_from_fn(self):
    """Per-replica dataset_fn batches combine into the global batch; element_spec reports global shapes."""
    local_batch_size = 4
    global_batch_size = 8
    def dataset_fn(option):
      del option
      return dataset_ops.Dataset.from_tensors(
          (self.images, self.labels)).repeat().batch(
              local_batch_size, drop_remainder=True).prefetch(2)
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    distributed_dataset = strategy.distribute_datasets_from_function(
        dataset_fn, None)
    iterator = iter(distributed_dataset)
    # element_spec exposes the *global* (8-element) batch shape.
    self.assertEqual(distributed_dataset.element_spec,
                     (tensor_spec.TensorSpec(shape=(8, 8, 8, 3),
                                             dtype=dtypes.float32, name=None),
                      tensor_spec.TensorSpec(shape=(8, 1),
                                             dtype=dtypes.float32, name=None)))
    self.assertEqual(distributed_dataset.element_spec, iterator.element_spec)
    batched_image, batched_label = next(iterator)
    self.assertEqual(batched_image.shape, [global_batch_size, 8, 8, 3])
    self.assertEqual(batched_label.shape, [global_batch_size, 1])
    # Make sure there are two shards when unpack, and each of them has 4 as
    # batch size
    unpacked_images = d_api.unpack(batched_image)
    self.assertLen(unpacked_images, self.mesh.num_local_devices())
    self.assertEqual(unpacked_images[0].shape, [local_batch_size, 8, 8, 3])
    self.assertEqual(unpacked_images[1].shape, [local_batch_size, 8, 8, 3])
  def test_distribute_values_from_function(self):
    """Per-replica value_fn results are packed into one batch-sharded DTensor."""
    array_value = np.array([3., 2., 1.])
    def value_fn(ctx):
      return array_value[ctx.replica_id_in_sync_group]
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    distributed_values = (
        strategy.experimental_distribute_values_from_function(
            value_fn))
    self.assertDTensorEqual(
        constant_op.constant([3., 2.], dtype=dtypes.float64),
        layout.Layout.batch_sharded(self.mesh, batch_dim='batch', rank=1),
        distributed_values)
  def test_distribute_values_from_function_with_nested_structure(self):
    """Nested (dict) value_fn results distribute per leaf, each batch-sharded."""
    array_value = np.array([3., 2., 1.])
    def value_fn(ctx):
      value = array_value[ctx.replica_id_in_sync_group]
      return {'a': value,
              'b': constant_op.constant([value + 1.0, value + 2.0])}
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    distributed_values = (
        strategy.experimental_distribute_values_from_function(
            value_fn))
    self.assertIsInstance(distributed_values, dict)
    self.assertDTensorEqual(
        constant_op.constant([3., 2.], dtype=dtypes.float64),
        layout.Layout.batch_sharded(self.mesh, batch_dim='batch', rank=1),
        distributed_values['a'])
    unpacked_a = d_api.unpack(distributed_values['a'])
    # Note that this might have a slight behavior difference, the original
    # mirrored strategy may return scalar for each PerReplica. The DTensor
    # implementation is more strict and always ensures the PerReplica
    # value has the same rank as the global-view Tensor.
    self.assertAllClose(unpacked_a[0], [3.])
    self.assertAllClose(unpacked_a[1], [2.])
    self.assertDTensorEqual(
        constant_op.constant([4., 5., 3., 4.], dtype=dtypes.float64),
        layout.Layout.batch_sharded(self.mesh, batch_dim='batch', rank=1),
        distributed_values['b'])
  def test_distribute_dataset_in_tf_function(self):
    """strategy.run over a distributed iterator inside tf.function yields per-replica local-batch values."""
    strategy = mirrored_strategy.MirroredStrategy(mesh=self.mesh)
    local_batch_size = 4
    global_batch_size = 8
    dataset = self.dataset.batch(global_batch_size).prefetch(2)
    distributed_dataset = strategy.experimental_distribute_dataset(dataset)
    @def_function.function
    def step_fn(iterator):
      images, labels = next(iterator)
      del labels
      return images
    result = strategy.run(step_fn, args=(iter(distributed_dataset),))
    self.assertIsInstance(result, dtensor_util.DTensorDistributedValue)
    self.assertLen(result.values, self.mesh.num_local_devices())
    self.assertEqual(result.values[0].shape, [local_batch_size, 8, 8, 3])
    self.assertEqual(result.values[1].shape, [local_batch_size, 8, 8, 3])
# Standard test entry point: run all tests in this module when executed directly.
if __name__ == '__main__':
  test.main()
| StrategyDatasetTest |
python | huggingface__transformers | src/transformers/models/big_bird/modeling_big_bird.py | {
"start": 9379,
"end": 52566
} | class ____(nn.Module):
def __init__(self, config, seed=None):
super().__init__()
self.max_seqlen = config.max_position_embeddings
self.seed = seed
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.num_random_blocks = config.num_random_blocks
self.block_size = config.block_size
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
    def forward(
        self,
        hidden_states,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        from_blocked_mask=None,
        to_blocked_mask=None,
        output_attentions=None,
    ):
        """Project `hidden_states` to Q/K/V and run block-sparse attention.

        The sequence length must be an exact multiple of `self.block_size`
        (for both the query and key/value sides). Returns
        `(context_layer, attention_probs)`; `attention_probs` is populated
        only when `output_attentions` is truthy.
        """
        # Currently this `class` can't be used in decoder.
        batch_size, seqlen, _ = hidden_states.size()
        to_seq_length = from_seq_length = seqlen
        from_block_size = to_block_size = self.block_size
        if from_seq_length % from_block_size != 0:
            raise ValueError("Query sided sequence length must be multiple of block size")
        if to_seq_length % to_block_size != 0:
            raise ValueError("Key/Value sided sequence length must be multiple of block size")
        # [batch, seq, hidden] -> [batch, heads, seq, head_size]
        query_layer = (
            self.query(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        key_layer = (
            self.key(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        value_layer = (
            self.value(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        context_layer, attention_probs = self.bigbird_block_sparse_attention(
            query_layer,
            key_layer,
            value_layer,
            band_mask,
            from_mask,
            to_mask,
            from_blocked_mask,
            to_blocked_mask,
            self.num_attention_heads,
            self.num_random_blocks,
            self.attention_head_size,
            from_block_size,
            to_block_size,
            batch_size,
            from_seq_length,
            to_seq_length,
            seed=self.seed,
            plan_from_length=None,
            plan_num_rand_blocks=None,
            output_attentions=output_attentions,
        )
        # [batch, heads, seq, head_size] -> [batch, seq, hidden]
        context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
        return context_layer, attention_probs
@staticmethod
def torch_bmm_nd(inp_1, inp_2, ndim=None):
"""Fast nd matrix multiplication"""
# faster replacement of torch.einsum ("bhqk,bhkd->bhqd")
return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
)
@staticmethod
def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
"""Fast nd matrix multiplication with transpose"""
# faster replacement of torch.einsum (bhqd,bhkd->bhqk)
return torch.bmm(
inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))
    def bigbird_block_sparse_attention(
        self,
        query_layer,
        key_layer,
        value_layer,
        band_mask,
        from_mask,
        to_mask,
        from_blocked_mask,
        to_blocked_mask,
        n_heads,
        n_rand_blocks,
        attention_head_size,
        from_block_size,
        to_block_size,
        batch_size,
        from_seq_len,
        to_seq_len,
        seed,
        plan_from_length,
        plan_num_rand_blocks,
        output_attentions,
    ):
        """Core BigBird ITC block-sparse attention.

        Attention for q[0], q[1], q[2:-2], q[-2] and q[-1] is computed in five
        separate parts so the sliding window can use the token-shifting trick.
        Returns ``(context_layer, attention_probs)``; ``attention_probs`` is a
        dense [bsz, n_heads, from_seq_len, to_seq_len] reconstruction built
        only when ``output_attentions`` is truthy, otherwise ``None``.
        """
        # BigBird block-sparse attention as suggested in paper
        # ITC:
        #     global tokens: 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size
        # ETC:
        #     global tokens: extra_globals_tokens + 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size
        # Note:
        #     1) Currently, ETC is not supported.
        #     2) Window size is fixed to 3 blocks & it can be changed only by
        #     changing `block_size`.
        #     3) Number of global blocks are fixed (2 blocks here) & global tokens can be
        #     controlled only by `block_size`.
        # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention)
        # hence following code can be divided into 5 parts.
        if from_seq_len // from_block_size != to_seq_len // to_block_size:
            raise ValueError("Error the number of blocks needs to be same!")
        rsqrt_d = 1 / math.sqrt(attention_head_size)
        bsz = batch_size
        attn_mask_penalty = -10000.0
        # generate random attention and corresponding masks
        np.random.seed(seed)
        if from_seq_len in [1024, 3072, 4096]:  # old plans used in paper
            rand_attn = [
                self._bigbird_block_rand_mask(
                    self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
                )[: (from_seq_len // from_block_size - 2)]
                for _ in range(n_heads)
            ]
        else:
            if plan_from_length is None:
                plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
                    from_seq_len, from_block_size, n_rand_blocks
                )
            rand_attn = self._bigbird_block_rand_mask_with_head(
                from_seq_length=from_seq_len,
                to_seq_length=to_seq_len,
                from_block_size=from_block_size,
                to_block_size=to_block_size,
                num_heads=n_heads,
                plan_from_length=plan_from_length,
                plan_num_rand_blocks=plan_num_rand_blocks,
            )
        rand_attn = np.stack(rand_attn, axis=0)
        rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
        rand_attn.unsqueeze_(0)
        rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)
        rand_mask = self._create_rand_mask_from_inputs(
            from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
        )
        blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
        blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
        blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
        # preparing block for randn attn
        gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
        gathered_key = gathered_key.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
        gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
        gathered_value = gathered_value.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
        # 1st PART
        # 1st block (global block) attention scores
        # q[0] x (k[0], k[1], k[2], k[3], k[4] .... )
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
        first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)
        first_product = first_product * rsqrt_d
        first_product += (1.0 - to_mask) * attn_mask_penalty
        first_attn_weights = nn.functional.softmax(
            first_product, dim=-1
        )  # [bsz, n_heads, from_block_size, to_seq_len]
        # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
        first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
        first_context_layer.unsqueeze_(2)
        # 2nd PART
        # 2nd block attention scores
        # q[1] x (sliding_keys, random_keys, global_keys)
        # sliding key blocks -> 2nd, 3rd blocks
        # global key blocks -> 1st block
        second_key_mat = torch.cat(
            [
                blocked_key_matrix[:, :, 0],
                blocked_key_matrix[:, :, 1],
                blocked_key_matrix[:, :, 2],
                blocked_key_matrix[:, :, -1],
                gathered_key[:, :, 0],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
        second_value_mat = torch.cat(
            [
                blocked_value_matrix[:, :, 0],
                blocked_value_matrix[:, :, 1],
                blocked_value_matrix[:, :, 2],
                blocked_value_matrix[:, :, -1],
                gathered_value[:, :, 0],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
        second_seq_pad = torch.cat(
            [
                to_mask[:, :, :, : 3 * to_block_size],
                to_mask[:, :, :, -to_block_size:],
                to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
            ],
            dim=3,
        )
        second_rand_pad = torch.cat(
            [
                rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
                rand_mask[:, :, 0],
            ],
            dim=3,
        )
        second_product = second_product * rsqrt_d
        second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty
        second_attn_weights = nn.functional.softmax(
            second_product, dim=-1
        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
        second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
        second_context_layer.unsqueeze_(2)
        # 3rd PART
        # Middle blocks attention scores
        # q[-2:2] x (sliding_keys, random_keys, global_keys)
        # sliding attn is calculated using special trick of shifting tokens as discussed in paper
        # random keys are generated by taking random indices as per `rand_attn`
        # global keys -> 1st & last block
        exp_blocked_key_matrix = torch.cat(
            [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        exp_blocked_value_matrix = torch.cat(
            [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
            dim=3,
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
        # sliding attention scores for q[-2:2]
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]
        inner_band_product = inner_band_product * rsqrt_d
        # randn attention scores for q[-2:2]
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
        rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]
        rand_band_product = rand_band_product * rsqrt_d
        # Including 1st block (since it's global)
        first_band_product = torch.einsum(
            "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
        first_band_product = first_band_product * rsqrt_d
        # Including last block (since it's global)
        last_band_product = torch.einsum(
            "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
        last_band_product = last_band_product * rsqrt_d
        # masking padded tokens
        inner_band_product += (1.0 - band_mask) * attn_mask_penalty
        first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty
        last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty
        rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty
        # completing attention scores matrix for all q[-2:2]
        band_product = torch.cat(
            [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
        # safely doing softmax since attention matrix is completed
        attn_weights = nn.functional.softmax(
            band_product, dim=-1
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
        # contribution of sliding keys
        # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        context_layer = self.torch_bmm_nd(
            attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
        )
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        # adding contribution of random keys
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
        context_layer += self.torch_bmm_nd(
            attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
        )
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        # adding contribution of global keys
        context_layer += torch.einsum(
            "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        context_layer += torch.einsum(
            "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        # 4th PART
        # last 2nd token attention scores
        # q[-2] x (sliding_keys, random_keys, global_keys)
        # sliding key blocks -> last 3 blocks
        # global key block -> 1st block
        # random key block -> based on indices stored in `randn_attn`
        second_last_key_mat = torch.cat(
            [
                blocked_key_matrix[:, :, 0],
                blocked_key_matrix[:, :, -3],
                blocked_key_matrix[:, :, -2],
                blocked_key_matrix[:, :, -1],
                gathered_key[:, :, -1],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]
        second_last_value_mat = torch.cat(
            [
                blocked_value_matrix[:, :, 0],
                blocked_value_matrix[:, :, -3],
                blocked_value_matrix[:, :, -2],
                blocked_value_matrix[:, :, -1],
                gathered_value[:, :, -1],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+r)*to_block_size, -1]
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
        second_last_seq_pad = torch.cat(
            [
                to_mask[:, :, :, :to_block_size],
                to_mask[:, :, :, -3 * to_block_size :],
                to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
            ],
            dim=3,
        )
        second_last_rand_pad = torch.cat(
            [
                rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
                rand_mask[:, :, -1],
            ],
            dim=3,
        )
        second_last_product = second_last_product * rsqrt_d
        second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty
        second_last_attn_weights = nn.functional.softmax(
            second_last_product, dim=-1
        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
        second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
        second_last_context_layer.unsqueeze_(2)
        # 5th PART
        # last block (global) attention scores
        # q[-1] x (k[0], k[1], k[2], k[3], .... )
        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
        last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
        last_product = last_product * rsqrt_d
        last_product += (1.0 - to_mask) * attn_mask_penalty
        last_attn_weights = nn.functional.softmax(last_product, dim=-1)  # [bsz, n_heads, from_block_size, n]
        # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
        last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
        last_context_layer.unsqueeze_(2)
        # combining representations of all tokens
        context_layer = torch.cat(
            [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
            dim=2,
        )
        context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
        context_layer = torch.transpose(context_layer, 1, 2)
        # this is just for visualizing; forward pass doesn't depend on following code
        if output_attentions:
            # TODO(PVP): need to verify if below code is correct
            attention_probs = torch.zeros(
                bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
            )
            # 1st query block
            # corresponding to `first_context_layer`
            attention_probs[:, :, :from_block_size, :] = first_attn_weights  # all keys global
            # 2nd query block
            # corresponding to `second_context_layer`
            attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
                :, :, :, : 3 * to_block_size
            ]  # 1st three key blocks (global + sliding)
            attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
                :, :, :, 3 * to_block_size : 4 * to_block_size
            ]  # last key block (global)
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                    attn_probs_view = attention_probs.view(
                        bsz,
                        n_heads,
                        from_seq_len // from_block_size,
                        from_block_size,
                        to_seq_len // to_block_size,
                        to_block_size,
                    )
                    right_slice = w2[:, 4 * to_block_size :]
                    attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
                        from_block_size, n_rand_blocks, to_block_size
                    )
            # Middle query blocks
            # corresponding to `context_layer`
            # sliding keys
            for q_idx in range(from_seq_len // from_block_size - 4):
                attn_probs_view = attention_probs.view(
                    bsz,
                    n_heads,
                    from_seq_len // from_block_size,
                    from_block_size,
                    to_seq_len // to_block_size,
                    to_block_size,
                )[:, :, 2:-2, :, 1:-1, :]
                right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
                attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
                    bsz, n_heads, from_block_size, 3, to_block_size
                )  # inner_band_product
            # global keys (corresponding to 1st key block)
            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
                :, :, :, :, :to_block_size
            ].view(bsz, n_heads, -1, to_block_size)  # first_band_product
            # global keys (corresponding to last key block)
            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
                :, :, :, :, -to_block_size:
            ].view(bsz, n_heads, -1, to_block_size)  # last_band_product
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                    for q_idx in range(1, len(i2) - 1):
                        attn_probs_view = attention_probs.view(
                            bsz,
                            n_heads,
                            from_seq_len // from_block_size,
                            from_block_size,
                            to_seq_len // to_block_size,
                            to_block_size,
                        )
                        right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]
                        attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(
                            from_block_size, n_rand_blocks, to_block_size
                        )
            # Second-last query block
            # corresponding to `second_last_context_layer`
            attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
                :, :, :, :to_block_size
            ]  # 1st key block (global)
            attention_probs[:, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :] = (
                second_last_attn_weights[:, :, :, to_block_size : 4 * to_block_size]
            )  # last three blocks (global + sliding)
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                    attn_probs_view = attention_probs.view(
                        bsz,
                        n_heads,
                        from_seq_len // from_block_size,
                        from_block_size,
                        to_seq_len // to_block_size,
                        to_block_size,
                    )
                    right_slice = w2[:, 4 * to_block_size :]
                    attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
                        from_block_size, n_rand_blocks, to_block_size
                    )
            # last query block
            # corresponding to `last_context_layer`
            attention_probs[:, :, -from_block_size:, :] = last_attn_weights  # all keys global
        else:
            attention_probs = None
        return context_layer, attention_probs
@staticmethod
def torch_gather_b2(params, indices):
if params.shape[:2] != indices.shape[:2]:
raise ValueError(
"Make sure that the first two dimensions of params and indices are identical, but"
f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}"
)
num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
num_indices_to_pick_from = params.shape[2]
shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
indices_shift = torch.div(shift, num_indices_to_gather, rounding_mode="floor") * num_indices_to_pick_from
flattened_indices = indices.view(-1) + indices_shift
flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
out_flattened = flattened_params.index_select(0, flattened_indices)
out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
return out
@staticmethod
def _create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
):
"""
Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
"""
num_windows = from_seq_length // from_block_size - 2
rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
return rand_mask
@staticmethod
def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
"""
Gives the plan of where to put random attention.
Args:
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
num_rand_blocks: int. Number of random chunks per row.
Returns:
plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for
each block
"""
plan_from_length = []
plan_num_rand_blocks = []
if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(0)
elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks // 2)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
else:
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks)
return plan_from_length, plan_num_rand_blocks
    def _bigbird_block_rand_mask(
        self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
    ):
        """
        Create adjacency list of random attention.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_rand_blocks: int. Number of random chunks per row.
            last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
            if positive then num_rand_blocks blocks chosen only up to last_idx.

        Returns:
            adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
        """
        # using this method when from_seq_length in [1024, 3072, 4096]
        if from_seq_length // from_block_size != to_seq_length // to_block_size:
            raise ValueError("Error the number of blocks needs to be same!")
        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
        # During inference (eval) no randomness
        if not self.training:
            # Eval path: the all-zero adjacency (every row "attends" to block 0) keeps
            # outputs deterministic across runs.
            return rand_attn
        # Candidate to-blocks, excluding the first and last (global) blocks.
        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
        last = to_seq_length // to_block_size - 1
        if last_idx > (2 * to_block_size):
            last = (last_idx // to_block_size) - 1
        r = num_rand_blocks  # shorthand
        for i in range(1, from_seq_length // from_block_size - 1):
            # start/end delimit this row's sliding-window neighborhood, which must be
            # excluded from the random picks.
            start = i - 2
            end = i
            if i == 1:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
            elif i == 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
            elif i == from_seq_length // from_block_size - 3:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
                # Missing -3: should have been sliced till last-3
            elif i == from_seq_length // from_block_size - 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
                # Missing -4: should have been sliced till last-4
            # NOTE(review): the two "Missing" slice quirks above are documented and kept
            # as-is — presumably preserved for reproducibility with released
            # checkpoints; confirm upstream before "fixing" them.
            else:
                if start > last:
                    start = last
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                elif (end + 1) == last:
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                else:
                    # General case: pick from everything outside [start, end] up to `last`.
                    rand_attn[i - 1, :] = np.random.permutation(
                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
                    )[:r]
        return rand_attn
    def _bigbird_block_rand_mask_with_head(
        self,
        from_seq_length,
        to_seq_length,
        from_block_size,
        to_block_size,
        num_heads,
        plan_from_length,
        plan_num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_top=1,
        global_block_bottom=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """
        Create adjacency list of random attention.

        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_heads: int. total number of heads.
            plan_from_length: list. plan from length where num_random_blocks are chosen from.
            plan_num_rand_blocks: list. number of rand blocks within the plan.
            window_block_left: int. number of blocks of window to left of a block.
            window_block_right: int. number of blocks of window to right of a block.
            global_block_top: int. number of blocks at the top.
            global_block_bottom: int. number of blocks at the bottom.
            global_block_left: int. Number of blocks globally used to the left.
            global_block_right: int. Number of blocks globally used to the right.

        Returns:
            adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by
            num_rand_blocks
        """
        # using this method when from_seq_length not in [1024, 3072, 4096]
        if from_seq_length // from_block_size != to_seq_length // to_block_size:
            raise ValueError("Error the number of blocks needs to be same!")
        if from_seq_length not in plan_from_length:
            raise ValueError("Error from sequence length not in plan!")
        # Total number of blocks in the mask
        num_blocks = from_seq_length // from_block_size
        # Number of blocks per plan
        plan_block_length = np.array(plan_from_length) // from_block_size
        # till when to follow plan
        max_plan_idx = plan_from_length.index(from_seq_length)
        # Random Attention adjacency list — one [num_blocks, total_rand_blocks] array per head.
        rand_attn = [
            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
            for i in range(num_heads)
        ]
        # During inference (eval) no randomness
        if not self.training:
            for nh in range(num_heads):
                # Trim the rows reserved for the global top/bottom blocks.
                rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
            return rand_attn
        # We will go iteratively over the plan blocks and pick random number of
        # Attention blocks from the legally allowed blocks
        for plan_idx in range(max_plan_idx + 1):
            rnd_r_cnt = 0
            if plan_idx > 0:
                # set the row for all from_blocks starting from 0 to
                # plan_block_length[plan_idx-1]
                # column indx start from plan_block_length[plan_idx-1] and ends at
                # plan_block_length[plan_idx]
                if plan_num_rand_blocks[plan_idx] > 0:
                    # rnd_r_cnt/curr_r_cnt are the column bounds for this plan segment.
                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=plan_block_length[plan_idx - 1],
                                to_end_block_id=plan_block_length[plan_idx],
                                num_rand_blocks=plan_num_rand_blocks[plan_idx],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
                # Back-fill: rows of the new segment also pick random blocks from every
                # earlier plan segment that had a non-zero allocation.
                for pl_id in range(plan_idx):
                    if plan_num_rand_blocks[pl_id] == 0:
                        continue
                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                        rnd_r_cnt = 0
                        to_start_block_id = 0
                        if pl_id > 0:
                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                            to_start_block_id = plan_block_length[pl_id - 1]
                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=to_start_block_id,
                                to_end_block_id=plan_block_length[pl_id],
                                num_rand_blocks=plan_num_rand_blocks[pl_id],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
            if plan_num_rand_blocks[plan_idx] == 0:
                continue
            # Fill the current segment's own rows against its own column range.
            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
            from_start_block_id = global_block_top
            to_start_block_id = 0
            if plan_idx > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                from_start_block_id = plan_block_length[plan_idx - 1]
                to_start_block_id = plan_block_length[plan_idx - 1]
            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
                for h in range(num_heads):
                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                        block_id=blk_rw_idx,
                        to_start_block_id=to_start_block_id,
                        to_end_block_id=plan_block_length[plan_idx],
                        num_rand_blocks=plan_num_rand_blocks[plan_idx],
                        window_block_left=window_block_left,
                        window_block_right=window_block_right,
                        global_block_left=global_block_left,
                        global_block_right=global_block_right,
                    )
        for nh in range(num_heads):
            # Drop the rows belonging to the global top/bottom blocks before returning.
            rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
        return rand_attn
@staticmethod
def _get_single_block_row_attention(
block_id,
to_start_block_id,
to_end_block_id,
num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_left=1,
global_block_right=1,
):
"""
For a single row block get random row attention.
Args:
block_id: int. block id of row.
to_start_block_id: int. random attention column start id.
to_end_block_id: int. random attention column end id.
num_rand_blocks: int. number of random blocks to be selected.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
row containing the random attention vector of size num_rand_blocks.
"""
# list of to_blocks from which to choose random attention
to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
# permute the blocks
perm_block = np.random.permutation(to_block_list)
# illegal blocks for the current block id, using window
illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
# Add blocks at the start and at the end
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
# The second from_block cannot choose random attention on second last to_block
if block_id == 1:
illegal_blocks.append(to_end_block_id - 2)
# The second last from_block cannot choose random attention on second to_block
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
selected_random_blocks = []
for i in range(to_end_block_id - to_start_block_id):
if perm_block[i] not in illegal_blocks:
selected_random_blocks.append(perm_block[i])
if len(selected_random_blocks) == num_rand_blocks:
break
return np.array(selected_random_blocks, dtype=np.int32)
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird
| BigBirdBlockSparseAttention |
python | ray-project__ray | rllib/offline/tests/test_dataset_reader.py | {
"start": 320,
"end": 5445
class ____(unittest.TestCase):
    # NOTE(review): the class name is masked as "____" in this extract. This is a
    # unittest suite for `get_dataset_and_shards()` and `DatasetReader` (RLlib offline
    # data); project fixtures/imports (ray, AlgorithmConfig, IOContext) come from the
    # surrounding file.
    @classmethod
    def setUpClass(cls) -> None:
        # One shared Ray runtime for every test in the class; torn down once below.
        ray.init()
        # TODO(Kourosh): Hitting S3 in CI is currently broken due to some AWS
        # credentials issues, using a local file instead for now.
        # cls.dset_path = "s3://air-example-data/rllib/cartpole/large.json"
        cls.dset_path = "offline/tests/data/pendulum/large.json"
    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()
    def test_dataset_reader_itr_batches(self):
        """Test that the dataset reader iterates over batches of rows correctly."""
        input_config = {"format": "json", "paths": self.dset_path}
        dataset, _ = get_dataset_and_shards(
            AlgorithmConfig().offline_data(input_="dataset", input_config=input_config)
        )
        ioctx = IOContext(
            config=(
                AlgorithmConfig()
                .training(train_batch_size=1200)
                .offline_data(actions_in_input_normalized=True)
            ),
            worker_index=0,
        )
        reader = DatasetReader(dataset, ioctx)
        # next() may return slightly more than train_batch_size rows, hence >=.
        assert len(reader.next()) >= 1200
    def test_dataset_shard_with_only_local(self):
        """Tests whether the dataset_shard function works correctly for a single shard
        for the local worker."""
        config = AlgorithmConfig().offline_data(
            input_="dataset", input_config={"format": "json", "paths": self.dset_path}
        )
        # two ways of doing this:
        # we have no remote workers
        _, shards = get_dataset_and_shards(config, num_workers=0)
        assert len(shards) == 1
        assert isinstance(shards[0], ray.data.Dataset)
    def test_dataset_shard_remote_workers_with_local_worker(self):
        """Tests whether the dataset_shard function works correctly for the remote
        workers with a dummy dataset shard for the local worker."""
        config = AlgorithmConfig().offline_data(
            input_="dataset", input_config={"format": "json", "paths": self.dset_path}
        )
        NUM_WORKERS = 4
        _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS)
        # One shard per remote worker plus a placeholder (None) for the local worker.
        assert len(shards) == NUM_WORKERS + 1
        assert shards[0] is None
        assert all(
            isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:]
        )
    def test_dataset_shard_with_task_parallelization(self):
        """Tests whether the dataset_shard function works correctly with parallelism
        for reading the dataset."""
        config = (
            AlgorithmConfig()
            .offline_data(
                input_="dataset",
                input_config={
                    "format": "json",
                    "paths": self.dset_path,
                },
            )
            .env_runners(num_env_runners=10)
        )
        NUM_WORKERS = 4
        _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS)
        assert len(shards) == NUM_WORKERS + 1
        assert shards[0] is None
        assert all(
            isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:]
        )
    def test_dataset_shard_with_loader_fn(self):
        """Tests whether the dataset_shard function works correctly with loader_fn."""
        dset = ray.data.range(100)
        config = AlgorithmConfig().offline_data(
            input_="dataset", input_config={"loader_fn": lambda: dset}
        )
        ret_dataset, _ = get_dataset_and_shards(config)
        assert ret_dataset.count() == dset.count()
    def test_dataset_shard_error_with_unsupported_dataset_format(self):
        """Tests whether the dataset_shard function raises an error when an unsupported
        dataset format is specified."""
        config = AlgorithmConfig().offline_data(
            input_="dataset",
            input_config={
                "format": "__UNSUPPORTED_FORMAT__",
                "paths": self.dset_path,
            },
        )
        with self.assertRaises(ValueError):
            get_dataset_and_shards(config)
    def test_dataset_shard_error_with_both_format_and_loader_fn(self):
        """Tests whether the dataset_shard function raises an error when both format
        and loader_fn are specified."""
        # `format`/`paths` and `loader_fn` are mutually exclusive ways to supply data.
        dset = ray.data.range(100)
        config = AlgorithmConfig().offline_data(
            input_="dataset",
            input_config={
                "format": "json",
                "paths": self.dset_path,
                "loader_fn": lambda: dset,
            },
        )
        with self.assertRaises(ValueError):
            get_dataset_and_shards(config)
    def test_default_ioctx(self):
        # Test DatasetReader without passing in IOContext
        input_config = {"format": "json", "paths": self.dset_path}
        config = AlgorithmConfig().offline_data(
            input_="dataset", input_config=input_config
        )
        dataset, _ = get_dataset_and_shards(config)
        reader = DatasetReader(dataset)
        # Reads in one line of Pendulum dataset with 600 timesteps
        assert len(reader.next()) == 600
| TestDatasetReader |
python | fastai__fastai | fastai/data/core.py | {
"start": 14815,
"end": 19553
class ____(FilteredBase, L, GetAttr):
    "A `Pipeline` of `tfms` applied to a collection of `items`"
    # NOTE(review): class name masked as "____" in this extract (fastai's TfmdLists);
    # the isinstance check in __init__ still refers to the original name.
    _default='tfms'
    def __init__(self,
        items:list, # Items to apply `Transform`s to
        tfms:MutableSequence|Pipeline, # `Transform`(s) or `Pipeline` to apply
        use_list:bool=None, # Use `list` in `L`
        do_setup:bool=True, # Call `setup()` for `Transform`
        split_idx:int=None, # Apply `Transform`(s) to training or validation set. `0` for training set and `1` for validation set
        train_setup:bool=True, # Apply `Transform`(s) only on training `DataLoader`
        splits:list=None, # Indices for training and validation sets
        types=None, # Types of data in `items`
        verbose:bool=False, # Print verbose output
        dl_type:TfmdDL=None # Type of `DataLoader`
    ):
        super().__init__(items, use_list=use_list)
        if dl_type is not None: self._dl_type = dl_type
        self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs)
        # Reuse an existing pipeline's tfms; a ready Pipeline needs no setup pass.
        if isinstance(tfms,TfmdLists): tfms = tfms.tfms
        if isinstance(tfms,Pipeline): do_setup=False
        self.tfms = Pipeline(tfms, split_idx=split_idx)
        store_attr('types,split_idx')
        if do_setup:
            pv(f"Setting up {self.tfms}", verbose)
            self.setup(train_setup=train_setup)
    def _new(self, items, split_idx=None, **kwargs):
        # Build a sibling list that shares tfms/types but may target a different split.
        split_idx = ifnone(split_idx,self.split_idx)
        try: return super()._new(items, tfms=self.tfms, do_setup=False, types=self.types, split_idx=split_idx, **kwargs)
        except IndexError as e:
            # FIX(review): the message previously interpolated `i`, which is undefined in
            # this scope (it belongs to `subset`), so formatting it raised a NameError
            # that masked the real IndexError. `split_idx` carries the subset index here.
            e.args = [f"Tried to grab subset {split_idx} in the Dataset, but it contained no items.\n\t{e.args[0]}"]
            raise
    def subset(self, i): return self._new(self._get(self.splits[i]), split_idx=i)
    def _after_item(self, o): return self.tfms(o)
    def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}"
    def __iter__(self): return (self[i] for i in range(len(self)))
    def show(self, o, **kwargs): return self.tfms.show(o, **kwargs)
    def decode(self, o, **kwargs): return self.tfms.decode(o, **kwargs)
    def __call__(self, o, **kwargs): return self.tfms.__call__(o, **kwargs)
    def overlapping_splits(self): return L(Counter(self.splits.concat()).values()).filter(gt(1))
    def new_empty(self): return self._new([])
    def setup(self,
        train_setup:bool=True # Apply `Transform`(s) only on training `DataLoader`
    ):
        self.tfms.setup(self, train_setup)
        if len(self) != 0:
            # Trace one sample through the pipeline to record the type produced at
            # each stage (used by infer_idx/infer below).
            x = super().__getitem__(0) if self.splits is None else super().__getitem__(self.splits[0])[0]
            self.types = []
            for f in self.tfms.fs:
                self.types.append(getattr(f, 'input_types', type(x)))
                x = f(x)
            self.types.append(type(x))
            types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
            self.pretty_types = '\n'.join([f'  - {t}' for t in types])
    def infer_idx(self, x):
        # TODO: check if we really need this, or can simplify
        idx = 0
        for t in self.types:
            if isinstance(x, t): break
            idx += 1
        types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
        pretty_types = '\n'.join([f'  - {t}' for t in types])
        assert idx < len(self.types), f"Expected an input of type in \n{pretty_types}\n but got {type(x)}"
        return idx
    def infer(self, x):
        return compose_tfms(x, tfms=self.tfms.fs[self.infer_idx(x):], split_idx=self.split_idx)
    def __getitem__(self, idx):
        res = super().__getitem__(idx)
        if self._after_item is None: return res
        return self._after_item(res) if is_indexer(idx) else res.map(self._after_item)
# %% ../../nbs/03_data.core.ipynb 54
# Attach docstrings to TfmdLists' short one-line methods out-of-line (fastcore's
# `add_docs` pattern keeps the class body compact).
add_docs(TfmdLists,
         setup="Transform setup with self",
         decode="From `Pipeline`",
         show="From `Pipeline`",
         overlapping_splits="All splits that are in more than one split",
         subset="New `TfmdLists` with same tfms that only includes items in `i`th split",
         infer_idx="Finds the index where `self.tfms` can be applied to `x`, depending on the type of `x`",
         infer="Apply `self.tfms` to `x` starting at the right tfm depending on the type of `x`",
         new_empty="A new version of `self` but with no items")
# %% ../../nbs/03_data.core.ipynb 55
def decode_at(o, idx):
    "Decoded item at `idx`"
    item = o[idx]
    return o.decode(item)
# %% ../../nbs/03_data.core.ipynb 56
def show_at(o, idx, **kwargs):
    # FIX(review): the original first line ended with a comma — `"Show item at `idx`",`
    # — which made it a one-element tuple expression, not a docstring, leaving
    # `show_at.__doc__` as None. Dropping the comma attaches the docstring.
    "Show item at `idx`"
    return o.show(o[idx], **kwargs)
# %% ../../nbs/03_data.core.ipynb 74
@docs
@delegates(TfmdLists)
| TfmdLists |
python | apache__airflow | providers/databricks/tests/unit/databricks/utils/test_mixins.py | {
"start": 2184,
"end": 4952
class ____:
    # NOTE(review): class name masked as "____" in this extract. A pytest suite for the
    # Databricks SQL-statements mixin; the fixtures `databricks_sql_statements`,
    # `terminal_success_state` and `terminal_failure_state` are defined elsewhere in
    # the file and supply a mocked operator/sensor plus mocked statement states.
    """
    We'll provide tests for each of the following methods:
    - _handle_execution
    - _handle_deferrable_execution
    - execute_complete
    - on_kill
    """
    def test_handle_execution_success(self, databricks_sql_statements, terminal_success_state):
        # Test an immediate success of the SQL statement
        databricks_sql_statements._hook.get_sql_statement_state.return_value = terminal_success_state
        databricks_sql_statements._handle_execution()
        # Success path must not try to cancel the statement.
        databricks_sql_statements._hook.cancel_sql_statement.assert_not_called()
    def test_handle_execution_failure(self, databricks_sql_statements, terminal_failure_state):
        # Test an immediate failure of the SQL statement
        databricks_sql_statements._hook.get_sql_statement_state.return_value = terminal_failure_state
        with pytest.raises(AirflowException):
            databricks_sql_statements._handle_execution()
        databricks_sql_statements._hook.cancel_sql_statement.assert_not_called()
    def test_handle_deferrable_execution_running(self, databricks_sql_statements):
        # A non-terminal state should cause the task to defer to the trigger.
        terminal_running_state = MagicMock()
        terminal_running_state.is_terminal = False
        # Test an immediate success of the SQL statement
        databricks_sql_statements._hook.get_sql_statement_state.return_value = terminal_running_state
        databricks_sql_statements._handle_deferrable_execution()
        databricks_sql_statements.defer.assert_called_once()
    def test_handle_deferrable_execution_success(self, databricks_sql_statements, terminal_success_state):
        # Test an immediate success of the SQL statement
        databricks_sql_statements._hook.get_sql_statement_state.return_value = terminal_success_state
        databricks_sql_statements._handle_deferrable_execution()
        # Already terminal: no deferral needed.
        databricks_sql_statements.defer.assert_not_called()
    def test_handle_deferrable_execution_failure(self, databricks_sql_statements, terminal_failure_state):
        # Test an immediate failure of the SQL statement
        databricks_sql_statements._hook.get_sql_statement_state.return_value = terminal_failure_state
        with pytest.raises(AirflowException):
            databricks_sql_statements._handle_deferrable_execution()
    def test_execute_complete(self):
        # Both the TestDatabricksSQLStatementsOperator and TestDatabricksSQLStatementsSensor tests implement
        # a test_execute_complete_failure and test_execute_complete_success method, so we'll pass here
        pass
    def test_on_kill(self):
        # This test is implemented in both the TestDatabricksSQLStatementsOperator and
        # TestDatabricksSQLStatementsSensor tests, so it will not be implemented here
        pass
| TestDatabricksSQLStatementsMixin |
python | pytorch__pytorch | torch/_dynamo/aot_compile.py | {
"start": 9995,
"end": 10427
class ____:
    # NOTE(review): class name masked as "____" in this extract (torch._dynamo's
    # ModelInput); the @dataclass decorator sits just outside the visible span.
    """
    WIP type: represents a single model input
    Which consists of a tuple of arguments and a set of contexts in which to run the model.
    For each ModelInput, we'll compile one full graph of the model, and then use the guards generated
    to dispatch between the compiled graphs.
    """
    # Positional arguments for one invocation of the model.
    # NOTE(review): annotation was `tuple[Any]` (a 1-tuple); `tuple[Any, ...]` matches
    # the variadic "tuple of arguments" described in the docstring.
    args: tuple[Any, ...]
    # Keyword arguments for the same invocation.
    kwargs: dict[str, Any]
    # Context managers to enter while running/compiling the model with these inputs.
    contexts: list[AbstractContextManager[Any]]
@dataclass
| ModelInput |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/reconciliation.py | {
"start": 24412,
"end": 26938
class ____(ManagedElementReconciler):
    # NOTE(review): class name masked as "____" in this extract (dagster-airbyte's
    # AirbyteManagedElementReconciler).
    """Reconciles Python-specified Airbyte connections with an Airbyte instance.

    Passing the module containing an AirbyteManagedElementReconciler to the dagster-airbyte
    CLI will allow you to check the state of your Python-code-specified Airbyte connections
    against an Airbyte instance, and reconcile them if necessary.

    This functionality is in beta and subject to change.
    """
    @public
    def __init__(
        self,
        airbyte: Union[AirbyteResource, ResourceDefinition],
        connections: Iterable[AirbyteConnection],
        delete_unmentioned_resources: bool = False,
    ):
        """Reconciles Python-specified Airbyte connections with an Airbyte instance.

        Args:
            airbyte (Union[AirbyteResource, ResourceDefinition]): The Airbyte resource definition to reconcile against.
            connections (Iterable[AirbyteConnection]): The Airbyte connection objects to reconcile.
            delete_unmentioned_resources (bool): Whether to delete resources that are not mentioned in
                the set of connections provided. When True, all Airbyte instance contents are effectively
                managed by the reconciler. Defaults to False.
        """
        # check.inst_param(airbyte, "airbyte", ResourceDefinition)
        # Accept either a ready AirbyteResource or a ResourceDefinition; in the latter
        # case instantiate it with an empty init-resource context.
        self._airbyte_instance: AirbyteResource = (
            airbyte
            if isinstance(airbyte, AirbyteResource)
            else airbyte(build_init_resource_context())
        )
        self._connections = list(
            check.iterable_param(connections, "connections", of_type=AirbyteConnection)
        )
        self._delete_unmentioned_resources = check.bool_param(
            delete_unmentioned_resources, "delete_unmentioned_resources"
        )
        super().__init__()
    def check(self, **kwargs) -> ManagedElementCheckResult:
        # Dry run: report the diff without mutating the Airbyte instance.
        return reconcile_config(
            self._airbyte_instance,
            self._connections,
            dry_run=True,
            should_delete=self._delete_unmentioned_resources,
            ignore_secrets=(not kwargs.get("include_all_secrets", False)),
        )
    def apply(self, **kwargs) -> ManagedElementCheckResult:
        # Same reconciliation as check(), but actually applies the changes.
        return reconcile_config(
            self._airbyte_instance,
            self._connections,
            dry_run=False,
            should_delete=self._delete_unmentioned_resources,
            ignore_secrets=(not kwargs.get("include_all_secrets", False)),
        )
| AirbyteManagedElementReconciler |
python | pypa__pip | src/pip/_vendor/rich/console.py | {
"start": 8391,
"end": 8475
class ____(Exception):
    """An error in the Capture context manager."""
    # NOTE(review): class name masked as "____" in this extract; a plain Exception
    # subclass carrying no extra state.
| CaptureError |
python | sqlalchemy__sqlalchemy | test/orm/test_options.py | {
"start": 50329,
"end": 63894
} | class ____(_fixtures.FixtureTest):
def test_synonym_options(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
order_by=addresses.c.id,
),
adlist=synonym("addresses"),
),
)
def go():
sess = fixture_session()
u = (
sess.query(User)
.order_by(User.id)
.options(sa.orm.joinedload(User.adlist))
.filter_by(name="jack")
).one()
eq_(u.adlist, [self.static.user_address_result[0].addresses[0]])
self.assert_sql_count(testing.db, go, 1)
def test_eager_options(self):
"""A lazy relationship can be upgraded to an eager relationship."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
order_by=addresses.c.id,
)
),
)
sess = fixture_session()
result = (
sess.query(User)
.order_by(User.id)
.options(sa.orm.joinedload(User.addresses))
).all()
def go():
eq_(result, self.static.user_address_result)
self.sql_count_(0, go)
def test_eager_options_with_limit(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="select",
)
),
)
sess = fixture_session()
u = (
sess.query(User)
.options(sa.orm.joinedload(User.addresses))
.filter_by(id=8)
).one()
def go():
eq_(u.id, 8)
eq_(len(u.addresses), 3)
self.sql_count_(0, go)
sess.expunge_all()
u = sess.query(User).filter_by(id=8).one()
eq_(u.id, 8)
eq_(len(u.addresses), 3)
def test_lazy_options_with_limit(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
)
),
)
sess = fixture_session()
u = (
sess.query(User)
.options(sa.orm.lazyload(User.addresses))
.filter_by(id=8)
).one()
def go():
eq_(u.id, 8)
eq_(len(u.addresses), 3)
self.sql_count_(1, go)
def test_eager_degrade(self):
"""An eager relationship automatically degrades to a lazy relationship
if eager columns are not available"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
order_by=addresses.c.id,
)
),
)
sess = fixture_session()
# first test straight eager load, 1 statement
def go():
result = sess.query(User).order_by(User.id).all()
eq_(result, self.static.user_address_result)
self.sql_count_(1, go)
sess.expunge_all()
# then select just from users. run it into instances.
# then assert the data, which will launch 3 more lazy loads
# (previous users in session fell out of scope and were removed from
# session's identity map)
r = sess.connection().execute(users.select().order_by(users.c.id))
ctx = sess.query(User)._compile_context()
def go():
result = loading.instances(r, ctx).scalars().unique()
result = list(result)
eq_(result, self.static.user_address_result)
self.sql_count_(4, go)
def test_eager_degrade_deep(self):
(
users,
Keyword,
items,
order_items,
orders,
Item,
User,
Address,
keywords,
item_keywords,
Order,
addresses,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.tables.keywords,
self.tables.item_keywords,
self.classes.Order,
self.tables.addresses,
)
# test with a deeper set of eager loads. when we first load the three
# users, they will have no addresses or orders. the number of lazy
# loads when traversing the whole thing will be three for the
# addresses and three for the orders.
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="joined",
order_by=item_keywords.c.keyword_id,
)
),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=order_items.c.item_id,
)
),
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
orders=relationship(
Order, lazy="joined", order_by=orders.c.id
),
),
)
sess = fixture_session()
# first test straight eager load, 1 statement
def go():
result = sess.query(User).order_by(User.id).all()
eq_(result, self.static.user_all_result)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
# then select just from users. run it into instances.
# then assert the data, which will launch 6 more lazy loads
r = sess.connection().execute(users.select())
ctx = sess.query(User)._compile_context()
def go():
result = loading.instances(r, ctx).scalars().unique()
result = list(result)
eq_(result, self.static.user_all_result)
self.assert_sql_count(testing.db, go, 6)
def test_lazy_options(self):
"""An eager relationship can be upgraded to a lazy relationship."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="joined",
)
),
)
sess = fixture_session()
result = (
sess.query(User)
.order_by(User.id)
.options(sa.orm.lazyload(User.addresses))
).all()
def go():
eq_(result, self.static.user_address_result)
self.sql_count_(4, go)
def test_option_propagate(self):
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User, users, properties=dict(orders=relationship(Order))
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(items=relationship(Item, secondary=order_items)),
)
self.mapper_registry.map_imperatively(Item, items)
sess = fixture_session()
oalias = aliased(Order)
# this one is *really weird*
# here's what the test originally had. note two different strategies
# for Order.items
#
# opt1 = sa.orm.joinedload(User.orders, Order.items)
# opt2 = sa.orm.contains_eager(User.orders, Order.items, alias=oalias)
# here's how it would translate. note that the second
# contains_eager() for Order.items just got cancelled out,
# I guess the joinedload() would somehow overrule the contains_eager
#
# opt1 = Load(User).defaultload(User.orders).joinedload(Order.items)
# opt2 = Load(User).contains_eager(User.orders, alias=oalias)
# setting up the options more specifically works however with
# both the old way and the new way
opt1 = sa.orm.joinedload(User.orders, Order.items)
opt2 = sa.orm.contains_eager(User.orders, alias=oalias)
u1 = (
sess.query(User)
.join(oalias, User.orders)
.options(opt1, opt2)
.first()
)
ustate = attributes.instance_state(u1)
assert opt1 in ustate.load_options
assert opt2 not in ustate.load_options
@testing.combinations(
(
lambda User, Order: (
joinedload(User.orders),
contains_eager(User.orders),
),
r"Loader strategies for ORM Path\[Mapper\[User\(users\)\] -> "
r"User.orders -> Mapper\[Order\(orders\)\]\] conflict",
),
(
lambda User, Order: (
joinedload(User.orders),
joinedload(User.orders).joinedload(Order.items),
),
None,
),
(
lambda User, Order: (
joinedload(User.orders),
joinedload(User.orders, innerjoin=True).joinedload(
Order.items
),
),
r"Loader strategies for ORM Path\[Mapper\[User\(users\)\] -> "
r"User.orders -> Mapper\[Order\(orders\)\]\] conflict",
),
(
lambda User: (defer(User.name), undefer(User.name)),
r"Loader strategies for ORM Path\[Mapper\[User\(users\)\] -> "
r"User.name\] conflict",
),
)
def test_conflicts(self, make_opt, errmsg):
"""introduce a new error for conflicting options in SQLAlchemy 2.0.
This case seems to be fairly difficult to come up with randomly
so let's see if we can refuse to guess for this case.
"""
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
self.mapper_registry.map_imperatively(
User, users, properties=dict(orders=relationship(Order))
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(items=relationship(Item, secondary=order_items)),
)
self.mapper_registry.map_imperatively(Item, items)
sess = fixture_session()
opt = testing.resolve_lambda(
make_opt, User=User, Order=Order, Item=Item
)
if errmsg:
with expect_raises_message(sa.exc.InvalidRequestError, errmsg):
sess.query(User).options(opt)._compile_context()
else:
sess.query(User).options(opt)._compile_context()
| MapperOptionsTest |
python | pallets__werkzeug | tests/conftest.py | {
"start": 835,
"end": 7738
} | class ____:
"""Manage a live dev server process and make requests to it. Must be used
as a context manager.
If ``hostname`` starts with ``unix://``, the server listens to a unix socket
file instead of a TCP socket.
If ``port`` is not given, a random port is reserved for use by the server,
to allow multiple servers to run simultaneously.
If ``ssl_context`` is given, the server listens with TLS enabled. It can be
the special value ``custom`` to generate and pass a context to
``run_simple``, as opposed to ``adhoc`` which tells ``run_simple`` to
generate the context.
:param app_name: The name of the app from the ``live_apps`` folder to load.
:param tmp_path: The current test's temporary directory. The server process
sets the working dir here, it is added to the Python path, the log file
is written here, and for unix connections the socket is opened here.
:param server_kwargs: Arguments to pass to ``live_apps/run.py`` to control
how ``run_simple`` is called in the subprocess.
"""
scheme: str
"""One of ``http``, ``https``, or ``unix``. Set based on ``ssl_context`` or
``hostname``.
"""
addr: str
"""The host and port."""
url: str
"""The scheme, host, and port."""
def __init__(
self, app_name: str = "standard", *, tmp_path: Path, **server_kwargs: t.Any
) -> None:
host = server_kwargs.get("hostname", "127.0.0.1")
if not host.startswith("unix://"):
port = server_kwargs.get("port")
if port is None:
server_kwargs["port"] = port = ephemeral_port_reserve.reserve(host)
self.scheme = "https" if "ssl_context" in server_kwargs else "http"
self.addr = f"{host}:{port}"
self.url = f"{self.scheme}://{self.addr}"
else:
self.scheme = "unix"
self.addr = host[7:] # strip "unix://"
self.url = host
self._app_name = app_name
self._server_kwargs = server_kwargs
self._tmp_path = tmp_path
self._log_write: t.IO[bytes] | None = None
self._log_read: t.IO[str] | None = None
self._proc: subprocess.Popen[bytes] | None = None
def __enter__(self) -> te.Self:
"""Start the server process and wait for it to be ready."""
log_path = self._tmp_path / "log.txt"
self._log_write = open(log_path, "wb")
self._log_read = open(log_path, encoding="utf8", errors="surrogateescape")
tmp_dir = os.fspath(self._tmp_path)
self._proc = subprocess.Popen(
[
sys.executable,
os.fspath(Path(__file__).parent / "live_apps/run.py"),
self._app_name,
json.dumps(self._server_kwargs),
],
env={**os.environ, "PYTHONUNBUFFERED": "1", "PYTHONPATH": tmp_dir},
cwd=tmp_dir,
close_fds=True,
stdout=self._log_write,
stderr=subprocess.STDOUT,
)
self.wait_ready()
return self
def __exit__(
self,
exc_type: type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
"""Clean up the server process."""
assert self._proc is not None
self._proc.terminate()
self._proc.wait()
self._proc = None
assert self._log_read is not None
self._log_read.close()
self._log_read = None
assert self._log_write is not None
self._log_write.close()
self._log_write = None
def connect(self, **kwargs: t.Any) -> http.client.HTTPConnection:
"""Create a connection to the server, without sending a request.
Useful if a test requires lower level methods to try something that
``HTTPClient.request`` will not do.
If the server's scheme is HTTPS and the TLS ``context`` argument is not
given, a default permissive context is used.
:param kwargs: Arguments to :class:`http.client.HTTPConnection`.
"""
if self.scheme == "https":
if "context" not in kwargs:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
kwargs["context"] = context
return http.client.HTTPSConnection(self.addr, **kwargs)
if self.scheme == "unix":
return UnixSocketHTTPConnection(self.addr, **kwargs)
return http.client.HTTPConnection(self.addr, **kwargs)
def request(self, url: str = "", **kwargs: t.Any) -> DataHTTPResponse:
"""Open a connection and make a request to the server, returning the
response.
The response object ``data`` parameter has the result of
``response.read()``. If the response has a ``application/json`` content
type, the ``json`` parameter is populated with ``json.loads(data)``.
:param url: URL to put in the request line.
:param kwargs: Arguments to :meth:`http.client.HTTPConnection.request`.
"""
kwargs.setdefault("method", "GET")
kwargs["url"] = url
response: DataHTTPResponse
with closing(self.connect()) as conn:
conn.request(**kwargs)
with conn.getresponse() as response: # type: ignore[assignment]
response.data = response.read()
if response.headers.get("Content-Type", "").startswith("application/json"):
response.json = json.loads(response.data)
else:
response.json = None
return response
def wait_ready(self) -> None:
"""Wait until a request to ``/ensure`` is successful, indicating the
server has started and is listening.
"""
while True:
try:
self.request("/ensure")
return
# ConnectionRefusedError for http, FileNotFoundError for unix
except (ConnectionRefusedError, FileNotFoundError):
time.sleep(0.1)
def read_log(self) -> str:
"""Read from the current position to the current end of the log."""
assert self._log_read is not None
return self._log_read.read()
def wait_for_log(self, value: str) -> None:
"""Wait until a line in the log contains the given string.
:param value: The string to search for.
"""
assert self._log_read is not None
while True:
for line in self._log_read:
if value in line:
return
time.sleep(0.1)
def wait_for_reload(self) -> None:
"""Wait until the server logs that it is restarting, then wait for it to
be ready.
"""
self.wait_for_log("Restarting with")
self.wait_ready()
| DevServerClient |
python | jmcnamara__XlsxWriter | xlsxwriter/test/drawing/test_drawing_image01.py | {
"start": 399,
"end": 6752
} | class ____(unittest.TestCase):
"""
Test assembling a complete Drawing file.
"""
def test_assemble_xml_file(self):
"""Test writing a drawing with no cell data."""
self.maxDiff = None
fh = StringIO()
drawing = Drawing()
drawing._set_filehandle(fh)
dimensions = [2, 1, 0, 0, 3, 6, 533257, 190357, 1219200, 190500, 0, 0]
drawing_object = DrawingInfo()
drawing_object._drawing_type = DrawingTypes.IMAGE
drawing_object._dimensions = dimensions
drawing_object._width = 1142857
drawing_object._height = 1142857
drawing_object._description = "republic.png"
drawing_object._shape = None
drawing_object._anchor = 2
drawing_object._rel_index = 1
drawing_object._url = None
drawing._add_drawing_object(drawing_object)
drawing.embedded = 1
drawing._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<xdr:wsDr xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<xdr:twoCellAnchor editAs="oneCell">
<xdr:from>
<xdr:col>2</xdr:col>
<xdr:colOff>0</xdr:colOff>
<xdr:row>1</xdr:row>
<xdr:rowOff>0</xdr:rowOff>
</xdr:from>
<xdr:to>
<xdr:col>3</xdr:col>
<xdr:colOff>533257</xdr:colOff>
<xdr:row>6</xdr:row>
<xdr:rowOff>190357</xdr:rowOff>
</xdr:to>
<xdr:pic>
<xdr:nvPicPr>
<xdr:cNvPr id="2" name="Picture 1" descr="republic.png"/>
<xdr:cNvPicPr>
<a:picLocks noChangeAspect="1"/>
</xdr:cNvPicPr>
</xdr:nvPicPr>
<xdr:blipFill>
<a:blip xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:embed="rId1"/>
<a:stretch>
<a:fillRect/>
</a:stretch>
</xdr:blipFill>
<xdr:spPr>
<a:xfrm>
<a:off x="1219200" y="190500"/>
<a:ext cx="1142857" cy="1142857"/>
</a:xfrm>
<a:prstGeom prst="rect">
<a:avLst/>
</a:prstGeom>
</xdr:spPr>
</xdr:pic>
<xdr:clientData/>
</xdr:twoCellAnchor>
</xdr:wsDr>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
def test_assemble_xml_file_with_url(self):
"""Test writing a drawing with no cell data."""
self.maxDiff = None
url = Url("https://test")
url.tip = "this is a tooltip"
url._rel_index = 1
fh = StringIO()
drawing = Drawing()
drawing._set_filehandle(fh)
drawing = Drawing()
drawing._set_filehandle(fh)
dimensions = [2, 1, 0, 0, 3, 6, 533257, 190357, 1219200, 190500, 0, 0]
drawing_object = DrawingInfo()
drawing_object._drawing_type = DrawingTypes.IMAGE
drawing_object._dimensions = dimensions
drawing_object._width = 1142857
drawing_object._height = 1142857
drawing_object._description = "republic.png"
drawing_object._shape = None
drawing_object._anchor = 2
drawing_object._rel_index = 2
drawing_object._url = url
drawing._add_drawing_object(drawing_object)
drawing.embedded = 1
drawing._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<xdr:wsDr xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<xdr:twoCellAnchor editAs="oneCell">
<xdr:from>
<xdr:col>2</xdr:col>
<xdr:colOff>0</xdr:colOff>
<xdr:row>1</xdr:row>
<xdr:rowOff>0</xdr:rowOff>
</xdr:from>
<xdr:to>
<xdr:col>3</xdr:col>
<xdr:colOff>533257</xdr:colOff>
<xdr:row>6</xdr:row>
<xdr:rowOff>190357</xdr:rowOff>
</xdr:to>
<xdr:pic>
<xdr:nvPicPr>
<xdr:cNvPr id="2" name="Picture 1" descr="republic.png">
<a:hlinkClick xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId1" tooltip="this is a tooltip"/>
</xdr:cNvPr>
<xdr:cNvPicPr>
<a:picLocks noChangeAspect="1"/>
</xdr:cNvPicPr>
</xdr:nvPicPr>
<xdr:blipFill>
<a:blip xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:embed="rId2"/>
<a:stretch>
<a:fillRect/>
</a:stretch>
</xdr:blipFill>
<xdr:spPr>
<a:xfrm>
<a:off x="1219200" y="190500"/>
<a:ext cx="1142857" cy="1142857"/>
</a:xfrm>
<a:prstGeom prst="rect">
<a:avLst/>
</a:prstGeom>
</xdr:spPr>
</xdr:pic>
<xdr:clientData/>
</xdr:twoCellAnchor>
</xdr:wsDr>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleDrawing |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_api_key_details.py | {
"start": 727,
"end": 1132
} | class ____(OrganizationApiKeyDetailsBase):
def test_api_key_no_exist(self) -> None:
self.get_error_response(self.organization.slug, 123456, status_code=404)
def test_get_api_details(self) -> None:
response = self.get_success_response(self.organization.slug, self.api_key.id)
assert response.data.get("id") == str(self.api_key.id)
@control_silo_test
| OrganizationApiKeyDetails |
python | django__django | django/forms/utils.py | {
"start": 1420,
"end": 1923
} | class ____:
def get_context(self):
raise NotImplementedError(
"Subclasses of RenderableMixin must provide a get_context() method."
)
def render(self, template_name=None, context=None, renderer=None):
renderer = renderer or self.renderer
template = template_name or self.template_name
context = context or self.get_context()
return mark_safe(renderer.render(template, context))
__str__ = render
__html__ = render
| RenderableMixin |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 16580,
"end": 16720
} | class ____(TokenStreamException):
def __init__(self, *args):
TokenStreamException.__init__(self, *args)
| TokenStreamRetryException |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solverHigherOrder5.py | {
"start": 2291,
"end": 5143
} | class ____(Generic[A, B]):
left: A
right: B
def func1(f: Callable[[A], B]) -> Callable[[Pair[A, X]], Pair[B, X]]: ...
def test_3(pair: Pair[Pair[A, B], C]) -> Pair[Pair[A, B], C]:
val1 = func1(func1(identity))
reveal_type(
val1,
expected_text="(Pair[Pair[T@identity, X(1)@func1], X@func1]) -> Pair[Pair[T@identity, X(1)@func1], X@func1]",
)
val2 = val1(pair)
reveal_type(val2, expected_text="Pair[Pair[A@test_3, B@test_3], C@test_3]")
return val2
def test_4(pair: Pair[Pair[Pair[A, B], C], D]) -> Pair[Pair[Pair[A, B], C], D]:
val1 = func1(func1(func1(identity)))
reveal_type(
val1,
expected_text="(Pair[Pair[Pair[T@identity, X(2)@func1], X(1)@func1], X@func1]) -> Pair[Pair[Pair[T@identity, X(2)@func1], X(1)@func1], X@func1]",
)
val2 = val1(pair)
return val2
@overload
def test_5(a: Callable[P, type[T]], *, b: Literal[0] = ...) -> type[list[type[T]]]: ...
@overload
def test_5(a: T, *args: int, b: Literal[False, None] = ...) -> type[list[T]]: ...
@overload
def test_5(a: T, *args: int, b: Literal[True] = ...) -> type[list[T]]: ...
def test_5(a: Any, *args: int, b: Any = ...) -> Any: ...
val3 = test_5(test_5, **{})
reveal_type(
val3,
expected_text="Unknown",
)
val4 = test_5(test_5, b=True)
reveal_type(
val4,
expected_text="type[list[Overload[(a: (**P(1)@test_5) -> type[T(1)@test_5], *, b: Literal[0] = ...) -> type[list[type[T(1)@test_5]]], (a: T(1)@test_5, *args: int, b: Literal[False] | None = ...) -> type[list[T(1)@test_5]], (a: T(1)@test_5, *args: int, b: Literal[True] = ...) -> type[list[T(1)@test_5]]]]]",
)
def test_6(g: Callable[[B], C]) -> Callable[[Callable[[A], B]], Callable[[A], C]]: ...
val5 = test_6(test_6)
reveal_type(
val5,
expected_text="((A@test_6) -> ((B(1)@test_6) -> C(1)@test_6)) -> ((A@test_6) -> ((((A(1)@test_6) -> B(1)@test_6)) -> ((A(1)@test_6) -> C(1)@test_6)))",
)
def test_7(
g: Callable[[C], D],
) -> Callable[[Callable[[A], Callable[[B], C]]], Callable[[A], Callable[[B], D]]]:
val6 = test_6(test_6)(test_6)(g)
reveal_type(
val6,
expected_text="((A(1)@test_6) -> ((A(2)@test_6) -> C@test_7)) -> ((A(1)@test_6) -> ((A(2)@test_6) -> D@test_7))",
)
return val6
def test_8(fn: Callable[[*Ts], Callable[[A], B]]) -> Callable[[A, *Ts], B]: ...
def test_9(x: Callable[[bool], Callable[[int], Callable[[str], None]]]):
test_8(test_8(x))
def test_10(func: Callable[[*Ts], Any], *args: *Ts) -> Any: ...
def func2() -> None: ...
test_10(test_10, func2)
def test_11(func: Callable[[*Ts], T], *args: *Ts) -> T:
return func(*args)
def func3(num: int, /) -> int:
return num
test_11(test_11, func3, 123)
# This will generate an error, but it should not crash or cause an infinite loop.
test_11(test_11, test_11, func3, 123)
| Pair |
python | tiangolo__fastapi | docs_src/header_param_models/tutorial002_an.py | {
"start": 158,
"end": 486
} | class ____(BaseModel):
model_config = {"extra": "forbid"}
host: str
save_data: bool
if_modified_since: Union[str, None] = None
traceparent: Union[str, None] = None
x_tag: List[str] = []
@app.get("/items/")
async def read_items(headers: Annotated[CommonHeaders, Header()]):
return headers
| CommonHeaders |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 4663,
"end": 4813
} | class ____(SQLRole):
__slots__ = ()
_role_name = (
"IN expression list, SELECT construct, or bound parameter object"
)
| InElementRole |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_latex.py | {
"start": 7609,
"end": 10593
} | class ____:
def test_to_latex_no_header_with_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(header=False)
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_no_header_without_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(index=False, header=False)
expected = _dedent(
r"""
\begin{tabular}{rl}
\toprule
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_specified_header_with_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(header=["AA", "BB"])
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
& AA & BB \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_specified_header_without_index(self):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(header=["AA", "BB"], index=False)
expected = _dedent(
r"""
\begin{tabular}{rl}
\toprule
AA & BB \\
\midrule
1 & b1 \\
2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
@pytest.mark.parametrize(
"header, num_aliases",
[
(["A"], 1),
(("B",), 1),
(("Col1", "Col2", "Col3"), 3),
(("Col1", "Col2", "Col3", "Col4"), 4),
],
)
def test_to_latex_number_of_items_in_header_missmatch_raises(
self,
header,
num_aliases,
):
# GH 7124
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
msg = f"Writing 2 cols but got {num_aliases} aliases"
with pytest.raises(ValueError, match=msg):
df.to_latex(header=header)
def test_to_latex_decimal(self):
# GH 12031
df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]})
result = df.to_latex(decimal=",")
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
& a & b \\
\midrule
0 & 1,000000 & b1 \\
1 & 2,100000 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
| TestToLatexHeader |
python | kamyu104__LeetCode-Solutions | Python/minimize-length-of-array-using-operations.py | {
"start": 38,
"end": 275
} | class ____(object):
def minimumArrayLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
mn = min(nums)
return (nums.count(mn)+1)//2 if all(x%mn == 0 for x in nums) else 1
| Solution |
python | RaRe-Technologies__gensim | gensim/models/word2vec.py | {
"start": 95570,
"end": 97078
} | class ____:
def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
"""Iterate over sentences from the "text8" corpus, unzipped from https://mattmahoney.net/dc/text8.zip."""
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
# so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
sentence, rest = [], b''
with utils.open(self.fname, 'rb') as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
words = utils.to_unicode(text).split()
sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
| Text8Corpus |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 19442,
"end": 22434
} | class ____(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(
self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0
) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = Data2VecVisionAttention(config, window_size=window_size)
self.intermediate = Data2VecVisionIntermediate(config)
self.output = Data2VecVisionOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.drop_path = Data2VecVisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
init_values = config.layer_scale_init_value
if init_values > 0:
self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
else:
self.lambda_1, self.lambda_2 = None, None
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int, int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
self_attention_outputs = self.attention(
self.layernorm_before(hidden_states), # in Data2VecVision, layernorm is applied before self-attention
output_attentions=output_attentions,
relative_position_bias=relative_position_bias,
interpolate_pos_encoding=interpolate_pos_encoding,
resolution=resolution,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# apply lambda_1 if present
if self.lambda_1 is not None:
attention_output = self.lambda_1 * attention_output
# first residual connection
hidden_states = self.drop_path(attention_output) + hidden_states
# in Data2VecVision, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
if self.lambda_2 is not None:
layer_output = self.lambda_2 * layer_output
# second residual connection
layer_output = self.drop_path(layer_output) + hidden_states
outputs = (layer_output,) + outputs
return outputs
# Copied from transformers.models.beit.modeling_beit.BeitRelativePositionBias with Beit->Data2VecVision
| Data2VecVisionLayer |
python | prabhupant__python-ds | data_structures/array/transpose_matrix.py | {
"start": 0,
"end": 337
} | class ____:
def transpose(self, A: List[List[int]]) -> List[List[int]]:
l=[]
i=0
while(i!=len(A[0])):
x=[]
j=0
while(j<len(A)):
x.append(A[j][i])
j+=1
if(x!=[]):
l.append(x)
i+=1
return(l)
| Solution |
python | spyder-ide__spyder | spyder/plugins/projects/widgets/qcookiecutter.py | {
"start": 772,
"end": 2720
} | class ____(QtWidgets.QDialog):
"""
QDialog to display cookiecutter.json options.
cookiecutter_settings: dict
A cookiecutter.json settings content.
pre_gen_code: str
The code of the pregeneration script.
"""
sig_validated = QtCore.Signal(int, str)
"""
This signal is emitted after validation has been executed.
It provides the process exit code and the output captured.
"""
def __init__(self, parent, cookiecutter_settings=None, pre_gen_code=None):
super().__init__(parent)
self._widget = CookiecutterWidget(
self, cookiecutter_settings,
pre_gen_code
)
self._info_label = QtWidgets.QLabel()
self._validate_button = QtWidgets.QPushButton("Validate")
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self._widget)
layout.addWidget(self._info_label)
layout.addWidget(self._validate_button)
self.setLayout(layout)
# Signals
self._validate_button.clicked.connect(self.validate)
self._widget.sig_validated.connect(self._set_message)
self._widget.sig_validated.connect(self.sig_validated)
def _set_message(self, exit_code, message):
if exit_code != 0:
self._info_label.setText(message)
def setup(self, cookiecutter_settings):
"""
Setup the widget using options.
"""
self._widget.setup(cookiecutter_settings)
def set_pre_gen_code(self, pre_gen_code):
"""
Set the cookiecutter pregeneration code.
"""
self._widget.set_pre_gen_code(pre_gen_code)
def validate(self):
"""
Run, pre generation script and provide information on finished.
"""
self._widget.validate()
def get_values(self):
"""
Return all entered and generated values.
"""
return self._widget.get_values()
| CookiecutterDialog |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 15177,
"end": 15284
} | class ____(Exception):
def __init__(self, message="default"):
super().__init__(message)
| CustomError |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 24971,
"end": 25031
} | class ____(HtmlMixin, etree.CommentBase):
pass
| HtmlComment |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 92235,
"end": 94980
} | class ____(unittest.TestCase):
def test_empty_list(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.List([])
o = TestObjectFactory()
self.assertEqual([], o.one)
def test_naive_list(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.List([1])
o = TestObjectFactory()
self.assertEqual([1], o.one)
def test_long_list(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.List(list(range(100)))
o = TestObjectFactory()
self.assertEqual(list(range(100)), o.one)
def test_sequence_list(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.List([factory.Sequence(lambda n: n + 2)])
o1 = TestObjectFactory()
o2 = TestObjectFactory()
self.assertEqual([2], o1.one)
self.assertEqual([3], o2.one)
def test_list_override(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.List([1])
o = TestObjectFactory(one__0=2)
self.assertEqual([2], o.one)
def test_list_extra_key(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.List([1])
o = TestObjectFactory(one__1=2)
self.assertEqual([1, 2], o.one)
def test_list_merged_fields(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
two = 13
one = factory.List([
1,
2,
factory.SelfAttribute('1'),
])
o = TestObjectFactory(one__0=42)
self.assertEqual([42, 2, 2], o.one)
def test_nested_lists(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 1
two = factory.List([
3,
factory.SelfAttribute('0'),
factory.List([
5,
factory.SelfAttribute('..0'),
factory.SelfAttribute('...one'),
]),
])
o = TestObjectFactory()
self.assertEqual(1, o.one)
self.assertEqual([
3,
3,
[
5,
3,
1,
],
], o.two)
| ListTestCase |
python | keon__algorithms | algorithms/graph/graph.py | {
"start": 1542,
"end": 2926
} | class ____:
"""
A directed graph.
Stores a set of nodes, edges and adjacency matrix.
"""
# pylint: disable=dangerous-default-value
def __init__(self, load_dict={}):
self.nodes = []
self.edges = []
self.adjacency_list = {}
if load_dict and isinstance(load_dict, dict):
for vertex in load_dict:
node_from = self.add_node(vertex)
self.adjacency_list[node_from] = []
for neighbor in load_dict[vertex]:
node_to = self.add_node(neighbor)
self.adjacency_list[node_from].append(node_to)
self.add_edge(vertex, neighbor)
def add_node(self, node_name):
"""
Add a new named node to the graph.
"""
try:
return self.nodes[self.nodes.index(node_name)]
except ValueError:
node = Node(node_name)
self.nodes.append(node)
return node
def add_edge(self, node_name_from, node_name_to):
"""
Add a new edge to the graph between two nodes.
"""
try:
node_from = self.nodes[self.nodes.index(node_name_from)]
node_to = self.nodes[self.nodes.index(node_name_to)]
self.edges.append(DirectedEdge(node_from, node_to))
except ValueError:
pass
| DirectedGraph |
python | openai__openai-python | src/openai/_base_client.py | {
"start": 28697,
"end": 45163
} | class ____(BaseClient[httpx.Client, Stream[Any]]):
_client: httpx.Client
_default_stream_cls: type[Stream[Any]] | None = None
def __init__(
self,
*,
version: str,
base_url: str | URL,
max_retries: int = DEFAULT_MAX_RETRIES,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
_strict_response_validation: bool,
) -> None:
if not is_given(timeout):
# if the user passed in a custom http client with a non-default
# timeout set then we use that timeout.
#
# note: there is an edge case here where the user passes in a client
# where they've explicitly set the timeout to match the default timeout
# as this check is structural, meaning that we'll think they didn't
# pass in a timeout and will ignore it
if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT:
timeout = http_client.timeout
else:
timeout = DEFAULT_TIMEOUT
if http_client is not None and not isinstance(http_client, httpx.Client): # pyright: ignore[reportUnnecessaryIsInstance]
raise TypeError(
f"Invalid `http_client` argument; Expected an instance of `httpx.Client` but got {type(http_client)}"
)
super().__init__(
version=version,
# cast to a valid type because mypy doesn't understand our type narrowing
timeout=cast(Timeout, timeout),
base_url=base_url,
max_retries=max_retries,
custom_query=custom_query,
custom_headers=custom_headers,
_strict_response_validation=_strict_response_validation,
)
self._client = http_client or SyncHttpxClientWrapper(
base_url=base_url,
# cast to a valid type because mypy doesn't understand our type narrowing
timeout=cast(Timeout, timeout),
)
def is_closed(self) -> bool:
return self._client.is_closed
def close(self) -> None:
"""Close the underlying HTTPX client.
The client will *not* be usable after this.
"""
# If an error is thrown while constructing a client, self._client
# may not be present
if hasattr(self, "_client"):
self._client.close()
def __enter__(self: _T) -> _T:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.close()
def _prepare_options(
self,
options: FinalRequestOptions, # noqa: ARG002
) -> FinalRequestOptions:
"""Hook for mutating the given options"""
return options
def _prepare_request(
self,
request: httpx.Request, # noqa: ARG002
) -> None:
"""This method is used as a callback for mutating the `Request` object
after it has been constructed.
This is useful for cases where you want to add certain headers based off of
the request properties, e.g. `url`, `method` etc.
"""
return None
@overload
def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: Literal[True],
stream_cls: Type[_StreamT],
) -> _StreamT: ...
@overload
def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: Literal[False] = False,
) -> ResponseT: ...
@overload
def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: bool = False,
stream_cls: Type[_StreamT] | None = None,
) -> ResponseT | _StreamT: ...
def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
cast_to = self._maybe_override_cast_to(cast_to, options)
# create a copy of the options we were given so that if the
# options are mutated later & we then retry, the retries are
# given the original options
input_options = model_copy(options)
if input_options.idempotency_key is None and input_options.method.lower() != "get":
# ensure the idempotency key is reused between requests
input_options.idempotency_key = self._idempotency_key()
response: httpx.Response | None = None
max_retries = input_options.get_max_retries(self.max_retries)
retries_taken = 0
for retries_taken in range(max_retries + 1):
options = model_copy(input_options)
options = self._prepare_options(options)
remaining_retries = max_retries - retries_taken
request = self._build_request(options, retries_taken=retries_taken)
self._prepare_request(request)
kwargs: HttpxSendArgs = {}
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
if options.follow_redirects is not None:
kwargs["follow_redirects"] = options.follow_redirects
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
try:
response = self._client.send(
request,
stream=stream or self._should_stream_response_body(request=request),
**kwargs,
)
except httpx.TimeoutException as err:
log.debug("Encountered httpx.TimeoutException", exc_info=True)
if remaining_retries > 0:
self._sleep_for_retry(
retries_taken=retries_taken,
max_retries=max_retries,
options=input_options,
response=None,
)
continue
log.debug("Raising timeout error")
raise APITimeoutError(request=request) from err
except Exception as err:
log.debug("Encountered Exception", exc_info=True)
if remaining_retries > 0:
self._sleep_for_retry(
retries_taken=retries_taken,
max_retries=max_retries,
options=input_options,
response=None,
)
continue
log.debug("Raising connection error")
raise APIConnectionError(request=request) from err
log.debug(
'HTTP Response: %s %s "%i %s" %s',
request.method,
request.url,
response.status_code,
response.reason_phrase,
response.headers,
)
log.debug("request_id: %s", response.headers.get("x-request-id"))
try:
response.raise_for_status()
except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
if remaining_retries > 0 and self._should_retry(err.response):
err.response.close()
self._sleep_for_retry(
retries_taken=retries_taken,
max_retries=max_retries,
options=input_options,
response=response,
)
continue
# If the response is streamed then we need to explicitly read the response
# to completion before attempting to access the response text.
if not err.response.is_closed:
err.response.read()
log.debug("Re-raising status error")
raise self._make_status_error_from_response(err.response) from None
break
assert response is not None, "could not resolve response (should never happen)"
return self._process_response(
cast_to=cast_to,
options=options,
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=retries_taken,
)
def _sleep_for_retry(
self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
) -> None:
remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
log.debug("1 retry left")
else:
log.debug("%i retries left", remaining_retries)
timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
log.info("Retrying request to %s in %f seconds", options.url, timeout)
time.sleep(timeout)
def _process_response(
self,
*,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
ResponseT,
LegacyAPIResponse(
raw=response,
client=self,
cast_to=cast_to,
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)
origin = get_origin(cast_to) or cast_to
if (
inspect.isclass(origin)
and issubclass(origin, BaseAPIResponse)
# we only want to actually return the custom BaseAPIResponse class if we're
# returning the raw response, or if we're not streaming SSE, as if we're streaming
# SSE then `cast_to` doesn't actively reflect the type we need to parse into
and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
):
if not issubclass(origin, APIResponse):
raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
response_cls = cast("type[BaseAPIResponse[Any]]", cast_to)
return cast(
ResponseT,
response_cls(
raw=response,
client=self,
cast_to=extract_response_type(response_cls),
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)
if cast_to == httpx.Response:
return cast(ResponseT, response)
api_response = APIResponse(
raw=response,
client=self,
cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast]
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
return api_response.parse()
def _request_api_list(
self,
model: Type[object],
page: Type[SyncPageT],
options: FinalRequestOptions,
) -> SyncPageT:
def _parser(resp: SyncPageT) -> SyncPageT:
resp._set_private_attributes(
client=self,
model=model,
options=options,
)
return resp
options.post_parser = _parser
return self.request(page, options, stream=False)
@overload
def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT: ...
@overload
def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_StreamT],
) -> _StreamT: ...
@overload
def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: bool,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT: ...
def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
opts = FinalRequestOptions.construct(method="get", url=path, **options)
# cast is required because mypy complains about returning Any even though
# it understands the type variables
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
@overload
def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[False] = False,
) -> ResponseT: ...
@overload
def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: Literal[True],
stream_cls: type[_StreamT],
) -> _StreamT: ...
@overload
def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: bool,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT: ...
def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
files: RequestFiles | None = None,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
opts = FinalRequestOptions.construct(
method="post", url=path, json_data=body, files=to_httpx_files(files), **options
)
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
def patch(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
return self.request(cast_to, opts)
def put(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(
method="put", url=path, json_data=body, files=to_httpx_files(files), **options
)
return self.request(cast_to, opts)
def delete(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
return self.request(cast_to, opts)
def get_api_list(
self,
path: str,
*,
model: Type[object],
page: Type[SyncPageT],
body: Body | None = None,
options: RequestOptions = {},
method: str = "get",
) -> SyncPageT:
opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options)
return self._request_api_list(model, page, opts)
| SyncAPIClient |
python | pypa__virtualenv | src/virtualenv/activation/nushell/__init__.py | {
"start": 106,
"end": 1464
} | class ____(ViaTemplateActivator):
def templates(self):
yield "activate.nu"
@staticmethod
def quote(string):
"""
Nushell supports raw strings like: r###'this is a string'###.
https://github.com/nushell/nushell.github.io/blob/main/book/working_with_strings.md
This method finds the maximum continuous sharps in the string and then
quote it with an extra sharp.
"""
max_sharps = 0
current_sharps = 0
for char in string:
if char == "#":
current_sharps += 1
max_sharps = max(current_sharps, max_sharps)
else:
current_sharps = 0
wrapping = "#" * (max_sharps + 1)
return f"r{wrapping}'{string}'{wrapping}"
def replacements(self, creator, dest_folder): # noqa: ARG002
return {
"__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
"__VIRTUAL_ENV__": str(creator.dest),
"__VIRTUAL_NAME__": creator.env_name,
"__BIN_NAME__": str(creator.bin_dir.relative_to(creator.dest)),
"__TCL_LIBRARY__": getattr(creator.interpreter, "tcl_lib", None) or "",
"__TK_LIBRARY__": getattr(creator.interpreter, "tk_lib", None) or "",
}
__all__ = [
"NushellActivator",
]
| NushellActivator |
python | tensorflow__tensorflow | tensorflow/compiler/tests/dynamic_stitch_test.py | {
"start": 983,
"end": 3703
} | class ____(xla_test.XLATestCase):
def _AssertDynamicStitchResultIs(self, indices, data, expected):
with self.session() as session:
index_placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype)) for arg in indices
]
data_placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype)) for arg in data
]
with self.test_scope():
output = data_flow_ops.dynamic_stitch(index_placeholders,
data_placeholders)
feed_dict = {}
for placeholder, value in zip(index_placeholders, indices):
feed_dict[placeholder] = value
for placeholder, value in zip(data_placeholders, data):
feed_dict[placeholder] = value
result = session.run(output, feed_dict=feed_dict)
self.assertAllClose(expected, result, rtol=1e-3)
def testSimpleEmpty(self):
idx1 = np.array([0, 2], dtype=np.int32)
idx2 = np.array([[1], [3]], dtype=np.int32)
val1 = np.array([[], []], dtype=np.int32)
val2 = np.array([[[]], [[]]], dtype=np.int32)
self._AssertDynamicStitchResultIs(
[idx1, idx2], [val1, val2],
expected=np.array([[], [], [], []], np.int32))
def testEmptyIndex(self):
idx1 = np.array([], dtype=np.int32)
idx2 = np.array([[], []], dtype=np.int32)
val1 = np.ndarray(shape=(0, 9), dtype=np.int32)
val2 = np.ndarray(shape=(2, 0, 9), dtype=np.int32)
self._AssertDynamicStitchResultIs([idx1, idx2], [val1, val2],
expected=np.ndarray(
shape=(0, 9), dtype=np.int32))
def testSimple1D(self):
val1 = np.array([0, 4, 7], dtype=np.int32)
val2 = np.array([1, 6, 2, 3, 5], dtype=np.int32)
val3 = np.array([0, 40, 70], dtype=np.float32)
val4 = np.array([10, 60, 20, 30, 50], dtype=np.float32)
expected = np.array([0, 10, 20, 30, 40, 50, 60, 70], dtype=np.float32)
self._AssertDynamicStitchResultIs(
[val1, val2], [val3, val4], expected=expected)
def testSimple2D(self):
val1 = np.array([0, 4, 7], dtype=np.int32)
val2 = np.array([1, 6], dtype=np.int32)
val3 = np.array([2, 3, 5], dtype=np.int32)
val4 = np.array([[0, 1], [40, 41], [70, 71]], dtype=np.float32)
val5 = np.array([[10, 11], [60, 61]], dtype=np.float32)
val6 = np.array([[20, 21], [30, 31], [50, 51]], dtype=np.float32)
expected = np.array(
[[0, 1], [10, 11], [20, 21], [30, 31], [40, 41], [50, 51], [60, 61],
[70, 71]],
dtype=np.float32)
self._AssertDynamicStitchResultIs(
[val1, val2, val3], [val4, val5, val6], expected=expected)
if __name__ == "__main__":
googletest.main()
| DynamicStitchTest |
python | astropy__astropy | astropy/io/votable/tests/test_vo.py | {
"start": 24727,
"end": 25927
} | class ____(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
def test_schema(self, tmp_path):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = tmp_path / "test_through_tabledata.xml"
with open(fn, "wb") as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, "1.1")
| TestThroughTableData |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/store.py | {
"start": 4383,
"end": 7442
} | class ____(BaseStore):
@classmethod
def _cleanup_old_entries(cls):
"""
Enforce the cache size limit - keeping only the most recently used entries
up to RESULTS_CACHE_SIZE.
"""
# Determine which entries to keep
keep_ids = cls.request_ids()
# Delete all entries not in the keep list
if keep_ids:
HistoryEntry.objects.exclude(request_id__in=keep_ids).delete()
@classmethod
def request_ids(cls):
"""Return all stored request ids within the cache size limit"""
cache_size = dt_settings.get_config()["RESULTS_CACHE_SIZE"]
return list(
HistoryEntry.objects.all()[:cache_size].values_list("request_id", flat=True)
)
@classmethod
def exists(cls, request_id: str) -> bool:
"""Check if the given request_id exists in the store"""
return HistoryEntry.objects.filter(request_id=request_id).exists()
@classmethod
def set(cls, request_id: str):
"""Set a request_id in the store and clean up old entries"""
with transaction.atomic():
# Create the entry if it doesn't exist (ignore otherwise)
_, created = HistoryEntry.objects.get_or_create(request_id=request_id)
# Only enforce cache size limit when new entries are created
if created:
cls._cleanup_old_entries()
@classmethod
def clear(cls):
"""Remove all requests from the store"""
HistoryEntry.objects.all().delete()
@classmethod
def delete(cls, request_id: str):
"""Delete the stored request for the given request_id"""
HistoryEntry.objects.filter(request_id=request_id).delete()
@classmethod
def save_panel(cls, request_id: str, panel_id: str, data: Any = None):
"""Save the panel data for the given request_id"""
with transaction.atomic():
obj, _ = HistoryEntry.objects.get_or_create(request_id=request_id)
store_data = obj.data
store_data[panel_id] = serialize(data)
obj.data = store_data
obj.save()
@classmethod
def panel(cls, request_id: str, panel_id: str) -> Any:
"""Fetch the panel data for the given request_id"""
try:
data = HistoryEntry.objects.get(request_id=request_id).data
panel_data = data.get(panel_id)
if panel_data is None:
return {}
return deserialize(panel_data)
except HistoryEntry.DoesNotExist:
return {}
@classmethod
def panels(cls, request_id: str) -> Any:
"""Fetch all panel data for the given request_id"""
try:
data = HistoryEntry.objects.get(request_id=request_id).data
for panel_id, panel_data in data.items():
yield panel_id, deserialize(panel_data)
except HistoryEntry.DoesNotExist:
return {}
def get_store() -> BaseStore:
return import_string(dt_settings.get_config()["TOOLBAR_STORE_CLASS"])
| DatabaseStore |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/bigtable.py | {
"start": 1548,
"end": 5371
} | class ____(BaseSensorOperator, BigtableValidationMixin):
"""
Sensor that waits for Cloud Bigtable table to be fully replicated to its clusters.
No exception will be raised if the instance or the table does not exist.
For more details about cluster states for a table, have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/table.html#google.cloud.bigtable.table.Table.get_cluster_states
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableTableReplicationCompletedSensor`
:param instance_id: The ID of the Cloud Bigtable instance.
:param table_id: The ID of the table to check replication status.
:param project_id: Optional, the ID of the Google Cloud project.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES = ("instance_id", "table_id")
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"table_id",
"impersonation_chain",
)
operator_extra_links = (BigtableTablesLink(),)
def __init__(
self,
*,
instance_id: str,
table_id: str,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.table_id = table_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"instance_id": self.instance_id,
"project_id": self.project_id,
}
def poke(self, context: Context) -> bool:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if not instance:
self.log.info("Dependency: instance '%s' does not exist.", self.instance_id)
return False
try:
cluster_states = hook.get_cluster_states_for_table(instance=instance, table_id=self.table_id)
except google.api_core.exceptions.NotFound:
self.log.info(
"Dependency: table '%s' does not exist in instance '%s'.", self.table_id, self.instance_id
)
return False
ready_state = ClusterState(enums.Table.ReplicationState.READY)
is_table_replicated = True
for cluster_id in cluster_states.keys():
if cluster_states[cluster_id] != ready_state:
self.log.info("Table '%s' is not yet replicated on cluster '%s'.", self.table_id, cluster_id)
is_table_replicated = False
if not is_table_replicated:
return False
self.log.info("Table '%s' is replicated.", self.table_id)
BigtableTablesLink.persist(context=context)
return True
| BigtableTableReplicationCompletedSensor |
python | ray-project__ray | release/llm_tests/benchmark/load_test.py | {
"start": 7142,
"end": 7562
} | class ____(abc.ABC):
DEFAULT_MODEL_NAME = None
def __init__(self, model, parsed_options):
self.model = model
self.parsed_options = parsed_options
@abc.abstractmethod
def get_url(self):
...
@abc.abstractmethod
def format_payload(self, prompt, max_tokens, images):
...
@abc.abstractmethod
def parse_output_json(self, json, prompt):
...
| BaseProvider |
python | sanic-org__sanic | sanic/exceptions.py | {
"start": 26471,
"end": 26642
} | class ____(SanicException):
"""Exception raised when a websocket is closed."""
quiet = True
message = "Client has closed the websocket connection"
| WebsocketClosed |
python | mlflow__mlflow | tests/pyfunc/test_pyfunc_model_with_type_hints.py | {
"start": 1384,
"end": 1619
} | class ____(pydantic.BaseModel):
long_field: int
str_field: str
bool_field: bool
double_field: float
any_field: Any
optional_str: Optional[str] = None # noqa: UP045
str_or_none: str | None = None
| CustomExample |
python | modin-project__modin | modin/core/dataframe/pandas/partitioning/partition.py | {
"start": 1293,
"end": 14770
} | class ____(
ABC, ClassLogger, modin_layer="BLOCK-PARTITION", log_level=LogLevel.DEBUG
): # pragma: no cover
"""
An abstract class that is base for any partition class of ``pandas`` storage format.
The class providing an API that has to be overridden by child classes.
"""
_length_cache = None
_width_cache = None
_identity_cache = None
_data = None
execution_wrapper = None
# these variables are intentionally initialized at runtime
# so as not to initialize the engine during import
_iloc_func = None
def __init__(self):
if type(self)._iloc_func is None:
# Places `_iloc` function into the storage to speed up
# remote function calls and caches the result.
# It also postpones engine initialization, which happens
# implicitly when `execution_wrapper.put` is called.
if self.execution_wrapper is not None:
type(self)._iloc_func = staticmethod(
self.execution_wrapper.put(self._iloc)
)
else:
type(self)._iloc_func = staticmethod(self._iloc)
@cached_property
def __constructor__(self) -> type[PandasDataframePartition]:
"""
Create a new instance of this object.
Returns
-------
PandasDataframePartition
New instance of pandas partition.
"""
return type(self)
def get(self):
"""
Get the object wrapped by this partition.
Returns
-------
object
The object that was wrapped by this partition.
Notes
-----
This is the opposite of the classmethod `put`.
E.g. if you assign `x = PandasDataframePartition.put(1)`, `x.get()` should
always return 1.
"""
log = get_logger()
self._is_debug(log) and log.debug(f"ENTER::Partition.get::{self._identity}")
self.drain_call_queue()
result = self.execution_wrapper.materialize(self._data)
self._is_debug(log) and log.debug(f"EXIT::Partition.get::{self._identity}")
return result
@property
def list_of_blocks(self):
"""
Get the list of physical partition objects that compose this partition.
Returns
-------
list
A list of physical partition objects (``ray.ObjectRef``, ``distributed.Future`` e.g.).
"""
# Defer draining call queue until we get the partitions.
# TODO Look into draining call queue at the same time as the task
self.drain_call_queue()
return [self._data]
def apply(self, func, *args, **kwargs):
"""
Apply a function to the object wrapped by this partition.
Parameters
----------
func : callable
Function to apply.
*args : iterable
Additional positional arguments to be passed in `func`.
**kwargs : dict
Additional keyword arguments to be passed in `func`.
Returns
-------
PandasDataframePartition
New `PandasDataframePartition` object.
Notes
-----
It is up to the implementation how `kwargs` are handled. They are
an important part of many implementations. As of right now, they
are not serialized.
"""
pass
def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs):
"""
Add a function to the call queue.
Parameters
----------
func : callable
Function to be added to the call queue.
*args : iterable
Additional positional arguments to be passed in `func`.
length : reference or int, optional
Length, or reference to length, of wrapped ``pandas.DataFrame``.
width : reference or int, optional
Width, or reference to width, of wrapped ``pandas.DataFrame``.
**kwargs : dict
Additional keyword arguments to be passed in `func`.
Returns
-------
PandasDataframePartition
New `PandasDataframePartition` object with the function added to the call queue.
Notes
-----
This function will be executed when `apply` is called. It will be executed
in the order inserted; apply's func operates the last and return.
"""
return self.__constructor__(
self._data,
call_queue=self.call_queue + [[func, args, kwargs]],
length=length,
width=width,
)
def drain_call_queue(self):
"""Execute all operations stored in the call queue on the object wrapped by this partition."""
pass
def wait(self):
"""Wait for completion of computations on the object wrapped by the partition."""
pass
def to_pandas(self):
"""
Convert the object wrapped by this partition to a ``pandas.DataFrame``.
Returns
-------
pandas.DataFrame
Notes
-----
If the underlying object is a pandas DataFrame, this will likely
only need to call `get`.
"""
dataframe = self.get()
assert isinstance(dataframe, (pandas.DataFrame, pandas.Series))
return dataframe
def to_numpy(self, **kwargs):
"""
Convert the object wrapped by this partition to a NumPy array.
Parameters
----------
**kwargs : dict
Additional keyword arguments to be passed in ``to_numpy``.
Returns
-------
np.ndarray
Notes
-----
If the underlying object is a pandas DataFrame, this will return
a 2D NumPy array.
"""
return self.apply(lambda df: df.to_numpy(**kwargs)).get()
@staticmethod
def _iloc(df, row_labels, col_labels): # noqa: RT01, PR01
"""Perform `iloc` on dataframes wrapped in partitions (helper function)."""
return df.iloc[row_labels, col_labels]
def mask(self, row_labels, col_labels):
"""
Lazily create a mask that extracts the indices provided.
Parameters
----------
row_labels : list-like, slice or label
The row labels for the rows to extract.
col_labels : list-like, slice or label
The column labels for the columns to extract.
Returns
-------
PandasDataframePartition
New `PandasDataframePartition` object.
"""
def is_full_axis_mask(index, axis_length):
"""Check whether `index` mask grabs `axis_length` amount of elements."""
if isinstance(index, slice):
return index == slice(None) or (
isinstance(axis_length, int)
and compute_sliced_len(index, axis_length) == axis_length
)
return (
hasattr(index, "__len__")
and isinstance(axis_length, int)
and len(index) == axis_length
)
row_labels = [row_labels] if is_scalar(row_labels) else row_labels
col_labels = [col_labels] if is_scalar(col_labels) else col_labels
if is_full_axis_mask(row_labels, self._length_cache) and is_full_axis_mask(
col_labels, self._width_cache
):
return copy(self)
new_obj = self.add_to_apply_calls(self._iloc_func, row_labels, col_labels)
def try_recompute_cache(indices, previous_cache):
"""Compute new axis-length cache for the masked frame based on its previous cache."""
if not isinstance(indices, slice):
return len(indices)
if not isinstance(previous_cache, int):
return None
return compute_sliced_len(indices, previous_cache)
new_obj._length_cache = try_recompute_cache(row_labels, self._length_cache)
new_obj._width_cache = try_recompute_cache(col_labels, self._width_cache)
return new_obj
@classmethod
def put(cls, obj):
"""
Put an object into a store and wrap it with partition object.
Parameters
----------
obj : object
An object to be put.
Returns
-------
PandasDataframePartition
New `PandasDataframePartition` object.
"""
pass
@classmethod
def preprocess_func(cls, func):
"""
Preprocess a function before an `apply` call.
Parameters
----------
func : callable
Function to preprocess.
Returns
-------
callable
An object that can be accepted by `apply`.
Notes
-----
This is a classmethod because the definition of how to preprocess
should be class-wide. Also, we may want to use this before we
deploy a preprocessed function to multiple `PandasDataframePartition`
objects.
"""
pass
@classmethod
def _length_extraction_fn(cls):
"""
Return the function that computes the length of the object wrapped by this partition.
Returns
-------
callable
The function that computes the length of the object wrapped by this partition.
"""
return length_fn_pandas
@classmethod
def _width_extraction_fn(cls):
"""
Return the function that computes the width of the object wrapped by this partition.
Returns
-------
callable
The function that computes the width of the object wrapped by this partition.
"""
return width_fn_pandas
def length(self, materialize=True):
"""
Get the length of the object wrapped by this partition.
Parameters
----------
materialize : bool, default: True
Whether to forcibly materialize the result into an integer. If ``False``
was specified, may return a future of the result if it hasn't been
materialized yet.
Returns
-------
int or its Future
The length of the object.
"""
if self._length_cache is None:
self._length_cache = self.apply(self._length_extraction_fn()).get()
return self._length_cache
def width(self, materialize=True):
"""
Get the width of the object wrapped by the partition.
Parameters
----------
materialize : bool, default: True
Whether to forcibly materialize the result into an integer. If ``False``
was specified, may return a future of the result if it hasn't been
materialized yet.
Returns
-------
int or its Future
The width of the object.
"""
if self._width_cache is None:
self._width_cache = self.apply(self._width_extraction_fn()).get()
return self._width_cache
@property
def _identity(self):
"""
Calculate identifier on request for debug logging mode.
Returns
-------
str
"""
if self._identity_cache is None:
self._identity_cache = uuid.uuid4().hex
return self._identity_cache
def split(self, split_func, num_splits, *args):
"""
Split the object wrapped by the partition into multiple partitions.
Parameters
----------
split_func : Callable[pandas.DataFrame, List[Any]] -> List[pandas.DataFrame]
The function that will split this partition into multiple partitions. The list contains
pivots to split by, and will have the same dtype as the major column we are shuffling on.
num_splits : int
The number of resulting partitions (may be empty).
*args : List[Any]
Arguments to pass to ``split_func``.
Returns
-------
list
A list of partitions.
"""
log = get_logger()
self._is_debug(log) and log.debug(f"ENTER::Partition.split::{self._identity}")
self._is_debug(log) and log.debug(f"SUBMIT::_split_df::{self._identity}")
outputs = self.execution_wrapper.deploy(
split_func, [self._data] + list(args), num_returns=num_splits
)
self._is_debug(log) and log.debug(f"EXIT::Partition.split::{self._identity}")
return [self.__constructor__(output) for output in outputs]
@classmethod
def empty(cls):
"""
Create a new partition that wraps an empty pandas DataFrame.
Returns
-------
PandasDataframePartition
New `PandasDataframePartition` object.
"""
return cls.put(pandas.DataFrame(), 0, 0)
def _is_debug(self, logger=None):
"""
Check that the logger is set to debug mode.
Parameters
----------
logger : logging.logger, optional
Logger obtained from Modin's `get_logger` utility.
Explicit transmission of this parameter can be used in the case
when within the context of `_is_debug` call there was already
`get_logger` call. This is an optimization.
Returns
-------
bool
"""
if logger is None:
logger = get_logger()
return logger.isEnabledFor(logging.DEBUG)
| PandasDataframePartition |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_dataplex.py | {
"start": 3815,
"end": 29413
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"),
new=mock_base_gcp_hook_default_project_id,
):
self.hook = DataplexHook(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_create_task(self, mock_client):
self.hook.create_task(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
body=BODY,
dataplex_task_id=DATAPLEX_TASK_ID,
validate_only=None,
)
parent = f"projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}"
mock_client.return_value.create_task.assert_called_once_with(
request=dict(
parent=parent,
task_id=DATAPLEX_TASK_ID,
task=BODY,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_delete_task(self, mock_client):
self.hook.delete_task(
project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, dataplex_task_id=DATAPLEX_TASK_ID
)
name = f"projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}/tasks/{DATAPLEX_TASK_ID}"
mock_client.return_value.delete_task.assert_called_once_with(
request=dict(
name=name,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_list_tasks(self, mock_client):
self.hook.list_tasks(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID)
parent = f"projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}"
mock_client.return_value.list_tasks.assert_called_once_with(
request=dict(
parent=parent,
page_size=None,
page_token=None,
filter=None,
order_by=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_get_task(self, mock_client):
self.hook.get_task(
project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, dataplex_task_id=DATAPLEX_TASK_ID
)
name = f"projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}/tasks/{DATAPLEX_TASK_ID}"
mock_client.return_value.get_task.assert_called_once_with(
request=dict(
name=name,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_create_lake(self, mock_client):
self.hook.create_lake(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
body=BODY,
validate_only=None,
)
parent = f"projects/{PROJECT_ID}/locations/{REGION}"
mock_client.return_value.create_lake.assert_called_once_with(
request=dict(
parent=parent,
lake_id=LAKE_ID,
lake=BODY,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_delete_lake(self, mock_client):
self.hook.delete_lake(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID)
name = f"projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}"
mock_client.return_value.delete_lake.assert_called_once_with(
request=dict(
name=name,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_get_lake(self, mock_client):
self.hook.get_lake(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID)
name = f"projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}/"
mock_client.return_value.get_lake.assert_called_once_with(
request=dict(
name=name,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_create_zone(self, mock_client):
self.hook.create_zone(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, zone_id=ZONE_ID, body={})
mock_client.return_value.create_zone.assert_called_once_with(
request=dict(
parent=ZONE_NAME,
zone_id=ZONE_ID,
zone={},
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_delete_zone(self, mock_client):
self.hook.delete_zone(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, zone_id=ZONE_ID)
mock_client.return_value.delete_zone.assert_called_once_with(
request=dict(
name=ZONE_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_create_asset(self, mock_client):
self.hook.create_asset(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
zone_id=ZONE_ID,
asset_id=ASSET_ID,
body={},
)
mock_client.return_value.create_asset.assert_called_once_with(
request=dict(
parent=ZONE_PARENT,
asset={},
asset_id=ASSET_ID,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_CLIENT)
def test_delete_asset(self, mock_client):
self.hook.delete_asset(
project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID, zone_id=ZONE_ID, asset_id=ASSET_ID
)
mock_client.return_value.delete_asset.assert_called_once_with(
request=dict(
name=ASSET_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_DS_CLIENT)
def test_create_data_scan(self, mock_client):
self.hook.create_data_scan(project_id=PROJECT_ID, region=REGION, data_scan_id=DATA_SCAN_ID, body={})
mock_client.return_value.create_data_scan.assert_called_once_with(
request=dict(parent=DATASCAN_PARENT, data_scan_id=DATA_SCAN_ID, data_scan={}),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_DS_CLIENT)
def test_run_data_scan(self, mock_client):
self.hook.run_data_scan(project_id=PROJECT_ID, region=REGION, data_scan_id=DATA_SCAN_ID)
mock_client.return_value.run_data_scan.assert_called_once_with(
request=dict(
name=DATA_SCAN_NAME,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_DS_CLIENT)
def test_get_data_scan_job(self, mock_client):
self.hook.get_data_scan_job(
project_id=PROJECT_ID, region=REGION, job_id=JOB_ID, data_scan_id=DATA_SCAN_ID
)
mock_client.return_value.get_data_scan_job.assert_called_once_with(
request=dict(name=DATA_SCAN_JOB_NAME, view="FULL"),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_DS_CLIENT)
def test_delete_data_scan(self, mock_client):
self.hook.delete_data_scan(project_id=PROJECT_ID, region=REGION, data_scan_id=DATA_SCAN_ID)
mock_client.return_value.delete_data_scan.assert_called_once_with(
request=dict(
name=DATA_SCAN_NAME,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_HOOK_DS_CLIENT)
def test_get_data_scan(self, mock_client):
self.hook.get_data_scan(project_id=PROJECT_ID, region=REGION, data_scan_id=DATA_SCAN_ID)
mock_client.return_value.get_data_scan.assert_called_once_with(
request=dict(name=DATA_SCAN_NAME, view="FULL"),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_create_entry_group(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_common_location_path.return_value = COMMON_PARENT
self.hook.create_entry_group(
project_id=PROJECT_ID,
location=LOCATION,
entry_group_id=ENTRY_GROUP_ID,
entry_group_configuration=ENTRY_GROUP_BODY,
validate_only=False,
)
mock_client.return_value.create_entry_group.assert_called_once_with(
request=dict(
parent=COMMON_PARENT,
entry_group_id=ENTRY_GROUP_ID,
entry_group=ENTRY_GROUP_BODY,
validate_only=False,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_delete_entry_group(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_group_path
mock_common_location_path.return_value = ENTRY_GROUP_PARENT
self.hook.delete_entry_group(project_id=PROJECT_ID, location=LOCATION, entry_group_id=ENTRY_GROUP_ID)
mock_client.return_value.delete_entry_group.assert_called_once_with(
request=dict(
name=ENTRY_GROUP_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_list_entry_groups(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_common_location_path.return_value = COMMON_PARENT
self.hook.list_entry_groups(
project_id=PROJECT_ID,
location=LOCATION,
order_by="name",
page_size=2,
filter_by="'description' = 'Some descr'",
)
mock_client.return_value.list_entry_groups.assert_called_once_with(
request=dict(
parent=COMMON_PARENT,
page_size=2,
page_token=None,
filter="'description' = 'Some descr'",
order_by="name",
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_get_entry_group(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_group_path
mock_common_location_path.return_value = ENTRY_GROUP_PARENT
self.hook.get_entry_group(project_id=PROJECT_ID, location=LOCATION, entry_group_id=ENTRY_GROUP_ID)
mock_client.return_value.get_entry_group.assert_called_once_with(
request=dict(
name=ENTRY_GROUP_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_update_entry_group(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_group_path
mock_common_location_path.return_value = ENTRY_GROUP_PARENT
self.hook.update_entry_group(
project_id=PROJECT_ID,
location=LOCATION,
entry_group_id=ENTRY_GROUP_ID,
entry_group_configuration=ENTRY_GROUP_UPDATED_BODY,
update_mask=UPDATE_MASK,
validate_only=False,
)
mock_client.return_value.update_entry_group.assert_called_once_with(
request=dict(
entry_group={**ENTRY_GROUP_UPDATED_BODY, "name": ENTRY_GROUP_PARENT},
update_mask=FieldMask(paths=UPDATE_MASK),
validate_only=False,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_create_entry_type(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_common_location_path.return_value = COMMON_PARENT
self.hook.create_entry_type(
project_id=PROJECT_ID,
location=LOCATION,
entry_type_id=ENTRY_TYPE_ID,
entry_type_configuration=ENTRY_TYPE_BODY,
validate_only=False,
)
mock_client.return_value.create_entry_type.assert_called_once_with(
request=dict(
parent=COMMON_PARENT,
entry_type_id=ENTRY_TYPE_ID,
entry_type=ENTRY_TYPE_BODY,
validate_only=False,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_delete_entry_type(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_type_path
mock_common_location_path.return_value = ENTRY_TYPE_PARENT
self.hook.delete_entry_type(project_id=PROJECT_ID, location=LOCATION, entry_type_id=ENTRY_TYPE_ID)
mock_client.return_value.delete_entry_type.assert_called_once_with(
request=dict(
name=ENTRY_TYPE_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_list_entry_types(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_common_location_path.return_value = COMMON_PARENT
self.hook.list_entry_types(
project_id=PROJECT_ID,
location=LOCATION,
order_by="name",
page_size=2,
filter_by="'description' = 'Some descr'",
)
mock_client.return_value.list_entry_types.assert_called_once_with(
request=dict(
parent=COMMON_PARENT,
page_size=2,
page_token=None,
filter="'description' = 'Some descr'",
order_by="name",
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_get_entry_type(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_type_path
mock_common_location_path.return_value = ENTRY_TYPE_PARENT
self.hook.get_entry_type(project_id=PROJECT_ID, location=LOCATION, entry_type_id=ENTRY_TYPE_ID)
mock_client.return_value.get_entry_type.assert_called_once_with(
request=dict(
name=ENTRY_TYPE_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_update_entry_type(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_type_path
mock_common_location_path.return_value = ENTRY_TYPE_PARENT
self.hook.update_entry_type(
project_id=PROJECT_ID,
location=LOCATION,
entry_type_id=ENTRY_TYPE_ID,
entry_type_configuration=ENTRY_TYPE_UPDATED_BODY,
update_mask=UPDATE_MASK,
validate_only=False,
)
mock_client.return_value.update_entry_type.assert_called_once_with(
request=dict(
entry_type={**ENTRY_TYPE_UPDATED_BODY, "name": ENTRY_TYPE_PARENT},
update_mask=FieldMask(paths=UPDATE_MASK),
validate_only=False,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_create_aspect_type(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_common_location_path.return_value = COMMON_PARENT
self.hook.create_aspect_type(
project_id=PROJECT_ID,
location=LOCATION,
aspect_type_id=ASPECT_TYPE_ID,
aspect_type_configuration=ASPECT_TYPE_BODY,
validate_only=False,
)
mock_client.return_value.create_aspect_type.assert_called_once_with(
request=dict(
parent=COMMON_PARENT,
aspect_type_id=ASPECT_TYPE_ID,
aspect_type=ASPECT_TYPE_BODY,
validate_only=False,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_delete_aspect_type(self, mock_client):
mock_common_location_path = mock_client.return_value.aspect_type_path
mock_common_location_path.return_value = ASPECT_TYPE_PARENT
self.hook.delete_aspect_type(project_id=PROJECT_ID, location=LOCATION, aspect_type_id=ASPECT_TYPE_ID)
mock_client.return_value.delete_aspect_type.assert_called_once_with(
request=dict(
name=ASPECT_TYPE_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_list_aspect_types(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_common_location_path.return_value = COMMON_PARENT
self.hook.list_aspect_types(
project_id=PROJECT_ID,
location=LOCATION,
order_by="name",
page_size=2,
filter_by="'description' = 'Some descr'",
)
mock_client.return_value.list_aspect_types.assert_called_once_with(
request=dict(
parent=COMMON_PARENT,
page_size=2,
page_token=None,
filter="'description' = 'Some descr'",
order_by="name",
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_get_aspect_type(self, mock_client):
mock_common_location_path = mock_client.return_value.aspect_type_path
mock_common_location_path.return_value = ASPECT_TYPE_PARENT
self.hook.get_aspect_type(project_id=PROJECT_ID, location=LOCATION, aspect_type_id=ASPECT_TYPE_ID)
mock_client.return_value.get_aspect_type.assert_called_once_with(
request=dict(
name=ASPECT_TYPE_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_update_aspect_type(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_type_path
mock_common_location_path.return_value = ENTRY_TYPE_PARENT
self.hook.update_entry_type(
project_id=PROJECT_ID,
location=LOCATION,
entry_type_id=ENTRY_TYPE_ID,
entry_type_configuration=ENTRY_TYPE_UPDATED_BODY,
update_mask=UPDATE_MASK,
validate_only=False,
)
mock_client.return_value.update_entry_type.assert_called_once_with(
request=dict(
entry_type={**ENTRY_TYPE_UPDATED_BODY, "name": ENTRY_TYPE_PARENT},
update_mask=FieldMask(paths=UPDATE_MASK),
validate_only=False,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_create_entry(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_group_path
mock_common_location_path.return_value = ENTRY_GROUP_PARENT
self.hook.create_entry(
project_id=PROJECT_ID,
location=LOCATION,
entry_id=ENTRY_ID,
entry_group_id=ENTRY_GROUP_ID,
entry_configuration=ENTRY_ID_BODY,
)
mock_client.return_value.create_entry.assert_called_once_with(
request=dict(
parent=ENTRY_GROUP_PARENT,
entry_id=ENTRY_ID,
entry=ENTRY_ID_BODY,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_delete_entry(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_path
mock_common_location_path.return_value = ENTRY_PARENT
self.hook.delete_entry(
project_id=PROJECT_ID,
location=LOCATION,
entry_id=ENTRY_ID,
entry_group_id=ENTRY_GROUP_ID,
)
mock_client.return_value.delete_entry.assert_called_once_with(
request=dict(
name=ENTRY_PARENT,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_list_entries(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_group_path
mock_common_location_path.return_value = ENTRY_GROUP_PARENT
self.hook.list_entries(
project_id=PROJECT_ID,
entry_group_id=ENTRY_GROUP_ID,
location=LOCATION,
page_size=2,
filter_by="'description' = 'Some descr'",
)
mock_client.return_value.list_entries.assert_called_once_with(
request=dict(
parent=ENTRY_GROUP_PARENT,
page_size=2,
page_token=None,
filter="'description' = 'Some descr'",
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_search_entries(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_common_location_path.return_value = COMMON_PARENT
self.hook.search_entries(
project_id=PROJECT_ID,
location="global",
query="displayname:Display Name",
)
mock_client.return_value.search_entries.assert_called_once_with(
request=dict(
name=COMMON_PARENT,
query="displayname:Display Name",
order_by=None,
page_size=None,
page_token=None,
scope=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_get_entry(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_path
mock_common_location_path.return_value = ENTRY_PARENT
self.hook.get_entry(
project_id=PROJECT_ID,
location=LOCATION,
entry_id=ENTRY_ID,
entry_group_id=ENTRY_GROUP_ID,
)
mock_client.return_value.get_entry.assert_called_once_with(
request=dict(
name=ENTRY_PARENT,
view=None,
aspect_types=None,
paths=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_lookup_entry(self, mock_client):
mock_common_location_path = mock_client.return_value.common_location_path
mock_entry_location_path = mock_client.return_value.entry_path
mock_common_location_path.return_value = COMMON_PARENT
mock_entry_location_path.return_value = ENTRY_PARENT
self.hook.lookup_entry(
project_id=PROJECT_ID,
location=LOCATION,
entry_id=ENTRY_ID,
entry_group_id=ENTRY_GROUP_ID,
)
mock_client.return_value.lookup_entry.assert_called_once_with(
request=dict(
name=COMMON_PARENT,
entry=ENTRY_PARENT,
view=None,
aspect_types=None,
paths=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
@mock.patch(DATAPLEX_CATALOG_HOOK_CLIENT)
def test_update_entry(self, mock_client):
mock_common_location_path = mock_client.return_value.entry_path
mock_common_location_path.return_value = ENTRY_PARENT
self.hook.update_entry(
project_id=PROJECT_ID,
location=LOCATION,
entry_id=ENTRY_ID,
entry_group_id=ENTRY_GROUP_ID,
entry_configuration=ENTRY_ID_UPDATED_BODY,
update_mask=UPDATE_MASK_FOR_ENTRY,
)
mock_client.return_value.update_entry.assert_called_once_with(
request=dict(
entry={**ENTRY_ID_UPDATED_BODY, "name": ENTRY_PARENT},
update_mask=FieldMask(paths=UPDATE_MASK_FOR_ENTRY),
allow_missing=False,
delete_missing_aspects=False,
aspect_keys=None,
),
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexHook |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_test.py | {
"start": 1996,
"end": 2297
} | class ____(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def call(self, input_, state, scope=None):
return (input_ + 1, state + 1)
| Plus1RNNCell |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-separate-sentence-into-rows.py | {
"start": 2247,
"end": 3364
} | class ____(object):
def minimumCost(self, sentence, k):
"""
:type sentence: str
:type k: int
:rtype: int
"""
word_lens = []
j = 0
for i in xrange(len(sentence)+1):
if i != len(sentence) and sentence[i] != ' ':
continue
word_lens.append(i-j)
j = i+1
dp = [float("inf")]*(1+(len(word_lens)-1)) # dp[i]: min cost of the first i word_lens where i in [0, len(words)-1]
dp[0] = 0
for i in xrange(1, (len(word_lens)-1)+1):
total = word_lens[i-1]
for j in reversed(xrange(i)):
dp[i] = min(dp[i], dp[j] + (k-total)**2)
if j-1 < 0:
continue
total += (word_lens[j-1]+1)
if total > k:
break
i, total = len(word_lens)-1, -1
while i >= 0 and total + (word_lens[i]+1) <= k: # find max i s.t. the length of the last line > k
total += (word_lens[i]+1)
i -= 1
return min(dp[j] for j in xrange(i+1, len(dp)))
| Solution3 |
python | pytorch__pytorch | torchgen/_autoheuristic/mixed_mm/train_decision_mixedmm.py | {
"start": 252,
"end": 2098
} | class ____(AHTrainDecisionTree):
def __init__(self):
super().__init__()
def add_new_features(self, results):
ops = mixed_mm_operations()
added_categorical_features = []
for op in ops:
results[op.name] = results.apply(op.func, axis=1)
if op.is_categorical:
added_categorical_features.append(op.name)
return (results, added_categorical_features)
def get_default_config(self, row):
return "extern_fallback_mixed_mm"
def get_allowed_wrong_prediction_pct(self):
# it is okay to have wrong predictions
# we introduce uncertainty by marking leaves as unsafe instead
return 1.0
def get_test_and_val_size(self):
return (0.01, 0.19)
def is_unsafe_leaf(self, row, predicted_config, choice2time):
if predicted_config not in choice2time:
# heuristic always returns "unsure" in such a case
return False
predicted_time = choice2time[predicted_config]
fallback_time = choice2time[self.get_default_config(row)]
# we mark leaves as unsafe if there is a chance our choice will be 5% slower than fallback
# we are okay with making the wrong choice, as long as our choice is better than fallback because
# fallback is the default when max_autotune is false
return 1.05 * fallback_time < predicted_time
def get_grid_search_values(self):
# A lot of different hyperparameters perform very similar on mixed_mm
# it is kind of hard to automatically pick one so I just manually picked one with a small max_depth
return {"max_depth": [5], "min_samples_leaf": [0.01], "criterion": ["entropy"]}
if __name__ == "__main__":
train = AHTrainDecisionTreeMixedMM()
train.generate_heuristic()
| AHTrainDecisionTreeMixedMM |
python | openai__openai-python | tests/api_resources/test_conversations.py | {
"start": 6733,
"end": 13586
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.create()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.create(
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
metadata={"foo": "string"},
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.retrieve(
"conv_123",
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.retrieve(
"conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.retrieve(
"conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.retrieve(
"",
)
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.update(
conversation_id="conv_123",
metadata={"foo": "string"},
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.update(
conversation_id="conv_123",
metadata={"foo": "string"},
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.update(
conversation_id="conv_123",
metadata={"foo": "string"},
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.update(
conversation_id="",
metadata={"foo": "string"},
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.delete(
"conv_123",
)
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.delete(
"conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.delete(
"conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.delete(
"",
)
| TestAsyncConversations |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py | {
"start": 18616,
"end": 18704
} | class ____(Sam2VideoVisionRotaryEmbedding):
pass
| Sam3TrackerVideoVisionRotaryEmbedding |
python | django__django | tests/null_queries/models.py | {
"start": 339,
"end": 412
} | class ____(models.Model):
data = models.CharField(max_length=10)
| OuterB |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 7662,
"end": 7784
} | class ____(PrefectException, ValueError):
"""
Raised when a profile name does not exist.
"""
| MissingProfileError |
python | pytorch__pytorch | test/jit/test_backend_nnapi.py | {
"start": 1411,
"end": 5236
} | class ____(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
def test_compile_spec_santiy(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(
RuntimeError,
'method_compile_spec does not contain the "forward" key.' + errorMsgTail,
):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(
RuntimeError,
'method_compile_spec does not contain a dictionary with an "inputs" key, '
'under it\'s "forward" key.' + errorMsgTail,
):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(
RuntimeError,
'method_compile_spec does not contain a dictionary with an "inputs" key, '
'under it\'s "forward" key.' + errorMsgTail,
):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(
RuntimeError,
'method_compile_spec does not contain either a Tensor or TensorList, under it\'s "inputs" key.'
+ errorMsgTail,
):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(
RuntimeError,
'method_compile_spec does not contain either a Tensor or TensorList, under it\'s "inputs" key.'
+ errorMsgTail,
):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestNnapiBackend |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 50217,
"end": 63973
} | class ____(KyutaiSpeechToTextPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
_keep_in_fp32_modules_strict = ["codec_model"]
output_modalities = ("audio", "text")
def __init__(self, config):
super().__init__(config)
self.model = KyutaiSpeechToTextModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.codec_model = AutoModel.from_config(config.codec_config)
# we are in an edge case where for the codec_model self.can_generate is False, setting self.codec_model.generation_config to None
# yet the codec_model needs a generation config to initialize it's cache for streaming inference
# we therefore initialize a generation config for the codec model
self.codec_model.generation_config = GenerationConfig.from_model_config(config.codec_config)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import torch
>>> from datasets import load_dataset, Audio
>>> from transformers import KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration
>>> torch_device = "cuda" if torch.cuda.is_available() else "cpu"
>>> model_id = "kyutai/stt-2.6b-en-trfs"
>>> processor = KyutaiSpeechToTextProcessor.from_pretrained(model_id)
>>> model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
>>> ds = load_dataset(
... "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
... )
>>> ds = ds.cast_column("audio", Audio(sampling_rate=24000))
>>> inputs = processor(
... ds[0]["audio"]["array"],
... )
>>> inputs.to(torch_device)
>>> output_tokens = model.generate(**inputs)
>>> print(processor.batch_decode(output_tokens, skip_special_tokens=True))
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def _prepare_generation_config(self, *args, **kwargs):
generation_config, model_kwargs = super()._prepare_generation_config(*args, **kwargs)
# this should be passed to the model kwargs for the input preparation
model_kwargs["audio_window_size"] = (
generation_config.audio_window_size if hasattr(generation_config, "audio_window_size") else None
)
return generation_config, model_kwargs
def _prepare_model_inputs(
self,
inputs: Optional[torch.Tensor] = None,
bos_token_id: Optional[torch.Tensor] = None,
model_kwargs: Optional[dict[str, torch.Tensor]] = None,
) -> tuple[torch.Tensor, Optional[str], dict[str, torch.Tensor]]:
inputs, input_name, model_kwargs = super()._prepare_model_inputs(
inputs=inputs,
bos_token_id=bos_token_id,
model_kwargs=model_kwargs,
)
audio_window_size = model_kwargs.get("audio_window_size", None)
if audio_window_size is None:
audio_window_size = self.codec_model.get_encoded_length(model_kwargs["input_values"].shape[-1]).item()
model_kwargs["audio_window_size"] = audio_window_size
batch_size = inputs.shape[0]
device = inputs.device
# initialize audio tokens
model_kwargs["audio_tokens"] = torch.zeros(
(batch_size, audio_window_size, self.config.num_codebooks),
device=device,
dtype=torch.long,
)
model_kwargs["current_window"] = (
torch.tensor([0, 0], device=device, dtype=torch.long).expand(batch_size, -1).contiguous()
)
# let's use generate's cache preparation to prepare the cache for the codec model
temporary_model_kwargs = {}
# monkey patching the codec model with cache preparation methods since we don't want it to inherit fully from GenerationMixin
# Add cache-related methods from GenerationMixin to codec model
cache_methods = [
"_prepare_cache_for_generation",
"_get_cache",
]
for method in cache_methods:
setattr(self.codec_model, method, types.MethodType(getattr(self, method).__func__, self.codec_model))
setattr(
self.codec_model, "_supports_default_dynamic_cache", types.MethodType(lambda x: True, self.codec_model)
)
self.codec_model.generation_config.cache_implementation = "dynamic"
self.codec_model._prepare_cache_for_generation(
generation_config=self.codec_model.generation_config,
model_kwargs=temporary_model_kwargs,
generation_mode=None,
batch_size=batch_size,
max_cache_length=self.config.codec_config.sliding_window,
)
if "past_key_values" in temporary_model_kwargs:
model_kwargs["encoder_past_key_values"] = temporary_model_kwargs["past_key_values"]
# initialize the padding cache for the codec model
per_layer_padding, per_layer_padding_mode, per_layer_in_channels = [], [], []
for layer_name in self.codec_model.encoder._mimiconv1d_layer_names:
per_layer_padding.append(self.codec_model.encoder.get_submodule(layer_name).padding_total)
per_layer_padding_mode.append(self.codec_model.encoder.get_submodule(layer_name).pad_mode)
per_layer_in_channels.append(self.codec_model.encoder.get_submodule(layer_name).in_channels)
# downsample layer
per_layer_padding.append(self.codec_model.downsample.padding_total)
per_layer_padding_mode.append(self.codec_model.downsample.pad_mode)
per_layer_in_channels.append(self.codec_model.downsample.in_channels)
model_kwargs["padding_cache"] = KyutaiSpeechToTextConv1dPaddingCache(
num_layers=len(self.codec_model.encoder._mimiconv1d_layer_names) + 1,
per_layer_padding=per_layer_padding,
per_layer_padding_mode=per_layer_padding_mode,
per_layer_in_channels=per_layer_in_channels,
)
return inputs, input_name, model_kwargs
def prepare_inputs_for_generation(
self,
*args,
audio_tokens: Optional[torch.LongTensor] = None,
input_values: Optional[torch.FloatTensor] = None,
padding_mask: Optional[torch.Tensor] = None,
audio_window_size: Optional[int] = None,
current_window: Optional[tuple[int, int]] = None,
encoder_past_key_values: Optional[Cache] = None,
padding_cache: Optional[KyutaiSpeechToTextConv1dPaddingCache] = None,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(*args, **kwargs)
if input_values is not None:
cache_position = model_inputs["cache_position"]
start, end = current_window[0]
# first cache position is for bos token, so we need to offset by -1
if cache_position[-1] - 1 >= end:
# we need to encode the new audio tokens
with torch.no_grad():
input_values_start_idx = start * self.config.frame_size
input_values_end_idx = (start + audio_window_size) * self.config.frame_size
current_input_values = input_values[..., input_values_start_idx:input_values_end_idx]
codec_model_output = self.codec_model.encode(
current_input_values,
encoder_past_key_values=encoder_past_key_values,
padding_cache=padding_cache,
)
new_audio_tokens = codec_model_output.audio_codes.transpose(1, 2)
audio_tokens.copy_(new_audio_tokens)
start = end.clone()
end = end + audio_window_size
current_window.copy_(
torch.tensor([start, end], device=current_window.device).expand(current_window.shape[0], -1)
)
# first cache position is for bos token, so we need to offset by -1
current_audio_tokens_idxs = (cache_position - start - 1).clamp(min=0)
current_audio_tokens = audio_tokens[:, current_audio_tokens_idxs, :]
current_audio_tokens[:, cache_position == 0, :] = self.config.audio_bos_token_id
input_ids = model_inputs.pop("input_ids")
input_ids = torch.cat(
[input_ids.unsqueeze(2), current_audio_tokens],
dim=2,
)
model_inputs["input_ids"] = input_ids
return model_inputs
# TODO: @eustlb, this should be standardized
@classmethod
def from_pretrained(cls, *args, **kwargs):
if kwargs.get("output_loading_info", False):
model, loading_info = super().from_pretrained(*args, **kwargs)
else:
model = super().from_pretrained(*args, **kwargs)
# copy depth decoder generation conf attr to the depth decoder generation config
prefix = "codec_"
prefix_len = len(prefix)
codec_model_attrs = {
attr[prefix_len:]: value
for attr, value in vars(model.generation_config).items()
if attr.startswith(prefix)
}
vars(model.codec_model.generation_config).update({"_from_model_config": False, **codec_model_attrs})
# remove the depth decoder generation conf attr from the model generation config
for attr in codec_model_attrs:
delattr(model.generation_config, prefix + attr)
if "output_loading_info" in kwargs:
return model, loading_info
else:
return model
# TODO: @eustlb, this should be standardized
def save_pretrained(self, *args, **kwargs):
prefix = "codec_"
codec_model_attrs = self.codec_model.generation_config.to_diff_dict()
codec_model_attrs.pop("transformers_version", None)
for attr, value in codec_model_attrs.items():
setattr(self.generation_config, prefix + attr, value)
super().save_pretrained(*args, **kwargs)
def generate(self, *args, **kwargs):
r"""
This method forwards all its arguments to GenerationMixin's [`~GenerationMixin.generate`]. Please refer to the docstring of this method for more information.
"""
max_new_tokens = kwargs.pop("max_new_tokens", None)
input_values = kwargs.get("input_values")
# TODO: @eustlb, we should have per-batch-idx values
# here we do not use padding_mask to be aligned to what's done in the original codebase
max_audio_frames = input_values.shape[-1] // self.config.codec_config.frame_size
if max_new_tokens is None or max_new_tokens > max_audio_frames:
if max_new_tokens is not None:
logger.warning(
f"`max_new_tokens` ({max_new_tokens}) is greater than the maximum number of audio frames ({max_audio_frames})."
f"Setting `max_new_tokens` to {max_audio_frames}."
)
max_new_tokens = max_audio_frames
return super().generate(
*args,
max_new_tokens=max_new_tokens,
**kwargs,
)
__all__ = [
"KyutaiSpeechToTextPreTrainedModel",
"KyutaiSpeechToTextModel",
"KyutaiSpeechToTextForConditionalGeneration",
]
| KyutaiSpeechToTextForConditionalGeneration |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 15493,
"end": 16984
} | class ____:
"""Test el_GR address provider methods"""
def test_line_address(self, faker, num_samples):
for _ in range(num_samples):
address = faker.line_address()
assert isinstance(address, str)
def test_street_prefix_short(self, faker, num_samples):
for _ in range(num_samples):
street_prefix_short = faker.street_prefix_short()
assert isinstance(street_prefix_short, str)
assert street_prefix_short in ElGrAddressProvider.street_prefixes_short
def test_street_prefix_long(self, faker, num_samples):
for _ in range(num_samples):
street_prefix_long = faker.street_prefix_long()
assert isinstance(street_prefix_long, str)
assert street_prefix_long in ElGrAddressProvider.street_prefixes_long
def test_street(self, faker, num_samples):
for _ in range(num_samples):
street = faker.street()
assert isinstance(street, str)
assert street in ElGrAddressProvider.localities
def test_city(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city in ElGrAddressProvider.cities
def test_region(self, faker, num_samples):
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
assert region in ElGrAddressProvider.regions
| TestElGr |
python | scikit-learn__scikit-learn | asv_benchmarks/benchmarks/cluster.py | {
"start": 1574,
"end": 2925
} | class ____(Predictor, Transformer, Estimator, Benchmark):
"""
Benchmarks for MiniBatchKMeans.
"""
param_names = ["representation", "init"]
params = (["dense", "sparse"], ["random", "k-means++"])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, init = params
if representation == "sparse":
data = _20newsgroups_highdim_dataset()
else:
data = _blobs_dataset(n_clusters=20)
return data
def make_estimator(self, params):
representation, init = params
max_iter = 5 if representation == "sparse" else 2
estimator = MiniBatchKMeans(
n_clusters=20,
init=init,
n_init=1,
max_iter=max_iter,
batch_size=1000,
max_no_improvement=None,
compute_labels=False,
random_state=0,
)
return estimator
def make_scorers(self):
self.train_scorer = lambda _, __: neg_mean_inertia(
self.X, self.estimator.predict(self.X), self.estimator.cluster_centers_
)
self.test_scorer = lambda _, __: neg_mean_inertia(
self.X_val,
self.estimator.predict(self.X_val),
self.estimator.cluster_centers_,
)
| MiniBatchKMeansBenchmark |
python | ray-project__ray | python/ray/data/_internal/logical/operators/write_operator.py | {
"start": 293,
"end": 1215
} | class ____(AbstractMap):
"""Logical operator for write."""
def __init__(
self,
input_op: LogicalOperator,
datasink_or_legacy_datasource: Union[Datasink, Datasource],
ray_remote_args: Optional[Dict[str, Any]] = None,
concurrency: Optional[int] = None,
**write_args,
):
if isinstance(datasink_or_legacy_datasource, Datasink):
min_rows_per_bundled_input = (
datasink_or_legacy_datasource.min_rows_per_write
)
else:
min_rows_per_bundled_input = None
super().__init__(
"Write",
input_op,
min_rows_per_bundled_input=min_rows_per_bundled_input,
ray_remote_args=ray_remote_args,
)
self._datasink_or_legacy_datasource = datasink_or_legacy_datasource
self._write_args = write_args
self._concurrency = concurrency
| Write |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-faiss/llama_index/vector_stores/faiss/map_store.py | {
"start": 750,
"end": 9900
} | class ____(FaissVectorStore):
"""
Faiss Map Vector Store.
This wraps the base Faiss vector store and adds handling for
the Faiss IDMap and IDMap2 indexes. This allows for
update/delete functionality through node_id and faiss_id mapping.
Embeddings are stored within a Faiss index.
During query time, the index uses Faiss to query for the top
k embeddings, and returns the corresponding indices.
Args:
faiss_index (faiss.IndexIDMap or faiss.IndexIDMap2): Faiss id map index instance
Examples:
`pip install llama-index-vector-stores-faiss faiss-cpu`
```python
from llama_index.vector_stores.faiss import FaissMapVectorStore
import faiss
# create a faiss index
d = 1536 # dimension
faiss_index = faiss.IndexFlatL2(d)
# wrap it in an IDMap or IDMap2
id_map_index = faiss.IndexIDMap2(faiss_index)
vector_store = FaissMapVectorStore(faiss_index=id_map_index)
```
"""
# _node_id_to_faiss_id_map is used to map the node id to the faiss id
_node_id_to_faiss_id_map = PrivateAttr()
# _faiss_id_to_node_id_map is used to map the faiss id to the node id
_faiss_id_to_node_id_map = PrivateAttr()
def __init__(
self,
faiss_index: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss
except ImportError:
raise ImportError(import_err_msg)
if not isinstance(faiss_index, faiss.IndexIDMap) and not isinstance(
faiss_index, faiss.IndexIDMap2
):
raise ValueError(
"FaissVectorMapStore requires a faiss.IndexIDMap or faiss.IndexIDMap2 index. "
"Please create an IndexIDMap2 index and pass it to the FaissVectorMapStore."
)
super().__init__(faiss_index=faiss_index)
self._node_id_to_faiss_id_map = {}
self._faiss_id_to_node_id_map = {}
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
NOTE: in the Faiss vector store, we do not store text in Faiss.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
new_ids = []
for node in nodes:
text_embedding = node.get_embedding()
text_embedding_np = np.array(text_embedding, dtype="float32")[np.newaxis, :]
self._node_id_to_faiss_id_map[node.id_] = self._faiss_index.ntotal
self._faiss_id_to_node_id_map[self._faiss_index.ntotal] = node.id_
self._faiss_index.add_with_ids(text_embedding_np, self._faiss_index.ntotal)
new_ids.append(node.id_)
return new_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
# only handle delete on node_ids
if ref_doc_id in self._node_id_to_faiss_id_map:
faiss_id = self._node_id_to_faiss_id_map[ref_doc_id]
# remove the faiss id from the faiss index
self._faiss_index.remove_ids(np.array([faiss_id], dtype=np.int64))
# remove the node id from the node id map
if ref_doc_id in self._node_id_to_faiss_id_map:
del self._node_id_to_faiss_id_map[ref_doc_id]
# remove the faiss id from the faiss id map
if faiss_id in self._faiss_id_to_node_id_map:
del self._faiss_id_to_node_id_map[faiss_id]
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""Delete nodes from vector store."""
if filters is not None:
raise NotImplementedError("Metadata filters not implemented for Faiss yet.")
if node_ids is None:
raise ValueError("node_ids must be provided to delete nodes.")
faiss_ids = []
for node_id in node_ids:
# get the faiss id from the node_id_map
faiss_id = self._node_id_to_faiss_id_map.get(node_id)
if faiss_id is not None:
faiss_ids.append(faiss_id)
if not faiss_ids:
return
self._faiss_index.remove_ids(np.array(faiss_ids, dtype=np.int64))
# cleanup references
for node_id in node_ids:
# get the faiss id from the node_id_map
faiss_id = self._node_id_to_faiss_id_map.get(node_id)
if faiss_id is not None and faiss_id in self._faiss_id_to_node_id_map:
del self._faiss_id_to_node_id_map[faiss_id]
if node_id in self._node_id_to_faiss_id_map:
del self._node_id_to_faiss_id_map[node_id]
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
similarity_top_k (int): top k most similar nodes
"""
if query.filters is not None:
raise ValueError("Metadata filters not implemented for Faiss yet.")
query_embedding = cast(List[float], query.query_embedding)
query_embedding_np = np.array(query_embedding, dtype="float32")[np.newaxis, :]
dists, indices = self._faiss_index.search(
query_embedding_np, query.similarity_top_k
)
dists = list(dists[0])
# if empty, then return an empty response
if len(indices) == 0:
return VectorStoreQueryResult(similarities=[], ids=[])
# returned dimension is 1 x k
node_idxs = indices[0]
filtered_dists = []
filtered_node_idxs = []
for dist, idx in zip(dists, node_idxs):
if idx < 0:
continue
filtered_dists.append(dist)
filtered_node_idxs.append(self._faiss_id_to_node_id_map[idx])
return VectorStoreQueryResult(
similarities=filtered_dists, ids=filtered_node_idxs
)
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""
Save to file.
This method saves the vector store to disk.
Args:
persist_path (str): The save_path of the file.
"""
super().persist(persist_path=persist_path, fs=fs)
dirpath = os.path.dirname(persist_path)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
id_map = {}
id_map["node_id_to_faiss_id_map"] = self._node_id_to_faiss_id_map
id_map["faiss_id_to_node_id_map"] = self._faiss_id_to_node_id_map
# save the id map
id_map_path = os.path.join(dirpath, DEFAULT_ID_MAP_NAME)
with open(id_map_path, "w") as f:
f.write(str(id_map))
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "FaissMapVectorStore":
persist_path = os.path.join(
persist_dir,
f"{DEFAULT_VECTOR_STORE}{NAMESPACE_SEP}{DEFAULT_PERSIST_FNAME}",
)
# only support local storage for now
if fs and not isinstance(fs, LocalFileSystem):
raise NotImplementedError("FAISS only supports local storage for now.")
return cls.from_persist_path(persist_path=persist_path, fs=None)
@classmethod
def from_persist_path(
cls,
persist_path: str,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "FaissMapVectorStore":
import faiss
# I don't think FAISS supports fsspec, it requires a path in the SWIG interface
# TODO: copy to a temp file and load into memory from there
if fs and not isinstance(fs, LocalFileSystem):
raise NotImplementedError("FAISS only supports local storage for now.")
if not os.path.exists(persist_path):
raise ValueError(f"No existing {__name__} found at {persist_path}.")
dirpath = os.path.dirname(persist_path)
id_map_path = os.path.join(dirpath, DEFAULT_ID_MAP_NAME)
if not os.path.exists(persist_path):
raise ValueError(f"No existing {__name__} found at {persist_path}.")
faiss_index = faiss.read_index(persist_path)
with open(id_map_path, "r") as f:
id_map = eval(f.read())
map_vs = cls(faiss_index=faiss_index)
map_vs._node_id_to_faiss_id_map = id_map["node_id_to_faiss_id_map"]
map_vs._faiss_id_to_node_id_map = id_map["faiss_id_to_node_id_map"]
return map_vs
| FaissMapVectorStore |
python | pytorch__pytorch | torchgen/dest/lazy_ir.py | {
"start": 14749,
"end": 26485
} | class ____:
class_method_name: str
backend_index: BackendIndex
tensor_class: str
gen_forced_fallback_code: bool
backend_namespace: str
get_tensorlist: str
get_tensor_or_wrap_number: str
try_get_tensor: str
metrics_counter: str
create_tensor: str
create_from_first_tensor: bool
create_aten_from_ltc_tensor: str
tuple_aten_from_ltc_tensors: str
lazy_tensor_ptr: str
get_device_fn: str
def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
value_args = schema.filtered_args(values=True, scalars=False)
# Generates lazy_{name} variables for LazyTensors wrapping input tensors
lazy_tensor_decls: list[str] = []
for arg in value_args:
if arg.is_wrapped_scalar:
if isinstance(arg.lazy_type, OptionalCType):
lazy_tensor_decls.append(
f"""auto node_{arg.name} = {arg.name} ?
std::make_optional(torch::lazy::LazyGraphExecutor::Get()->
GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
::std::nullopt;"""
)
else:
lazy_tensor_decls.append(
f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
)
elif arg.is_symint_or_list:
continue # values are extracted in isValueType
elif isinstance(arg.lazy_type, BaseCType):
if arg.lazy_type.type is tensorListValueT:
lazy_tensor_decls.append(
f"auto lazy_{arg.name}_tensorlist = "
f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
)
else:
lazy_tensor_decls.append(
f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
)
elif isinstance(arg.lazy_type, OptionalCType):
assert arg.lazy_type.elem == BaseCType(getValueT()), arg.lazy_type.elem
# TODO(alanwaketan): Maybe we want to apply GetLtcTensorOrCreateForWrappedNumber here, but hold it
# until we encounter a real world example.
lazy_tensor_decls.append(
f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
)
else:
raise AssertionError(
f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
)
return ("\n ").join(lazy_tensor_decls)
def force_eager_fallback(
self,
func: NativeFunction,
schema: LazyIrSchema,
metadata: BackendMetadata,
sig: DispatcherSignature | NativeSignature,
) -> str:
if self.gen_forced_fallback_code:
return gen_fallback_code(
schema, sig, overload_name=func.func.name.overload_name
)
return ""
def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
return f"{self.metrics_counter};"
def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
value_args = schema.filtered_args(values=True, scalars=False)
scalar_args = schema.filtered_args(values=False, scalars=True)
value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
optional_device = OptionalCType(BaseCType(deviceT))
optional_devices = [
a.name for a in scalar_args if a.lazy_type == optional_device
]
assert len(value_types_names) > 0 or len(optional_devices) > 0, (
"Expected at least one Value or Device type"
)
get_device_str = (
f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
)
return f"""auto common_device = {get_device_str};
TORCH_INTERNAL_ASSERT(common_device);
"""
def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
metadata = self.backend_index.get_kernel(func)
assert metadata is not None
all_args = schema.filtered_args()
returns_length = len(schema.returns)
# call the meta kernel if it exists, to compute output shape/dtype for our IR
# Note [Generated LTC Shape Functions]
# LTC uses meta tensors from core to do shape inference when possible, and otherwise
# we generate a shape function declaration that needs to be manually implemented.
# How do we detect which ops are eligible to use meta tensors?
# In general we should be able to use meta tensors not just on structured operators,
# but also on composite operators that are implemented in terms of structured kernels.
# We don't currently have a way of knowing at codegen time which ops are implemented that way.
# This is the case for all view and view_copy operators however, so we're going to
# use them specifically for all of the view_copy ops (instead of manually writing shape rules for all of them).
is_view_copy_op = "view_copy" in func.tags
is_structured = func.structured or func.structured_delegate is not None
if is_structured or is_view_copy_op:
meta_out = """
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
if returns_length > 1:
def this_shape(i: int) -> str:
return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"
shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"
# Convert tensor args to the meta device and call it.
# (We can't pass in the input tensors directly, because they are "functional wrappers".
# If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
# Even at::meta:: functions might redispatch, e.g. if they call into view ops.
dispatcher_sig = DispatcherSignature.from_schema(func.func)
meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
meta_call_args = [
e.expr
for e in translate(
meta_call_ctx, dispatcher_sig.arguments(), method=False
)
]
if is_view_copy_op:
# view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
assert func.has_composite_explicit_autograd_non_functional_kernel
dispatch_ns = "compositeexplicitautogradnonfunctional"
else:
dispatch_ns = "meta"
aten_name = schema.aten_name
# TODO: this is trolling
if func.func.has_symint() and metadata.supports_symint():
aten_name += "_symint"
shape_str = f"""\
{meta_conversion_str}
auto out_meta = at::{dispatch_ns}::{aten_name}({", ".join(meta_call_args)});
{meta_out}"""
else:
shape_sig = ComputeShapeSignature(
metadata.kernel, func, symint=metadata.supports_symint()
)
shape_str = f"""
auto shapes = {shape_sig.shape_call};"""
shape_str += f"""
TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""
# Calculating which dimensions are symbolic
func_schema_str = "aten::" + str(func.func)
shape_str += f"""
if(torch::lazy::symbolicShapeEnabled()){{
std::vector<torch::jit::IValue> inputs = {{ {", ".join(str(a.name) for a in all_args)} }};
const char* schema_str = "{func_schema_str}";
applySymbolicShapesOnLT(schema_str, inputs, shapes);
}}
"""
return shape_str
def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
node_ctor_input_str = node_ctor_inputs(schema)
return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
if (!node) {{
{self.shape_inference(func, schema)}
node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
CacheNode(node);
}}
"""
def create_lazy_tensor(self, first_tensor_name: str | None = None) -> str:
# xla uses an instance method for tensor creation, for the time being
if self.create_from_first_tensor:
# TODO(whc) remove this if XLA switches to using static method for creation
assert first_tensor_name is not None, (
"Requires first tensor to create lazy tensor"
)
return f"{first_tensor_name}.{self.create_tensor}"
return f"{self.backend_namespace}::{self.create_tensor}"
def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
returns_length = len(schema.returns)
value_args = schema.filtered_args(values=True, scalars=False)
value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
{self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""
if returns_length > 1:
assert len(value_types_names) > 0, (
"Code below assumes there is at least one tensor arg"
)
bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
for (int i = 0; i < {returns_length}; i++) {{
lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
}}
auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""
if schema.name.name.inplace or func.func.is_out_fn():
assert returns_length == 1, (
"We assumed there was no such case where an op is an in-place variant "
f"and has tuple outputs, but got tuple of len {returns_length}."
)
bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
auto& result = {first_tensor_name};"""
bridge_str += """
return result;"""
return bridge_str
@method_with_native_function
def __call__(self, func: NativeFunction) -> list[str]:
sig = kernel_signature(func, self.backend_index)
metadata = self.backend_index.get_kernel(func)
assert metadata is not None
schema = LazyIrSchema(func.func, symint=metadata.supports_symint())
return [
f"""\
{sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
{self.force_eager_fallback(func, schema, metadata, sig)}
{self.metrics(func, schema)}
{self.get_device(func, schema)}
{self.lazy_tensor_decls(func, schema)}
{self.build_ir_node(func, schema)}
{self.return_aten_tensor(func, schema)}
}}\n
"""
]
| GenLazyNativeFuncDefinition |
python | tornadoweb__tornado | demos/file_upload/file_receiver.py | {
"start": 872,
"end": 1562
} | class ____(tornado.web.RequestHandler):
def initialize(self):
self.bytes_read = 0
def data_received(self, chunk):
self.bytes_read += len(chunk)
def put(self, filename):
filename = unquote(filename)
mtype = self.request.headers.get("Content-Type")
logging.info('PUT "%s" "%s" %d bytes', filename, mtype, self.bytes_read)
self.write("OK")
def make_app():
return tornado.web.Application([(r"/post", POSTHandler), (r"/(.*)", PUTHandler)])
async def main():
options.parse_command_line()
app = make_app()
app.listen(8888)
await asyncio.Event().wait()
if __name__ == "__main__":
asyncio.run(main())
| PUTHandler |
python | numpy__numpy | numpy/lib/tests/test_twodim_base.py | {
"start": 5096,
"end": 5405
} | class ____:
def test_basic(self):
assert_raises(ValueError, fliplr, ones(4))
a = get_mat(4)
b = a[:, ::-1]
assert_equal(fliplr(a), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[2, 1, 0],
[5, 4, 3]]
assert_equal(fliplr(a), b)
| TestFliplr |
python | huggingface__transformers | src/transformers/models/speech_to_text/modeling_speech_to_text.py | {
"start": 22890,
"end": 28844
} | class ____(Speech2TextPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`Speech2TextEncoderLayer`].
Args:
config: Speech2TextConfig
embed_tokens (nn.Embedding): output embedding
"""
_no_split_modules = ["Speech2TextEncoderLayer"]
def __init__(self, config: Speech2TextConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_source_positions
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.conv = Conv1dSubsampler(config)
self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
self.max_source_positions,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_features,
attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_features (`torch.LongTensor` of shape `(batch_size, sequence_length, feature_size)`):
Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
padding and conversion into a tensor of type `torch.FloatTensor`. See
[`~Speech2TextFeatureExtractor.__call__`]
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
`[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
inputs_embeds = self.conv(input_features)
inputs_embeds = self.embed_scale * inputs_embeds
# subsample attention mask if necessary
if attention_mask is not None:
attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
padding_mask = attention_mask.ne(1).long()
else:
padding_mask = torch.zeros(inputs_embeds.shape[:2], dtype=torch.long, device=inputs_embeds.device)
embed_pos = self.embed_positions(padding_mask)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
| Speech2TextEncoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.