language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/conduit/endpoints/organization_conduit_demo.py | {
"start": 895,
"end": 1277
} | class ____(OrganizationPermission):
"""
Permission for the conduit demo endpoint.
We want members to be able to generate temporary credentials for the demo.
This is a demo-only feature and doesn't modify organization state.
"""
scope_map = {
"POST": ["org:read", "org:write", "org:admin"],
}
@region_silo_endpoint
| OrganizationConduitDemoPermission |
python | plotly__plotly.py | plotly/graph_objs/volume/legendgrouptitle/_font.py | {
"start": 233,
"end": 9921
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume.legendgrouptitle"
_path_str = "volume.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | python-poetry__poetry | src/poetry/toml/exceptions.py | {
"start": 133,
"end": 190
} | class ____(TOMLKitError, PoetryCoreError):
pass
| TOMLError |
python | apache__airflow | airflow-core/src/airflow/example_dags/example_dynamic_task_mapping_with_no_taskflow_operators.py | {
"start": 994,
"end": 1255
} | class ____(BaseOperator):
"""A custom operator that adds one to the input."""
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
def execute(self, context):
return self.value + 1
| AddOneOperator |
python | great-expectations__great_expectations | tests/integration/fluent/test_connections.py | {
"start": 466,
"end": 4176
} | class ____:
@pytest.mark.xfail(
raises=AssertionError,
) # inspector.get_table_names() fails with this role
@pytest.mark.parametrize(
"connection_string",
[
param(
"snowflake://ci:${SNOWFLAKE_CI_USER_PASSWORD}@oca29081.us-east-1/ci/public?warehouse=ci&role=ci_no_select",
id="role wo select",
),
],
)
def test_un_queryable_asset_should_raise_error(
self, context: DataContext, connection_string: str
):
"""
If we try to add an asset that is not queryable with the current datasource
connection details, then we should expect a TestConnectionError.
https://docs.snowflake.com/en/developer-guide/python-connector/sqlalchemy#connection-parameters
"""
snowflake_ds: SnowflakeDatasource = context.data_sources.add_snowflake(
"my_ds", connection_string=connection_string
)
inspector: Inspector = sa.inspection.inspect(snowflake_ds.get_engine())
inspector_tables: list[str] = list(inspector.get_table_names(schema="public"))
print(f"tables: {len(inspector_tables)}\n{inspector_tables}")
random.shuffle(inspector_tables)
unqueryable_table: str = ""
for table_name in inspector_tables:
try:
# query the asset, if it fails then we should expect a TestConnectionError
# expect the sql ProgrammingError to be raised
# we are only testing the failure case here
with snowflake_ds.get_engine().connect() as conn:
conn.execute(text(f"SELECT * FROM {table_name} LIMIT 1;"))
print(f"{table_name} is queryable")
except sa.exc.ProgrammingError:
print(f"{table_name} is not queryable")
unqueryable_table = table_name
break
assert unqueryable_table, "no unqueryable tables found, cannot run test"
with pytest.raises(TestConnectionError) as exc_info:
asset = snowflake_ds.add_table_asset(
name="un-reachable asset", table_name=unqueryable_table
)
print(f"\n Uh oh, asset should not have been created...\n{asset!r}")
print(f"\n TestConnectionError was raised as expected.\n{exc_info.exconly()}")
@pytest.mark.parametrize(
"connection_string",
[
param(
"snowflake://ci:${SNOWFLAKE_CI_USER_PASSWORD}@oca29081.us-east-1/ci/public?warehouse=ci&role=ci&database=ci&schema=public",
id="full connection string",
),
],
)
def test_queryable_asset_should_pass_test_connection(
self, context: DataContext, connection_string: str
):
snowflake_ds: SnowflakeDatasource = context.data_sources.add_snowflake(
"my_ds", connection_string=connection_string
)
inspector: Inspector = sa.inspection.inspect(snowflake_ds.get_engine())
inspector_tables = list(inspector.get_table_names())
print(f"tables: {len(inspector_tables)}\n{inspector_tables}")
table_name = random.choice(inspector_tables)
# query the table to make sure it is queryable
with snowflake_ds.get_engine().connect() as conn:
conn.execute(text(f"SELECT * FROM {table_name} LIMIT 1;"))
# the table is queryable so the `add_table_asset()` should pass the test_connection step
asset = snowflake_ds.add_table_asset(name="reachable asset", table_name=table_name)
print(f"\n Yay, asset was created!\n{asset!r}")
if __name__ == "__main__":
pytest.main([__file__, "-vv"])
| TestSnowflake |
python | astropy__astropy | astropy/utils/iers/iers.py | {
"start": 37667,
"end": 39279
} | class ____(ScienceState):
"""Default IERS table for Earth rotation and reference systems service.
These tables are used to calculate the offsets between ``UT1`` and ``UTC``
and for conversion to Earth-based coordinate systems.
The state itself is an IERS table, as an instance of one of the
`~astropy.utils.iers.IERS` classes. The default, the auto-updating
`~astropy.utils.iers.IERS_Auto` class, should suffice for most
purposes.
Examples
--------
To temporarily use the IERS-B file packaged with astropy::
>>> from astropy.utils import iers
>>> from astropy.time import Time
>>> iers_b = iers.IERS_B.open(iers.IERS_B_FILE)
>>> with iers.earth_orientation_table.set(iers_b):
... print(Time('2000-01-01').ut1.isot)
2000-01-01T00:00:00.355
To use the most recent IERS-A file for the whole session::
>>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP
>>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_A length=17463>...>
To go back to the default (of `~astropy.utils.iers.IERS_Auto`)::
>>> iers.earth_orientation_table.set(None) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_Auto length=17428>...>
"""
_value = None
@classmethod
def validate(cls, value):
if value is None:
value = IERS_Auto.open()
if not isinstance(value, IERS):
raise ValueError("earth_orientation_table requires an IERS Table.")
return value
| earth_orientation_table |
python | paramiko__paramiko | paramiko/ssh_exception.py | {
"start": 836,
"end": 974
} | class ____(Exception):
"""
Exception raised by failures in SSH2 protocol negotiation or logic errors.
"""
pass
| SSHException |
python | huggingface__transformers | src/transformers/models/owlv2/image_processing_owlv2.py | {
"start": 6957,
"end": 27790
} | class ____(BaseImageProcessor):
r"""
Constructs an OWLv2 image processor.
Args:
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to a square with gray pixels on the bottom and the right. Can be overridden by
`do_pad` in the `preprocess` method.
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 960, "width": 960}`):
Size to resize the image to. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling method to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_pad: bool = True,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.do_resize = do_resize
self.size = size if size is not None else {"height": 960, "width": 960}
self.resample = resample
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
def pad(
self,
image: np.ndarray,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Pad an image to a square with gray pixels on the bottom and the right, as per the original OWLv2
implementation.
Args:
image (`np.ndarray`):
Image to pad.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
"""
height, width = get_image_size(image)
size = max(height, width)
image = pad(
image=image,
padding=((0, size - height), (0, size - width)),
constant_values=0.0,
data_format=data_format,
input_data_format=input_data_format,
)
return image
def resize(
self,
image: np.ndarray,
size: dict[str, int],
anti_aliasing: bool = True,
anti_aliasing_sigma=None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image as per the original implementation.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary containing the height and width to resize the image to.
anti_aliasing (`bool`, *optional*, defaults to `True`):
Whether to apply anti-aliasing when downsampling the image.
anti_aliasing_sigma (`float`, *optional*, defaults to `None`):
Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated
automatically.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
"""
requires_backends(self, "scipy")
output_shape = (size["height"], size["width"])
image = to_channel_dimension_format(image, ChannelDimension.LAST)
image, output_shape = _preprocess_resize_output_shape(image, output_shape)
input_shape = image.shape
factors = np.divide(input_shape, output_shape)
# Translate modes used by np.pad to those used by scipy.ndimage
ndi_mode = "mirror"
cval = 0
order = 1
if anti_aliasing:
if anti_aliasing_sigma is None:
anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2)
else:
anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors)
if np.any(anti_aliasing_sigma < 0):
raise ValueError("Anti-aliasing standard deviation must be greater than or equal to zero")
elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)):
warnings.warn(
"Anti-aliasing standard deviation greater than zero but not down-sampling along all axes"
)
filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode)
else:
filtered = image
zoom_factors = [1 / f for f in factors]
out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True)
image = _clip_warp_output(image, out)
image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)
image = (
to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
)
return image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_pad: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to a square with gray pixels on the bottom and the right.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size to resize the image to.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_pad = do_pad if do_pad is not None else self.do_pad
do_resize = do_resize if do_resize is not None else self.do_resize
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size) # for BC
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
# Here, pad and resize methods are different from the rest of image processors
# as they don't have any resampling in resize()
# or pad size in pad() (the maximum of (height, width) is taken instead).
# hence, these arguments don't need to be passed in validate_preprocess_arguments.
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
size=size,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_pad:
images = [self.pad(image=image, input_data_format=input_data_format) for image in images]
if do_resize:
images = [
self.resize(
image=image,
size=size,
input_data_format=input_data_format,
)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_object_detection with OwlViT->Owlv2
def post_process_object_detection(
self,
outputs: "Owlv2ObjectDetectionOutput",
threshold: float = 0.1,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
"""
Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`Owlv2ObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.1):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
- "scores": The confidence scores for each predicted box on the image.
- "labels": Indexes of the classes predicted by the model on the image.
- "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
"""
batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
batch_size = len(batch_logits)
if target_sizes is not None and len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as images")
# batch_logits of shape (batch_size, num_queries, num_classes)
batch_class_logits = torch.max(batch_logits, dim=-1)
batch_scores = torch.sigmoid(batch_class_logits.values)
batch_labels = batch_class_logits.indices
# Convert to [x0, y0, x1, y1] format
batch_boxes = center_to_corners_format(batch_boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
batch_boxes = _scale_boxes(batch_boxes, target_sizes)
results = []
for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes):
keep = scores > threshold
scores = scores[keep]
labels = labels[keep]
boxes = boxes[keep]
results.append({"scores": scores, "labels": labels, "boxes": boxes})
return results
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_image_guided_detection
def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None):
"""
Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO
api.
Args:
outputs ([`OwlViTImageGuidedObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.0):
Minimum confidence threshold to use to filter out predicted boxes.
nms_threshold (`float`, *optional*, defaults to 0.3):
IoU threshold for non-maximum suppression of overlapping boxes.
target_sizes (`torch.Tensor`, *optional*):
Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
None, predictions will not be unnormalized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model. All labels are set to None as
`OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection.
"""
logits, target_boxes = outputs.logits, outputs.target_pred_boxes
if target_sizes is not None and len(logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes is not None and target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)
# Convert to [x0, y0, x1, y1] format
target_boxes = center_to_corners_format(target_boxes)
# Apply non-maximum suppression (NMS)
if nms_threshold < 1.0:
for idx in range(target_boxes.shape[0]):
for i in torch.argsort(-scores[idx]):
if not scores[idx][i]:
continue
ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0]
ious[i] = -1.0 # Mask self-IoU.
scores[idx][ious > nms_threshold] = 0.0
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
target_boxes = _scale_boxes(target_boxes, target_sizes)
# Compute box display alphas based on prediction scores
results = []
alphas = torch.zeros_like(scores)
for idx in range(target_boxes.shape[0]):
# Select scores for boxes matching the current query:
query_scores = scores[idx]
if not query_scores.nonzero().numel():
continue
# Apply threshold on scores before scaling
query_scores[query_scores < threshold] = 0.0
# Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1.
# All other boxes will either belong to a different query, or will not be shown.
max_score = torch.max(query_scores) + 1e-6
query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9)
query_alphas = torch.clip(query_alphas, 0.0, 1.0)
alphas[idx] = query_alphas
mask = alphas[idx] > 0
box_scores = alphas[idx][mask]
boxes = target_boxes[idx][mask]
results.append({"scores": box_scores, "labels": None, "boxes": boxes})
return results
__all__ = ["Owlv2ImageProcessor"]
| Owlv2ImageProcessor |
python | pandas-dev__pandas | pandas/tests/indexes/multi/test_util.py | {
"start": 171,
"end": 2323
} | class ____:
def test_simple(self):
x, y = list("ABC"), [1, 22]
result1, result2 = cartesian_product([x, y])
expected1 = np.array(["A", "A", "B", "B", "C", "C"])
expected2 = np.array([1, 22, 1, 22, 1, 22])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range("2000-01-01", periods=2)
result1, result2 = (Index(y).day for y in cartesian_product([x, x]))
expected1 = Index([1, 1, 2, 2], dtype=np.int32)
expected2 = Index([1, 2, 1, 2], dtype=np.int32)
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
@pytest.mark.parametrize("x, y", [[[], []], [[0, 1], []], [[], ["a", "b", "c"]]])
def test_empty(self, x, y):
# product of empty factors
expected1 = np.array([], dtype=np.asarray(x).dtype)
expected2 = np.array([], dtype=np.asarray(y).dtype)
result1, result2 = cartesian_product([x, y])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_empty_input(self):
# empty product (empty input):
result = cartesian_product([])
expected = []
assert result == expected
@pytest.mark.parametrize(
"X", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]]
)
def test_invalid_input(self, X):
msg = "Input must be a list-like of list-likes"
with pytest.raises(TypeError, match=msg):
cartesian_product(X=X)
def test_exceed_product_space(self):
# GH31355: raise useful error when produce space is too large
msg = "Product space too large to allocate arrays!"
dims = [np.arange(0, 22, dtype=np.int16) for i in range(12)] + [
(np.arange(15128, dtype=np.int16)),
]
with pytest.raises(ValueError, match=msg):
cartesian_product(X=dims)
| TestCartesianProduct |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_resolved_in_release.py | {
"start": 531,
"end": 5827
} | class ____(
SlackActivityNotificationTest, PerformanceIssueTestCase
):
def create_notification(self, group, version="meow"):
return ResolvedInReleaseActivityNotification(
Activity(
project=self.project,
group=group,
user_id=self.user.id,
type=ActivityType.SET_RESOLVED_IN_RELEASE,
data={"version": version},
)
)
def test_resolved_in_release_block(self) -> None:
notification = self.create_notification(self.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
release_name = notification.activity.data["version"]
assert fallback_text == f"Issue marked as resolved in {release_name} by {self.name}"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
blocks[1]["text"]["text"]
== f":red_circle: <http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=resolved_in_release_activity-slack¬ification_uuid={notification_uuid}|*{self.group.title}*>"
)
assert (
blocks[3]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_release_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_PERF_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_resolved_in_release_performance_issue_block_with_culprit_blocks(
self, occurrence: mock.MagicMock
) -> None:
"""
Test that a Slack message is sent with the expected payload when a performance issue is resolved in a release
and block kit is enabled.
"""
event = self.create_performance_issue()
notification = self.create_notification(event.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
release_name = notification.activity.data["version"]
assert fallback_text == f"Issue marked as resolved in {release_name} by {self.name}"
assert blocks[0]["text"]["text"] == fallback_text
self.assert_performance_issue_blocks_with_culprit_blocks(
blocks,
event.organization,
event.project.slug,
event.group,
"resolved_in_release_activity-slack",
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_resolved_in_release_generic_issue_block(self, occurrence: mock.MagicMock) -> None:
"""
Test that a Slack message is sent with the expected payload when a generic issue type is resolved in a release
and block kit is enabled.
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
notification = self.create_notification(group_event.group)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
release_name = notification.activity.data["version"]
assert fallback_text == f"Issue marked as resolved in {release_name} by {self.name}"
assert blocks[0]["text"]["text"] == fallback_text
self.assert_generic_issue_blocks(
blocks,
group_event.organization,
group_event.project.slug,
group_event.group,
"resolved_in_release_activity-slack",
)
def test_resolved_in_release_parsed_version_block(self) -> None:
"""
Test that the release version is formatted to the short version when block kit is enabled.
"""
notification = self.create_notification(self.group, version="frontend@1.0.0")
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == f"Issue marked as resolved in 1.0.0 by {self.name}"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
blocks[3]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_release_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
| SlackResolvedInReleaseNotificationTest |
python | getsentry__sentry | src/sentry/api/endpoints/organization_unsubscribe.py | {
"start": 2864,
"end": 4307
} | class ____(OrganizationUnsubscribeBase[Project]):
object_type = "project"
def fetch_instance(
self, request: Request, organization_id_or_slug: int | str, id: int
) -> Project:
try:
project = Project.objects.select_related("organization").get(id=id)
except Project.DoesNotExist:
raise NotFound()
if str(organization_id_or_slug).isdecimal():
if project.organization.id != int(organization_id_or_slug):
raise NotFound()
else:
if project.organization.slug != organization_id_or_slug:
raise NotFound()
if not OrganizationMember.objects.filter(
user_id=request.user.pk, organization_id=project.organization_id
).exists():
raise NotFound()
return project
def add_instance_data(self, data: dict[str, Any], instance: Project) -> dict[str, Any]:
data["slug"] = instance.slug
return data
def unsubscribe(self, request: Request, instance: Project):
notifications_service.update_notification_options(
actor=Actor(id=request.user.pk, actor_type=ActorType.USER),
type=NotificationSettingEnum.ISSUE_ALERTS,
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=instance.id,
value=NotificationSettingsOptionEnum.NEVER,
)
@region_silo_endpoint
| OrganizationUnsubscribeProject |
python | sphinx-doc__sphinx | tests/roots/test-ext-autosummary-module_all/autosummary_dummy_package_all/autosummary_dummy_module.py | {
"start": 43,
"end": 201
} | class ____:
"""Public Bar class"""
pass
def foo():
"""Foo function"""
pass
def public_foo():
"""Public Foo function"""
pass
| PublicBar |
python | spyder-ide__spyder | spyder/widgets/collectionseditor.py | {
"start": 77230,
"end": 82831
} | class ____(BaseTableView):
"""DictEditor table view"""
def __init__(self, parent, data, shellwidget=None, remote_editing=False,
create_menu=False):
BaseTableView.__init__(self, parent)
self.namespacebrowser = parent
self.shellwidget = shellwidget
self.var_properties = {}
self.dictfilter = None
self.readonly = False
self.source_model = CollectionsModel(
self, data, names=True,
minmax=self.get_conf('minmax'),
remote=True)
self.horizontalHeader().sectionClicked.connect(
self.source_model.load_all)
self.proxy_model = CollectionsCustomSortFilterProxy(self)
self.proxy_model.setSourceModel(self.source_model)
self.proxy_model.setDynamicSortFilter(True)
self.proxy_model.setFilterKeyColumn(0) # Col 0 for Name
self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.proxy_model.setSortRole(Qt.UserRole)
self.setModel(self.proxy_model)
self.hideColumn(4) # Column 4 for Score
self.delegate = RemoteCollectionsDelegate(self, self.namespacebrowser)
self.delegate.sig_free_memory_requested.connect(
self.sig_free_memory_requested)
self.delegate.sig_editor_creation_started.connect(
self.sig_editor_creation_started)
self.delegate.sig_editor_shown.connect(self.sig_editor_shown)
self.setItemDelegate(self.delegate)
self.setup_table()
if create_menu:
self.menu = self.setup_menu()
# Sorting columns
self.setSortingEnabled(True)
self.sortByColumn(0, Qt.AscendingOrder)
# ------ Remote/local API -------------------------------------------------
def get_value(self, name):
"""Get the value of a variable"""
value = self.shellwidget.get_value(name)
return value
def new_value(self, name, value):
"""Create new value in data"""
try:
self.shellwidget.set_value(name, value)
except TypeError as e:
QMessageBox.critical(self, _("Error"), "TypeError: %s" % str(e))
self.namespacebrowser.refresh_namespacebrowser()
def remove_values(self, names):
"""Remove values from data"""
for name in names:
self.shellwidget.remove_value(name)
self.namespacebrowser.refresh_namespacebrowser()
def copy_value(self, orig_name, new_name):
"""Copy value"""
self.shellwidget.copy_value(orig_name, new_name)
self.namespacebrowser.refresh_namespacebrowser()
def is_list(self, name):
"""Return True if variable is a list, a tuple or a set"""
return self.var_properties[name]['is_list']
def is_dict(self, name):
"""Return True if variable is a dictionary"""
return self.var_properties[name]['is_dict']
def get_len(self, name):
"""Return sequence length"""
return self.var_properties[name]['len']
def is_array(self, name):
"""Return True if variable is a NumPy array"""
return self.var_properties[name]['is_array']
def is_image(self, name):
"""Return True if variable is a PIL.Image image"""
return self.var_properties[name]['is_image']
def is_data_frame(self, name):
"""Return True if variable is a DataFrame"""
return self.var_properties[name]['is_data_frame']
def is_series(self, name):
"""Return True if variable is a Series"""
return self.var_properties[name]['is_series']
def get_array_shape(self, name):
"""Return array's shape"""
return self.var_properties[name]['array_shape']
def get_array_ndim(self, name):
"""Return array's ndim"""
return self.var_properties[name]['array_ndim']
def plot(self, name, funcname):
"""Plot item"""
sw = self.shellwidget
sw.execute("%%varexp --%s %s" % (funcname, name))
def imshow(self, name):
"""Show item's image"""
sw = self.shellwidget
sw.execute("%%varexp --imshow %s" % name)
def show_image(self, name):
"""Show image (item is a PIL image)"""
command = "%s.show()" % name
sw = self.shellwidget
sw.execute(command)
# ------ Other ------------------------------------------------------------
def setup_menu(self):
"""Setup context menu."""
menu = BaseTableView.setup_menu(self)
return menu
def refresh_menu(self):
if self.var_properties:
super().refresh_menu()
def do_find(self, text):
"""Update the regex text for the variable finder."""
text = text.replace(' ', '').lower()
# Make sure everything is loaded
self.source_model.load_all()
self.proxy_model.set_filter(text)
self.source_model.update_search_letters(text)
if text:
# TODO: Use constants for column numbers
self.sortByColumn(4, Qt.DescendingOrder) # Col 4 for index
def next_row(self):
"""Move to next row from currently selected row."""
row = self.currentIndex().row()
rows = self.proxy_model.rowCount()
if row + 1 == rows:
row = -1
self.selectRow(row + 1)
def previous_row(self):
"""Move to previous row from currently selected row."""
row = self.currentIndex().row()
rows = self.proxy_model.rowCount()
if row == 0:
row = rows
self.selectRow(row - 1)
| RemoteCollectionsEditorTableView |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 58933,
"end": 59033
} | class ____:
enabled: bool
config: StringInvertedIndexConfig
@dataclass
| StringInvertedIndexType |
python | huggingface__transformers | tests/models/marian/test_modeling_marian.py | {
"start": 2129,
"end": 8062
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
decoder_start_token_id=3,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.decoder_start_token_id = decoder_start_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
3,
)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return MarianConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = MarianModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = MarianModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = MarianEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = MarianDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
| MarianModelTester |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_installation_service_hook_projects.py | {
"start": 164,
"end": 4579
} | class ____(APITestCase):
def setUp(self) -> None:
self.user = self.create_user(email="boop@example.com")
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.project2 = self.create_project(organization=self.org)
self.project3 = self.create_project(organization=self.org)
self.sentry_app = self.create_sentry_app(
name="Testin",
organization=self.org,
webhook_url="https://example.com",
scopes=["org:admin", "org:integrations", "event:admin"],
)
self.install = self.create_sentry_app_installation(
organization=self.org, slug=self.sentry_app.slug, user=self.user
)
self.api_token = self.create_internal_integration_token(
install=self.install, user=self.user # using same install for auth token and webhooks
)
self.service_hook = ServiceHook.objects.get(
installation_id=self.install.id,
)
self.url = reverse(
"sentry-api-0-sentry-app-installation-service-hook-projects", args=[self.install.uuid]
)
def test_get_service_hook_projects(self) -> None:
# Create a service hook project
ServiceHookProject.objects.create(
project_id=self.project.id, service_hook_id=self.service_hook.id
)
response = self.client.get(
self.url, format="json", HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["project_id"] == str(self.project.id)
def test_post_service_hook_projects(self) -> None:
ServiceHookProject.objects.create(
project_id=self.project2.id, service_hook_id=self.service_hook.id
)
ServiceHookProject.objects.create(
project_id=self.project3.id, service_hook_id=self.service_hook.id
)
data = {"projects": [self.project.id, self.project2.id]}
response = self.client.post(
self.url, data=data, format="json", HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 200
assert len(response.data) == 2
response_data = {response.data[0]["project_id"], response.data[1]["project_id"]}
assert response_data == {str(self.project.id), str(self.project2.id)}
# Verify both projects are in the database
hook_projects = ServiceHookProject.objects.filter(service_hook_id=self.service_hook.id)
assert hook_projects.count() == 2
project_ids = {hp.project_id for hp in hook_projects}
assert project_ids == {self.project.id, self.project2.id}
def test_post_service_hook_projects_mixed_types(self) -> None:
data = {"projects": [self.project.slug, self.project2.id]}
response = self.client.post(
self.url, data=data, format="json", HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 400
def test_post_service_hook_projects_with_invalid_project(self) -> None:
data = {"projects": ["invalid-project"]}
response = self.client.post(
self.url, data=data, format="json", HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 400
def test_post_service_hook_projects_without_projects(self) -> None:
response = self.client.post(
self.url, data={}, format="json", HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 400
def test_delete_service_hook_projects(self) -> None:
# Create some service hook projects first
ServiceHookProject.objects.create(
project_id=self.project.id, service_hook_id=self.service_hook.id
)
ServiceHookProject.objects.create(
project_id=self.project2.id, service_hook_id=self.service_hook.id
)
response = self.client.delete(self.url, HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}")
assert response.status_code == 204
# Verify all hook projects were deleted
assert ServiceHookProject.objects.filter(service_hook_id=self.service_hook.id).count() == 0
| SentryAppInstallationServiceHookProjectsEndpointTest |
python | getsentry__sentry | tests/sentry/spans/consumers/process_segments/test_message.py | {
"start": 779,
"end": 14948
} | class ____(TestCase):
def setUp(self) -> None:
self.project = self.create_project()
def generate_basic_spans(self):
segment_span = build_mock_span(
project_id=self.project.id,
is_segment=True,
attributes={
"sentry.browser.name": {"value": "Google Chrome"},
"sentry.transaction": {
"value": "/api/0/organizations/{organization_id_or_slug}/n-plus-one/"
},
"sentry.transaction.method": {"value": "GET"},
"sentry.transaction.op": {"value": "http.server"},
"sentry.user": {"value": "id:1"},
},
)
child_span = build_mock_span(
project_id=self.project.id,
description="mock_test",
parent_span_id=segment_span["span_id"],
span_id="940ce942561548b5",
start_timestamp_ms=1707953018867,
start_timestamp=1707953018.867,
)
return [child_span, segment_span]
def generate_n_plus_one_spans(self):
segment_span = build_mock_span(
project_id=self.project.id,
is_segment=True,
_performance_issues_spans=True,
)
child_span = build_mock_span(
project_id=self.project.id,
description="OrganizationNPlusOne.get",
parent_span_id=segment_span["span_id"],
span_id="940ce942561548b5",
start_timestamp_ms=1707953018867,
start_timestamp=1707953018.867,
)
cause_span = build_mock_span(
project_id=self.project.id,
span_op="db",
description='SELECT "sentry_project"."id", "sentry_project"."slug", "sentry_project"."name", "sentry_project"."forced_color", "sentry_project"."organization_id", "sentry_project"."public", "sentry_project"."date_added", "sentry_project"."status", "sentry_project"."first_event", "sentry_project"."flags", "sentry_project"."platform" FROM "sentry_project"',
parent_span_id="940ce942561548b5",
span_id="a974da4671bc3857",
start_timestamp_ms=1707953018867,
start_timestamp=1707953018.867,
)
repeating_span_description = 'SELECT "sentry_organization"."id", "sentry_organization"."name", "sentry_organization"."slug", "sentry_organization"."status", "sentry_organization"."date_added", "sentry_organization"."default_role", "sentry_organization"."is_test", "sentry_organization"."flags" FROM "sentry_organization" WHERE "sentry_organization"."id" = %s LIMIT 21'
def repeating_span():
return build_mock_span(
project_id=self.project.id,
span_op="db",
description=repeating_span_description,
parent_span_id="940ce942561548b5",
span_id=uuid.uuid4().hex[:16],
start_timestamp_ms=1707953018869,
start_timestamp=1707953018.869,
)
repeating_spans = [repeating_span() for _ in range(7)]
spans = [segment_span, child_span, cause_span] + repeating_spans
return spans
def test_enrich_spans(self) -> None:
spans = self.generate_basic_spans()
processed_spans = process_segment(spans)
assert len(processed_spans) == len(spans)
child_span, segment_span = processed_spans
child_attrs = child_span["attributes"] or {}
segment_data = segment_span["attributes"] or {}
assert child_attrs["sentry.transaction"] == segment_data["sentry.transaction"]
assert child_attrs["sentry.transaction.method"] == segment_data["sentry.transaction.method"]
assert child_attrs["sentry.transaction.op"] == segment_data["sentry.transaction.op"]
assert child_attrs["sentry.user"] == segment_data["sentry.user"]
def test_enrich_spans_no_segment(self) -> None:
spans = self.generate_basic_spans()
for span in spans:
span["is_segment"] = False
del span["attributes"]
processed_spans = process_segment(spans)
assert len(processed_spans) == len(spans)
for i, span in enumerate(processed_spans):
assert span["span_id"] == spans[i]["span_id"]
assert span["op"]
assert span["hash"]
def test_create_models(self) -> None:
spans = self.generate_basic_spans()
assert process_segment(spans)
Environment.objects.get(
organization_id=self.organization.id,
name="development",
)
release = Release.objects.get(
organization_id=self.organization.id,
version="backend@24.2.0.dev0+699ce0cd1281cc3c7275d0a474a595375c769ae8",
)
assert release.date_added.timestamp() == spans[0]["end_timestamp"]
@override_options({"spans.process-segments.detect-performance-problems.enable": True})
@mock.patch("sentry.issues.ingest.send_issue_occurrence_to_eventstream")
def test_n_plus_one_issue_detection(self, mock_eventstream: mock.MagicMock) -> None:
spans = self.generate_n_plus_one_spans()
with mock.patch(
"sentry.issues.grouptype.PerformanceStreamedSpansGroupTypeExperimental.released",
return_value=True,
):
process_segment(spans)
mock_eventstream.assert_called_once()
performance_problem = mock_eventstream.call_args[0][1]
assert performance_problem.fingerprint == [
md5(
b"1-GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES-f906d576ffde8f005fd741f7b9c8a35062361e67-1019"
).hexdigest()
]
assert performance_problem.type == PerformanceStreamedSpansGroupTypeExperimental
@override_options({"spans.process-segments.detect-performance-problems.enable": True})
@mock.patch("sentry.issues.ingest.send_issue_occurrence_to_eventstream")
@pytest.mark.xfail(reason="batches without segment spans are not supported yet")
def test_n_plus_one_issue_detection_without_segment_span(
self, mock_eventstream: mock.MagicMock
) -> None:
segment_span = build_mock_span(project_id=self.project.id, is_segment=False)
child_span = build_mock_span(
project_id=self.project.id,
description="OrganizationNPlusOne.get",
is_segment=False,
parent_span_id="b35b839c02985f33",
span_id="940ce942561548b5",
start_timestamp_ms=1707953018867,
start_timestamp=1707953018.867,
)
cause_span = build_mock_span(
project_id=self.project.id,
span_op="db",
description='SELECT "sentry_project"."id", "sentry_project"."slug", "sentry_project"."name", "sentry_project"."forced_color", "sentry_project"."organization_id", "sentry_project"."public", "sentry_project"."date_added", "sentry_project"."status", "sentry_project"."first_event", "sentry_project"."flags", "sentry_project"."platform" FROM "sentry_project"',
is_segment=False,
parent_span_id="940ce942561548b5",
span_id="a974da4671bc3857",
start_timestamp_ms=1707953018867,
start_timestamp=1707953018.867,
)
repeating_span_description = 'SELECT "sentry_organization"."id", "sentry_organization"."name", "sentry_organization"."slug", "sentry_organization"."status", "sentry_organization"."date_added", "sentry_organization"."default_role", "sentry_organization"."is_test", "sentry_organization"."flags" FROM "sentry_organization" WHERE "sentry_organization"."id" = %s LIMIT 21'
def repeating_span():
return build_mock_span(
project_id=self.project.id,
span_op="db",
description=repeating_span_description,
is_segment=False,
parent_span_id="940ce942561548b5",
span_id=uuid.uuid4().hex[:16],
start_timestamp_ms=1707953018869,
start_timestamp=1707953018.869,
)
repeating_spans = [repeating_span() for _ in range(7)]
spans = [segment_span, child_span, cause_span] + repeating_spans
with mock.patch(
"sentry.issues.grouptype.PerformanceStreamedSpansGroupTypeExperimental.released"
) as mock_released:
mock_released.return_value = True
process_segment(spans)
performance_problem = mock_eventstream.call_args[0][1]
assert performance_problem.fingerprint == [
md5(
b"1-GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES-f906d576ffde8f005fd741f7b9c8a35062361e67-1019"
).hexdigest()
]
assert performance_problem.type == PerformanceStreamedSpansGroupTypeExperimental
@mock.patch("sentry.spans.consumers.process_segments.message.track_outcome")
@pytest.mark.skip("temporarily disabled")
def test_skip_produce_does_not_track_outcomes(self, mock_track_outcome: mock.MagicMock) -> None:
"""Test that outcomes are not tracked when skip_produce=True"""
spans = self.generate_basic_spans()
# Process with skip_produce=True
process_segment(spans, skip_produce=True)
# Verify track_outcome was not called
mock_track_outcome.assert_not_called()
# Process with skip_produce=False (default)
process_segment(spans, skip_produce=False)
# Verify track_outcome was called once
mock_track_outcome.assert_called_once()
@mock.patch("sentry.spans.consumers.process_segments.message.set_project_flag_and_signal")
def test_record_signals(self, mock_track):
span = build_mock_span(
project_id=self.project.id,
is_segment=True,
span_op="http.client",
attributes={
"sentry.op": {"value": "http.client"},
"sentry.category": {"value": "http"},
},
)
spans = process_segment([span])
assert len(spans) == 1
signals = [args[0][1] for args in mock_track.call_args_list]
assert signals == ["has_transactions", "has_insights_http"]
def test_segment_name_propagation(self):
child_span, segment_span = self.generate_basic_spans()
segment_span["name"] = "my segment name"
processed_spans = process_segment([child_span, segment_span])
assert len(processed_spans) == 2
child_span, segment_span = processed_spans
segment_attributes = segment_span["attributes"] or {}
assert segment_attributes["sentry.segment.name"] == {
"type": "string",
"value": "my segment name",
}
child_attributes = child_span["attributes"] or {}
assert child_attributes["sentry.segment.name"] == {
"type": "string",
"value": "my segment name",
}
def test_segment_name_propagation_when_name_missing(self):
child_span, segment_span = self.generate_basic_spans()
del segment_span["name"]
processed_spans = process_segment([child_span, segment_span])
assert len(processed_spans) == 2
child_span, segment_span = processed_spans
segment_attributes = segment_span["attributes"] or {}
assert segment_attributes.get("sentry.segment.name") is None
child_attributes = child_span["attributes"] or {}
assert child_attributes.get("sentry.segment.name") is None
@mock.patch("sentry.spans.consumers.process_segments.message.record_segment_name")
def test_segment_name_normalization_with_feature(
self, mock_record_segment_name: mock.MagicMock
):
_, segment_span = self.generate_basic_spans()
segment_span["name"] = "/foo/2fd4e1c67a2d28fced849ee1bb76e7391b93eb12/user/123/0"
with self.feature("organizations:normalize_segment_names_in_span_enrichment"):
processed_spans = process_segment([segment_span])
assert processed_spans[0]["name"] == "/foo/*/user/*/0"
mock_record_segment_name.assert_called_once()
@mock.patch("sentry.spans.consumers.process_segments.message.record_segment_name")
def test_segment_name_normalization_without_feature(
self, mock_record_segment_name: mock.MagicMock
):
_, segment_span = self.generate_basic_spans()
segment_span["name"] = "/foo/2fd4e1c67a2d28fced849ee1bb76e7391b93eb12/user/123/0"
with Feature({"organizations:normalize_segment_names_in_span_enrichment": False}):
processed_spans = process_segment([segment_span])
assert (
processed_spans[0]["name"] == "/foo/2fd4e1c67a2d28fced849ee1bb76e7391b93eb12/user/123/0"
)
mock_record_segment_name.assert_not_called()
def test_segment_name_normalization_checks_source(self):
_, segment_span = self.generate_basic_spans()
segment_span["name"] = "/foo/2fd4e1c67a2d28fced849ee1bb76e7391b93eb12/user/123/0"
segment_span["attributes"][ATTRIBUTE_NAMES.SENTRY_SPAN_SOURCE] = {
"type": "string",
"value": "route",
}
with self.feature("organizations:normalize_segment_names_in_span_enrichment"):
processed_spans = process_segment([segment_span])
assert (
processed_spans[0]["name"] == "/foo/2fd4e1c67a2d28fced849ee1bb76e7391b93eb12/user/123/0"
)
def test_verify_compatibility():
spans: list[dict[str, Any]] = [
# regular span:
{"data": {"foo": 1}},
# valid compat span:
{"data": {"foo": 1}, "attributes": {"foo": {"value": 1}}},
# invalid compat spans:
{"data": {"foo": 1}, "attributes": {"value": {"foo": "2"}}},
{"data": {"bar": 1}, "attributes": None},
{"data": {"baz": 1}, "attributes": {}},
{"data": {"zap": 1}, "attributes": {"zap": {"no_value": "1"}}},
{"data": {"abc": 1}, "attributes": {"abc": None}},
]
result = _verify_compatibility(spans)
assert len(result) == len(spans)
assert [v is None for v in result] == [True, True, False, False, False, False, False]
| TestSpansTask |
python | realpython__materials | django-user-management/user_auth_intro/users/forms.py | {
"start": 57,
"end": 203
} | class ____(UserCreationForm):
class Meta(UserCreationForm.Meta):
fields = UserCreationForm.Meta.fields + ("email",)
| CustomUserCreationForm |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E30_syntax_error.py | {
"start": 139,
"end": 300
} | class ____:
def __init__(
pass
def method():
pass
foo = Foo(
def top(
def nested1():
pass
def nested2():
pass
| Foo |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 613808,
"end": 614148
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("type", "value")
type = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="type")
value = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="value")
| SecurityAdvisoryIdentifier |
python | automl__auto-sklearn | autosklearn/pipeline/components/data_preprocessing/minority_coalescense/__init__.py | {
"start": 945,
"end": 4515
} | class ____(AutoSklearnChoice):
@classmethod
def get_components(cls: BaseEstimator) -> Dict[str, BaseEstimator]:
components: Dict[str, BaseEstimator] = OrderedDict()
components.update(_mcs)
components.update(additional_components.components)
return components
def get_hyperparameter_search_space(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
default: Optional[str] = None,
include: Optional[Dict[str, str]] = None,
exclude: Optional[Dict[str, str]] = None,
) -> ConfigurationSpace:
cs = ConfigurationSpace()
if dataset_properties is None:
dataset_properties = {}
# Compile a list of legal preprocessors for this problem
available_preprocessors = self.get_available_components(
dataset_properties=dataset_properties, include=include, exclude=exclude
)
if len(available_preprocessors) == 0:
raise ValueError(
"No minority coalescers found, please add any one minority coalescer"
"component."
)
if default is None:
defaults = ["minority_coalescer", "no_coalescense"]
for default_ in defaults:
if default_ in available_preprocessors:
default = default_
break
preprocessor = CategoricalHyperparameter(
"__choice__", list(available_preprocessors.keys()), default_value=default
)
cs.add_hyperparameter(preprocessor)
for name in available_preprocessors:
preprocessor_configuration_space = available_preprocessors[
name
].get_hyperparameter_search_space(dataset_properties=dataset_properties)
parent_hyperparameter = {"parent": preprocessor, "value": name}
cs.add_configuration_space(
name,
preprocessor_configuration_space,
parent_hyperparameter=parent_hyperparameter,
)
self.configuration_space = cs
self.dataset_properties = dataset_properties
return cs
def set_hyperparameters(
self,
configuration: Configuration,
init_params: Optional[Dict[str, Any]] = None,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
) -> "CoalescenseChoice":
new_params = {}
params = configuration.get_dictionary()
choice = params["__choice__"]
del params["__choice__"]
for param, value in params.items():
param = param.replace(choice, "").replace(":", "")
new_params[param] = value
if init_params is not None:
for param, value in init_params.items():
# These next two lines are different than in the base class -
# they allow removing the categorical feature indicator array
# in order to not pass it to the no encoding
if choice not in param:
continue
param = param.replace(choice, "").replace(":", "")
new_params[param] = value
new_params["random_state"] = self.random_state
self.new_params = new_params
new_params["feat_type"] = feat_type
self.choice = self.get_components()[choice](**new_params)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
return self.choice.transform(X)
| CoalescenseChoice |
python | pypa__hatch | tests/cli/status/test_status.py | {
"start": 1056,
"end": 3047
} | class ____:
@pytest.mark.parametrize("file_name", ["pyproject.toml", "setup.py"])
def test_found_project_flag(self, hatch, temp_dir, config_file, helpers, file_name):
project_file = temp_dir / file_name
project_file.touch()
project = "foo"
config_file.model.projects = {project: str(temp_dir)}
config_file.save()
result = hatch("-p", project, "status")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
[Project] - {project}
[Location] - {temp_dir}
[Config] - {config_file.path}
"""
)
@pytest.mark.parametrize("file_name", ["pyproject.toml", "setup.py"])
def test_found_project_env(self, hatch, temp_dir, config_file, helpers, file_name):
project_file = temp_dir / file_name
project_file.touch()
project = "foo"
config_file.model.projects = {project: str(temp_dir)}
config_file.save()
with EnvVars({ConfigEnvVars.PROJECT: project}):
result = hatch("status")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
[Project] - {project}
[Location] - {temp_dir}
[Config] - {config_file.path}
"""
)
def test_unknown_project(self, hatch):
project = "foo"
result = hatch("-p", project, "status")
assert result.exit_code == 1
assert result.output == f"Unable to locate project {project}\n"
def test_not_a_project(self, hatch, temp_dir, config_file):
project = "foo"
config_file.model.project = project
config_file.model.projects = {project: str(temp_dir)}
config_file.save()
result = hatch("-p", project, "status")
assert result.exit_code == 1
assert result.output == f"Unable to locate project {project}\n"
| TestProjectExplicit |
python | sympy__sympy | sympy/core/kind.py | {
"start": 3468,
"end": 4816
} | class ____(Kind):
"""
Kind for all numeric object.
This kind represents every number, including complex numbers,
infinity and ``S.NaN``. Other objects such as quaternions do not
have this kind.
Most ``Expr`` are initially designed to represent the number, so
this will be the most common kind in SymPy core. For example
``Symbol()``, which represents a scalar, has this kind as long as it
is commutative.
Numbers form a field. Any operation between number-kind objects will
result this kind as well.
Examples
========
>>> from sympy import S, oo, Symbol
>>> S.One.kind
NumberKind
>>> (-oo).kind
NumberKind
>>> S.NaN.kind
NumberKind
Commutative symbol are treated as number.
>>> x = Symbol('x')
>>> x.kind
NumberKind
>>> Symbol('y', commutative=False).kind
UndefinedKind
Operation between numbers results number.
>>> (x+1).kind
NumberKind
See Also
========
sympy.core.expr.Expr.is_Number : check if the object is strictly
subclass of ``Number`` class.
sympy.core.expr.Expr.is_number : check if the object is number
without any free symbol.
"""
def __new__(cls):
return super().__new__(cls)
def __repr__(self):
return "NumberKind"
NumberKind = _NumberKind()
| _NumberKind |
python | tensorflow__tensorflow | tensorflow/python/framework/c_api_util.py | {
"start": 1442,
"end": 2841
} | class ____(object):
"""Wrapper around single-ownership C-API objects that handles deletion."""
__slots__ = ["_obj", "deleter", "name", "type_name"]
def __init__(self, name, obj, deleter):
# '_' prefix marks _obj private, but unclear if it is required also to
# maintain a special CPython destruction order.
self._obj = obj
self.name = name
# Note: when we're destructing the global context (i.e when the process is
# terminating) we may have already deleted other modules. By capturing the
# DeleteGraph function here, we retain the ability to cleanly destroy the
# graph at shutdown, which satisfies leak checkers.
self.deleter = deleter
self.type_name = str(type(obj))
@contextlib.contextmanager
def get(self):
"""Yields the managed C-API Object, guaranteeing aliveness.
This is a context manager. Inside the context the C-API object is
guaranteed to be alive.
Raises:
AlreadyGarbageCollectedError: if the object is already deleted.
"""
# Thread-safety: self.__del__ never runs during the call of this function
# because there is a reference to self from the argument list.
if self._obj is None:
raise AlreadyGarbageCollectedError(self.name, self.type_name)
yield self._obj
def __del__(self):
obj = self._obj
if obj is not None:
self._obj = None
self.deleter(obj)
| UniquePtr |
python | ray-project__ray | python/ray/tune/tests/test_trainable.py | {
"start": 441,
"end": 4097
} | class ____(tune.Trainable):
def __init__(self, return_type: str, *args, **kwargs):
self.return_type = return_type
super(SavingTrainable, self).__init__(*args, **kwargs)
def step(self):
return {"iter": self.training_iteration}
def save_checkpoint(self, tmp_checkpoint_dir: str):
checkpoint_data = {"data": 1}
if self.return_type == "object":
return checkpoint_data
subdir = os.path.join(tmp_checkpoint_dir, "subdir")
os.makedirs(subdir, exist_ok=True)
checkpoint_file = os.path.join(subdir, "checkpoint.pkl")
with open(checkpoint_file, "w") as f:
f.write(json.dumps(checkpoint_data))
if self.return_type == "root":
return tmp_checkpoint_dir
elif self.return_type == "subdir":
return subdir
elif self.return_type == "checkpoint":
return checkpoint_file
def load_checkpoint(self, checkpoint: Union[Dict, str]):
if self.return_type == "object":
assert isinstance(checkpoint, dict)
checkpoint_data = checkpoint
checkpoint_file = None
elif self.return_type == "root":
assert "subdir" not in checkpoint
checkpoint_file = os.path.join(checkpoint, "subdir", "checkpoint.pkl")
elif self.return_type == "subdir":
assert "subdir" in checkpoint
assert "checkpoint.pkl" not in checkpoint
checkpoint_file = os.path.join(checkpoint, "checkpoint.pkl")
else: # self.return_type == "checkpoint"
assert checkpoint.endswith("subdir/checkpoint.pkl")
checkpoint_file = checkpoint
if checkpoint_file:
with open(checkpoint_file, "rb") as f:
checkpoint_data = json.load(f)
checkpoint_data = {
key: value
for key, value in checkpoint_data.items()
if not key.startswith("_")
}
assert checkpoint_data == {"data": 1}, checkpoint_data
def function_trainable(config):
with create_dict_checkpoint({"checkpoint_data": 5}) as checkpoint:
tune.report({"metric": 4}, checkpoint=checkpoint)
@pytest.mark.parametrize("return_type", ["object", "root"])
def test_save_load_checkpoint_path_class(ray_start_2_cpus, return_type, tmpdir):
"""Assert that restoring from a Trainable.save() future works with
class trainables.
Needs Ray cluster so we get actual futures.
"""
trainable = ray.remote(SavingTrainable).remote(return_type=return_type)
# Train one step
ray.get(trainable.train.remote())
# Save checkpoint
saving_future = trainable.save.remote()
# Check for errors
ray.get(saving_future)
restoring_future = trainable.restore.remote(saving_future)
ray.get(restoring_future)
def test_save_load_checkpoint_path_fn(ray_start_2_cpus, tmp_path):
"""Assert that restoring from a Trainable.save() future works with
function trainables.
Needs Ray cluster so we get actual futures.
"""
trainable_cls = wrap_function(function_trainable)
trainable = ray.remote(trainable_cls).remote(
storage=StorageContext(
storage_path=str(tmp_path),
experiment_dir_name="exp",
trial_dir_name="trial",
)
)
ray.get(trainable.train.remote())
saving_future = trainable.save.remote()
# Check for errors
ray.get(saving_future)
restoring_future = trainable.restore.remote(saving_future)
ray.get(restoring_future)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| SavingTrainable |
python | catalyst-team__catalyst | catalyst/contrib/datasets/movielens.py | {
"start": 219,
"end": 9294
} | class ____(Dataset):
"""
MovieLens data sets were collected by the GroupLens Research Project
at the University of Minnesota.
This data set consists of:
* 100,000 ratings (1-5) from 943 users on 1682 movies.
* Each user has rated at least 20 movies.
* Simple demographic info for the users
(age, gender, occupation, zip)
The data was collected through the MovieLens web site
(movielens.umn.edu) during the seven-month period from September 19th,
1997 through April 22nd, 1998. This data has been cleaned up - users
who had less than 20 ratings or did not have complete demographic
information were removed from this data set. Detailed descriptions of
the data file can be found at the end of this file.
Neither the University of Minnesota nor any of the researchers
involved can guarantee the correctness of the data, its suitability
for any particular purpose, or the validity of results based on the
use of the data set. The data set may be used for any research
purposes under the following conditions:
* The user may not state or imply any endorsement from the
University of Minnesota or the GroupLens Research Group.
* The user must acknowledge the use of the data set in
publications resulting from the use of the data set
(see below for citation information).
* The user may not redistribute the data without separate
permission.
* The user may not use this information for any commercial or
revenue-bearing purposes without first obtaining permission
from a faculty member of the GroupLens Research Project at the
University of Minnesota.
If you have any further questions or comments, please contact GroupLens
<grouplens-info@cs.umn.edu>.
http://files.grouplens.org/datasets/movielens/ml-100k-README.txt
.. note::
catalyst[ml] required for this dataset.
"""
resources = (
"http://files.grouplens.org/datasets/movielens/ml-100k.zip",
"0e33842e24a9c977be4e0107933c0723",
)
filename = "ml-100k.zip"
training_file = "training.pt"
test_file = "test.pt"
def __init__(self, root, train=True, download=False, min_rating=0.0):
"""
Args:
root (string): Root directory of dataset where
``MovieLens/processed/training.pt``
and ``MovieLens/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from
``training.pt``, otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from
the internet and puts it in root directory. If dataset
is already downloaded, it is not downloaded again.
min_rating (float, optional): Minimum rating to include in
the interaction matrix
Raises:
RuntimeError: If ``download is False`` and the dataset not found.
"""
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
self.train = train
self.min_rating = min_rating
if download:
self._download()
self._fetch_movies()
if not self._check_exists():
raise RuntimeError("Dataset not found. Set `download=True`")
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data = torch.load(os.path.join(self.processed_folder, data_file))
def __getitem__(self, user_index):
"""Get item.
Args:
user_index (int): User index [0, 942]
Returns:
tensor: (items) item's ranking for the user with index user_index
"""
return self.data[user_index]
def __len__(self):
"""The length of the loader"""
return self.dimensions[0]
@property
def raw_folder(self):
"""Create raw folder for data download
Returns:
raw_path (path): raw folder path
"""
return os.path.join(self.root, self.__class__.__name__, "raw")
@property
def processed_folder(self):
"""Create the folder for the processed files
Returns:
raw_path (path): processed folder path
"""
return os.path.join(self.root, self.__class__.__name__, "processed")
def _check_exists(self):
"""Check if the path for tarining and testing data
exists in processed folder.
Returns:
raw_path (path): processed folder path
"""
return os.path.exists(
os.path.join(self.processed_folder, self.training_file)
) and os.path.exists(os.path.join(self.processed_folder, self.test_file))
def _download(self):
"""Download and extract files/"""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
url = self.resources[0]
md5 = self.resources[1]
download_and_extract_archive(
url=url,
download_root=self.raw_folder,
filename=self.filename,
md5=md5,
remove_finished=True,
)
def _read_raw_movielens_data(self):
"""Return the raw lines of the train and test files."""
path = self.raw_folder
with open(path + "/ml-100k/ua.base") as datafile:
ua_base = datafile.read().split("\n")
with open(path + "/ml-100k/ua.test") as datafile:
ua_test = datafile.read().split("\n")
with open(path + "/ml-100k/u.item", encoding="ISO-8859-1") as datafile:
u_item = datafile.read().split("\n")
with open(path + "/ml-100k/u.genre") as datafile:
u_genre = datafile.read().split("\n")
return (ua_base, ua_test, u_item, u_genre)
def _build_interaction_matrix(self, rows, cols, data):
"""Builds interaction matrix.
Args:
rows (int): rows of the oevrall dataset
cols (int): columns of the overall dataset
data (generator object): generator of
the data object
Returns:
interaction_matrix (torch.sparse.Float):
sparse user2item interaction matrix
"""
mat = sp.lil_matrix((rows, cols), dtype=np.int32)
for uid, iid, rating, _ in data:
if rating >= self.min_rating:
mat[uid, iid] = rating
coo = mat.tocoo()
values = coo.data
indices = np.vstack((coo.row, coo.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = coo.shape
interaction_matrix = torch.sparse.FloatTensor(i, v, torch.Size(shape)).to_dense()
return interaction_matrix
def _parse(self, data):
"""Parses the raw data. Substract one to shift to zero based indexing
Args:
data: raw data of the dataset
Yields:
Generator iterator for parsed data
"""
for line in data:
if not line:
continue
uid, iid, rating, timestamp = [int(x) for x in line.split("\t")]
yield uid - 1, iid - 1, rating, timestamp
def _get_dimensions(self, train_data, test_data):
"""Gets the dimensions of the raw dataset
Args:
train_data: (uid, iid, rating, timestamp)
Genrator for training data
test_data: (uid, iid, rating, timestamp)
Genrator for testing data
Returns:
The total dimension of the dataset
"""
uids = set()
iids = set()
for uid, iid, _, _ in itertools.chain(train_data, test_data):
uids.add(uid)
iids.add(iid)
rows = max(uids) + 1
cols = max(iids) + 1
self.dimensions = (rows, cols)
return rows, cols
def _fetch_movies(self):
"""
Fetch data and save in the pytorch format
1. Read the train/test data from raw archive
2. Parse train data
3. Parse test data
4. Save in the .pt with torch.save
"""
data = self._read_raw_movielens_data()
train_raw = data[0]
test_raw = data[1]
train_parsed = self._parse(train_raw)
test_parsed = self._parse(test_raw)
num_users, num_items = self._get_dimensions(train_parsed, test_parsed)
train = self._build_interaction_matrix(
num_users, num_items, self._parse(train_raw)
)
test = self._build_interaction_matrix(
num_users, num_items, self._parse(test_raw)
)
assert train.shape == test.shape
with open(os.path.join(self.processed_folder, self.training_file), "wb") as f:
torch.save(train, f)
with open(os.path.join(self.processed_folder, self.test_file), "wb") as f:
torch.save(test, f)
| MovieLens |
python | readthedocs__readthedocs.org | readthedocs/api/v2/serializers.py | {
"start": 6891,
"end": 7778
} | class ____(BuildCommandSerializer):
"""
Serializer used on GETs to trim the commands' path.
Remove unreadable paths from the command outputs when returning it from the API.
We could make this change at build level, but we want to avoid undoable issues from now
and hack a small solution to fix the immediate problem.
This converts:
$ /usr/src/app/checkouts/readthedocs.org/user_builds/
<container_hash>/<project_slug>/envs/<version_slug>/bin/python
$ /home/docs/checkouts/readthedocs.org/user_builds/
<project_slug>/envs/<version_slug>/bin/python
into
$ python
"""
command = serializers.SerializerMethodField()
def get_command(self, obj):
return normalize_build_command(
obj.command, obj.build.project.slug, obj.build.get_version_slug()
)
| BuildCommandReadOnlySerializer |
python | plotly__plotly.py | plotly/graph_objs/sankey/link/_colorscale.py | {
"start": 233,
"end": 11829
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sankey.link"
_path_str = "sankey.link.colorscale"
_valid_props = {"cmax", "cmin", "colorscale", "label", "name", "templateitemname"}
@property
def cmax(self):
"""
Sets the upper bound of the color domain.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def label(self):
"""
The label of the links to color based on their concentration
within a flow.
The 'label' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def _prop_descriptions(self):
return """\
cmax
Sets the upper bound of the color domain.
cmin
Sets the lower bound of the color domain.
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
label
The label of the links to color based on their
concentration within a flow.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
"""
def __init__(
self,
arg=None,
cmax=None,
cmin=None,
colorscale=None,
label=None,
name=None,
templateitemname=None,
**kwargs,
):
"""
Construct a new Colorscale object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.link.Colorscale`
cmax
Sets the upper bound of the color domain.
cmin
Sets the lower bound of the color domain.
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and `cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
label
The label of the links to color based on their
concentration within a flow.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
Returns
-------
Colorscale
"""
super().__init__("colorscales")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sankey.link.Colorscale
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.link.Colorscale`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("cmax", arg, cmax)
self._set_property("cmin", arg, cmin)
self._set_property("colorscale", arg, colorscale)
self._set_property("label", arg, label)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Colorscale |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_format_returned.py | {
"start": 1275,
"end": 1418
} | class ____:
"""Potential uninferable return value"""
def __format__(self, format_spec):
return str(Missing)
| AnotherAmbiguousFormat |
python | huggingface__transformers | src/transformers/models/resnet/modeling_resnet.py | {
"start": 11699,
"end": 13669
} | class ____(ResNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.resnet = ResNetModel(config)
# classification head
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> ImageClassifierOutputWithNoAttention:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@auto_docstring(
custom_intro="""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
| ResNetForImageClassification |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0095_default_branch_helptext.py | {
"start": 149,
"end": 1207
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0094_auto_20221221_1045"),
]
operations = [
migrations.AlterField(
model_name="historicalproject",
name="default_branch",
field=models.CharField(
blank=True,
default=None,
help_text='What branch "latest" points to. Leave empty to use the default value for your VCS.',
max_length=255,
null=True,
verbose_name="Default branch",
),
),
migrations.AlterField(
model_name="project",
name="default_branch",
field=models.CharField(
blank=True,
default=None,
help_text='What branch "latest" points to. Leave empty to use the default value for your VCS.',
max_length=255,
null=True,
verbose_name="Default branch",
),
),
]
| Migration |
python | doocs__leetcode | solution/1200-1299/1286.Iterator for Combination/Solution2.py | {
"start": 0,
"end": 867
} | class ____:
def __init__(self, characters: str, combinationLength: int):
self.curr = (1 << len(characters)) - 1
self.size = combinationLength
self.cs = characters[::-1]
def next(self) -> str:
while self.curr >= 0 and self.curr.bit_count() != self.size:
self.curr -= 1
ans = []
for i in range(len(self.cs)):
if (self.curr >> i) & 1:
ans.append(self.cs[i])
self.curr -= 1
return ''.join(ans[::-1])
def hasNext(self) -> bool:
while self.curr >= 0 and self.curr.bit_count() != self.size:
self.curr -= 1
return self.curr >= 0
# Your CombinationIterator object will be instantiated and called as such:
# obj = CombinationIterator(characters, combinationLength)
# param_1 = obj.next()
# param_2 = obj.hasNext()
| CombinationIterator |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py | {
"start": 6975,
"end": 7569
} | class ____(LlamaDecoderLayer):
def __init__(self, config: HunYuanMoEV1Config, layer_idx: int):
super().__init__(config, layer_idx)
self.hidden_size = config.hidden_size
self.self_attn = HunYuanMoEV1Attention(config=config, layer_idx=layer_idx)
self.mlp = HunYuanMoEV1Moe(config, layer_idx=layer_idx)
self.input_layernorm = HunYuanMoEV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = HunYuanMoEV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_idx = layer_idx
| HunYuanMoEV1DecoderLayer |
python | pandas-dev__pandas | asv_bench/benchmarks/indexing.py | {
"start": 6971,
"end": 7460
} | class ____:
params = ["int", "datetime"]
param_names = ["index"]
def setup(self, index):
N = 100000
indexes = {
"int": Index(np.arange(N), dtype=np.int64),
"datetime": date_range("2011-01-01", freq="s", periods=N),
}
index = indexes[index]
self.s = Series(np.random.rand(N), index=index)
self.indexer = np.random.randint(0, N, size=N)
def time_take(self, index):
self.s.take(self.indexer)
| Take |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 2351,
"end": 8840
} | class ____(Configurable):
active_types = List(Unicode(),
help="""List of currently active mime-types to display.
You can use this to set a white-list for formats to display.
Most users will not need to change this value.
""",
).tag(config=True)
@default('active_types')
def _active_types_default(self):
return self.format_types
@observe('active_types')
def _active_types_changed(self, change):
for key, formatter in self.formatters.items():
if key in change['new']:
formatter.enabled = True
else:
formatter.enabled = False
ipython_display_formatter = ForwardDeclaredInstance("FormatterABC") # type: ignore
@default("ipython_display_formatter")
def _default_formatter(self):
return IPythonDisplayFormatter(parent=self)
mimebundle_formatter = ForwardDeclaredInstance("FormatterABC") # type: ignore
@default("mimebundle_formatter")
def _default_mime_formatter(self):
return MimeBundleFormatter(parent=self)
# A dict of formatter whose keys are format types (MIME types) and whose
# values are subclasses of BaseFormatter.
formatters = Dict()
@default("formatters")
def _formatters_default(self):
"""Activate the default formatters."""
formatter_classes = [
PlainTextFormatter,
HTMLFormatter,
MarkdownFormatter,
SVGFormatter,
PNGFormatter,
PDFFormatter,
JPEGFormatter,
LatexFormatter,
JSONFormatter,
JavascriptFormatter
]
d = {}
for cls in formatter_classes:
f = cls(parent=self)
d[f.format_type] = f
return d
def format(self, obj, include=None, exclude=None):
"""Return a format data dict for an object.
By default all format types will be computed.
The following MIME types are usually implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* application/pdf
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
obj : object
The Python object whose format data will be computed.
include : list, tuple or set; optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list, tuple or set; optional
A list of format type string (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
Mimetypes present in exclude will take precedence over the ones in include
Returns
-------
(format_dict, metadata_dict) : tuple of two dicts
format_dict is a dictionary of key/value pairs, one of each format that was
generated for the object. The keys are the format types, which
will usually be MIME type strings and the values and JSON'able
data structure containing the raw data for the representation in
that format.
metadata_dict is a dictionary of metadata about each mime-type output.
Its keys will be a strict subset of the keys in format_dict.
Notes
-----
If an object implement `_repr_mimebundle_` as well as various
`_repr_*_`, the data returned by `_repr_mimebundle_` will take
precedence and the corresponding `_repr_*_` for this mimetype will
not be called.
"""
format_dict = {}
md_dict = {}
if self.ipython_display_formatter(obj):
# object handled itself, don't proceed
return {}, {}
format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
if format_dict or md_dict:
if include:
format_dict = {k:v for k,v in format_dict.items() if k in include}
md_dict = {k:v for k,v in md_dict.items() if k in include}
if exclude:
format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
for format_type, formatter in self.formatters.items():
if format_type in format_dict:
# already got it from mimebundle, maybe don't render again.
# exception: manually registered per-mime renderer
# check priority:
# 1. user-registered per-mime formatter
# 2. mime-bundle (user-registered or repr method)
# 3. default per-mime formatter (e.g. repr method)
try:
formatter.lookup(obj)
except KeyError:
# no special formatter, use mime-bundle-provided value
continue
if include and format_type not in include:
continue
if exclude and format_type in exclude:
continue
md = None
try:
data = formatter(obj)
except:
# FIXME: log the exception
raise
# formatters can return raw data or (data, metadata)
if isinstance(data, tuple) and len(data) == 2:
data, md = data
if data is not None:
format_dict[format_type] = data
if md is not None:
md_dict[format_type] = md
return format_dict, md_dict
@property
def format_types(self):
"""Return the format types (MIME types) of the active formatters."""
return list(self.formatters.keys())
#-----------------------------------------------------------------------------
# Formatters for specific format types (text, html, svg, etc.)
#-----------------------------------------------------------------------------
def _safe_repr(obj):
"""Try to return a repr of an object
always returns a string, at least.
"""
try:
return repr(obj)
except Exception as e:
return "un-repr-able object (%r)" % e
| DisplayFormatter |
python | getsentry__sentry | src/sentry/rules/filters/issue_occurrences.py | {
"start": 329,
"end": 404
} | class ____(forms.Form):
value = forms.IntegerField()
| IssueOccurrencesForm |
python | django__django | tests/migrations/test_exceptions.py | {
"start": 103,
"end": 436
} | class ____(SimpleTestCase):
def test_node_not_found_error_repr(self):
node = ("some_app_label", "some_migration_label")
error_repr = repr(NodeNotFoundError("some message", node))
self.assertEqual(
error_repr, "NodeNotFoundError(('some_app_label', 'some_migration_label'))"
)
| ExceptionTests |
python | google__pytype | pytype/pyi/parser.py | {
"start": 8871,
"end": 9825
} | class ____(visitor.BaseVisitor):
"""Converts typing.Annotated metadata."""
def visit_Call(self, node):
posargs = tuple(evaluator.literal_eval(x) for x in node.args)
kwargs = {x.arg: evaluator.literal_eval(x.value) for x in node.keywords}
if isinstance(node.func, astlib.Attribute):
func_name = _attribute_to_name(node.func)
else:
func_name = node.func
return (func_name.id, posargs, kwargs)
def visit_Dict(self, node):
return evaluator.literal_eval(node)
def _flatten_splices(body: list[Any]) -> list[Any]:
"""Flatten a list with nested Splices."""
if not any(isinstance(x, Splice) for x in body):
return body
out = []
for x in body:
if isinstance(x, Splice):
# This technically needn't be recursive because of how we build Splices
# but better not to have the class assume that.
out.extend(_flatten_splices(x.body))
else:
out.append(x)
return out
| _MetadataVisitor |
python | apache__airflow | providers/slack/tests/unit/slack/operators/test_slack.py | {
"start": 1164,
"end": 2542
} | class ____:
@mock.patch("airflow.providers.slack.operators.slack.SlackHook")
@pytest.mark.parametrize(
("slack_op_kwargs", "hook_extra_kwargs"),
[
pytest.param({}, DEFAULT_HOOKS_PARAMETERS, id="default-hook-parameters"),
pytest.param(
{
"base_url": "https://foo.bar",
"timeout": 42,
"proxy": "http://spam.egg",
"retry_handlers": [],
},
{
"base_url": "https://foo.bar",
"timeout": 42,
"proxy": "http://spam.egg",
"retry_handlers": [],
},
id="with-extra-hook-parameters",
),
],
)
def test_hook(self, mock_slack_hook_cls, slack_op_kwargs, hook_extra_kwargs):
mock_slack_hook = mock_slack_hook_cls.return_value
op = SlackAPIOperator(
task_id="test-mask-token",
slack_conn_id=SLACK_API_TEST_CONNECTION_ID,
method="foo.Bar",
**slack_op_kwargs,
)
hook = op.hook
assert hook == mock_slack_hook
assert hook is op.hook
mock_slack_hook_cls.assert_called_once_with(
slack_conn_id=SLACK_API_TEST_CONNECTION_ID, **hook_extra_kwargs
)
| TestSlackAPIOperator |
python | getsentry__sentry | tests/sentry/receivers/test_superuser.py | {
"start": 242,
"end": 4033
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.superuser = self.create_user(is_superuser=True)
self.non_superuser = self.create_user(is_superuser=False)
self.superuser_request = self.make_request(user=self.superuser)
self.non_superuser_request = self.make_request(user=self.non_superuser)
def test_enable_superuser_when_self_hosted__superuser(self) -> None:
with self.settings(
SENTRY_SELF_HOSTED=True, VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON=False
):
enable_superuser(request=self.superuser_request, user=self.superuser)
assert is_active_superuser(self.superuser_request)
def test_enable_superuser_when_flag_on__superuser(self) -> None:
with self.settings(
SENTRY_SELF_HOSTED=False,
VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON=False,
ENABLE_SU_UPON_LOGIN_FOR_LOCAL_DEV=True,
):
enable_superuser(request=self.superuser_request, user=self.superuser)
assert is_active_superuser(self.superuser_request)
def test_enable_superuser_saas__superuser(self) -> None:
with self.settings(
SENTRY_SELF_HOSTED=False,
):
enable_superuser(request=self.superuser_request, user=self.superuser)
assert not is_active_superuser(self.superuser_request)
def test_enable_superuser_when_self_hosted_non__superuser(self) -> None:
with self.settings(
SENTRY_SELF_HOSTED=True, VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON=False
):
enable_superuser(request=self.non_superuser_request, user=self.non_superuser)
assert not is_active_superuser(self.non_superuser_request)
def test_enable_superuser_when_flag_on_non__superuser(self) -> None:
with self.settings(
SENTRY_SELF_HOSTED=False,
VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON=False,
ENABLE_SU_UPON_LOGIN_FOR_LOCAL_DEV=True,
):
enable_superuser(request=self.non_superuser_request, user=self.non_superuser)
assert not is_active_superuser(self.non_superuser_request)
def test_enable_superuser_when_session_has_prefill_key_superuser(self) -> None:
self.superuser_request.session[PREFILLED_SU_MODAL_KEY] = {
"superuserAccessCategory": "for_unit_test",
"superuserReason": "Edit organization settings",
"isSuperuserModal": True,
}
enable_superuser(request=self.superuser_request, user=self.superuser)
assert is_active_superuser(self.superuser_request)
def test_enable_superuser_when_session_has_prefill_key_non_superuser(self) -> None:
self.superuser_request.session[PREFILLED_SU_MODAL_KEY] = {
"superuserAccessCategory": "for_unit_test",
"superuserReason": "Edit organization settings",
"isSuperuserModal": True,
}
enable_superuser(request=self.non_superuser_request, user=self.non_superuser)
assert not is_active_superuser(self.non_superuser_request)
def test_enable_superuser_saas_non__superuser(self) -> None:
with self.settings(
SENTRY_SELF_HOSTED=False,
):
enable_superuser(request=self.non_superuser_request, user=self.non_superuser)
assert not is_active_superuser(self.superuser_request)
def test_disable_superuser_active__superuser(self) -> None:
enable_superuser(request=self.superuser_request, user=self.superuser)
assert is_active_superuser(self.superuser_request)
disable_superuser(request=self.superuser_request, user=self.superuser)
assert not is_active_superuser(self.superuser_request)
| SuperuserReceiverTest |
python | spack__spack | lib/spack/spack/vendor/jsonschema/exceptions.py | {
"start": 378,
"end": 3566
} | class ____(Exception):
def __init__(
self,
message,
validator=_unset,
path=(),
cause=None,
context=(),
validator_value=_unset,
instance=_unset,
schema=_unset,
schema_path=(),
parent=None,
):
super(_Error, self).__init__(
message,
validator,
path,
cause,
context,
validator_value,
instance,
schema,
schema_path,
parent,
)
self.message = message
self.path = self.relative_path = deque(path)
self.schema_path = self.relative_schema_path = deque(schema_path)
self.context = list(context)
self.cause = self.__cause__ = cause
self.validator = validator
self.validator_value = validator_value
self.instance = instance
self.schema = schema
self.parent = parent
for error in context:
error.parent = self
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.message)
def __unicode__(self):
essential_for_verbose = (
self.validator, self.validator_value, self.instance, self.schema,
)
if any(m is _unset for m in essential_for_verbose):
return self.message
pschema = pprint.pformat(self.schema, width=72)
pinstance = pprint.pformat(self.instance, width=72)
return self.message + textwrap.dedent("""
Failed validating %r in %s%s:
%s
On %s%s:
%s
""".rstrip()
) % (
self.validator,
self._word_for_schema_in_error_message,
_utils.format_as_index(list(self.relative_schema_path)[:-1]),
_utils.indent(pschema),
self._word_for_instance_in_error_message,
_utils.format_as_index(self.relative_path),
_utils.indent(pinstance),
)
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode(self).encode("utf-8")
@classmethod
def create_from(cls, other):
return cls(**other._contents())
@property
def absolute_path(self):
parent = self.parent
if parent is None:
return self.relative_path
path = deque(self.relative_path)
path.extendleft(reversed(parent.absolute_path))
return path
@property
def absolute_schema_path(self):
parent = self.parent
if parent is None:
return self.relative_schema_path
path = deque(self.relative_schema_path)
path.extendleft(reversed(parent.absolute_schema_path))
return path
def _set(self, **kwargs):
for k, v in iteritems(kwargs):
if getattr(self, k) is _unset:
setattr(self, k, v)
def _contents(self):
attrs = (
"message", "cause", "context", "validator", "validator_value",
"path", "schema_path", "instance", "schema", "parent",
)
return dict((attr, getattr(self, attr)) for attr in attrs)
| _Error |
python | openai__openai-python | src/openai/types/audio/transcription_create_params.py | {
"start": 630,
"end": 4662
} | class ____(TypedDict, total=False):
file: Required[FileTypes]
"""
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
"""
model: Required[Union[str, AudioModel]]
"""ID of the model to use.
The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `whisper-1`
(which is powered by our open source Whisper V2 model), and
`gpt-4o-transcribe-diarize`.
"""
chunking_strategy: Optional[ChunkingStrategy]
"""Controls how the audio is cut into chunks.
When set to `"auto"`, the server first normalizes loudness and then uses voice
activity detection (VAD) to choose boundaries. `server_vad` object can be
provided to tweak VAD detection parameters manually. If unset, the audio is
transcribed as a single block. Required when using `gpt-4o-transcribe-diarize`
for inputs longer than 30 seconds.
"""
include: List[TranscriptionInclude]
"""
Additional information to include in the transcription response. `logprobs` will
return the log probabilities of the tokens in the response to understand the
model's confidence in the transcription. `logprobs` only works with
response_format set to `json` and only with the models `gpt-4o-transcribe` and
`gpt-4o-mini-transcribe`. This field is not supported when using
`gpt-4o-transcribe-diarize`.
"""
known_speaker_names: SequenceNotStr[str]
"""
Optional list of speaker names that correspond to the audio samples provided in
`known_speaker_references[]`. Each entry should be a short identifier (for
example `customer` or `agent`). Up to 4 speakers are supported.
"""
known_speaker_references: SequenceNotStr[str]
"""
Optional list of audio samples (as
[data URLs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs))
that contain known speaker references matching `known_speaker_names[]`. Each
sample must be between 2 and 10 seconds, and can use any of the same input audio
formats supported by `file`.
"""
language: str
"""The language of the input audio.
Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
"""
prompt: str
"""An optional text to guide the model's style or continue a previous audio
segment.
The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should match the audio language. This field is not supported when using
`gpt-4o-transcribe-diarize`.
"""
response_format: AudioResponseFormat
"""
The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, `vtt`, or `diarized_json`. For `gpt-4o-transcribe` and
`gpt-4o-mini-transcribe`, the only supported format is `json`. For
`gpt-4o-transcribe-diarize`, the supported formats are `json`, `text`, and
`diarized_json`, with `diarized_json` required to receive speaker annotations.
"""
temperature: float
"""The sampling temperature, between 0 and 1.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
"""
timestamp_granularities: List[Literal["word", "segment"]]
"""The timestamp granularities to populate for this transcription.
`response_format` must be set `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word`, or `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency. This option is not available for
`gpt-4o-transcribe-diarize`.
"""
| TranscriptionCreateParamsBase |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 6240,
"end": 7759
} | class ____(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
super().__init__()
self.scale = config.hidden_size**-0.5
self.q_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.k_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.v_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
self.assign_eps = config.assign_eps
def get_attn(self, attn, gumbel=True, hard=True):
if gumbel and self.training:
attn = gumbel_softmax(attn, dim=-2, hard=hard)
else:
if hard:
attn = hard_softmax(attn, dim=-2)
else:
attn = nn.functional.softmax(attn, dim=-2)
return attn
def forward(self, query, key):
value = key
# [batch_size, query_length, channels]
query = self.q_proj(query)
# [batch_size, key_length, channels]
key = self.k_proj(key)
# [batch_size, key_length, channels]
value = self.v_proj(value)
# [batch_size, query_length, key_length]
raw_attn = (query @ key.transpose(-2, -1)) * self.scale
attn = self.get_attn(raw_attn)
soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False)
attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps)
out = attn @ value
out = self.proj(out)
return out, soft_attn
| GroupViTAssignAttention |
python | pytorch__pytorch | torch/distributed/checkpoint/_fsspec_filesystem.py | {
"start": 5240,
"end": 5634
} | class ____(FileSystemReader):
def __init__(self, path: Union[str, os.PathLike], **kwargs) -> None:
super().__init__(path)
self.fs = FileSystem()
self.path = self.fs.init_path(path, **kwargs)
@classmethod
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
return FileSystem.validate_checkpoint_id(checkpoint_id)
| FsspecReader |
python | tensorflow__tensorflow | tensorflow/python/eager/forwardprop_test.py | {
"start": 8509,
"end": 36600
} | class ____(test.TestCase, parameterized.TestCase):
def testJVPFunction(self):
add_outputs = (constant_op.constant(4.),)
vp, = forwardprop._jvp_dispatch(
op_name="Add",
attr_tuple=(),
inputs=(constant_op.constant(1.), constant_op.constant(3.)),
outputs=add_outputs,
tangents=(
constant_op.constant(1.),
constant_op.constant(5.),
))
self.assertAllClose(1. + 5., self.evaluate(vp))
mul_outputs = (constant_op.constant([20.]),)
vp, = forwardprop._jvp_dispatch(
op_name="Mul",
attr_tuple=(),
inputs=(constant_op.constant([4.]), constant_op.constant([5.])),
outputs=mul_outputs,
tangents=(
constant_op.constant([2.]),
constant_op.constant([3.]),
))
self.assertAllClose([2. * 5. + 3. * 4.], self.evaluate(vp))
def testJVPFunctionWithBatchOfTangents(self):
add_outputs = (constant_op.constant(4.),)
jvp_flat = forwardprop._jvp_dispatch(
op_name="Add",
attr_tuple=(),
inputs=(constant_op.constant(1.), constant_op.constant(3.)),
outputs=add_outputs,
tangents=(
constant_op.constant([1., 2., 3.]),
constant_op.constant([4., 5., 6.]),
),
use_batch=True)
# Using evaluate and asserting with just a list works too
# but the output is more explicit this way
self.assertAllClose([constant_op.constant([1. + 4., 2. + 5., 3. + 6.])],
jvp_flat)
mul_outputs = (constant_op.constant([20.]),)
jvp_flat = forwardprop._jvp_dispatch(
op_name="Mul",
attr_tuple=(),
inputs=(constant_op.constant([4.]), constant_op.constant([5.])),
outputs=mul_outputs,
tangents=(
constant_op.constant([[1.], [0.], [1.]]),
constant_op.constant([[0.], [1.], [1.]]),
),
use_batch=True)
self.assertAllClose([constant_op.constant([[5.], [4.], [5. + 4.]])],
jvp_flat)
def testJVPFunctionRaisesError(self):
sum_outputs = (constant_op.constant(6.),)
with self.assertRaisesRegex(ValueError, r".*was expected to be of shape*"):
forwardprop._jvp_dispatch(
op_name="Add",
attr_tuple=(),
inputs=(constant_op.constant(2.), constant_op.constant(4.)),
outputs=sum_outputs,
tangents=(constant_op.constant([1., 2.]),
constant_op.constant([[1.], [2.]])),
use_batch=True)
def testNonDifferentiableOpWithInputTangent(self):
x = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(x, 2.) as acc1:
with forwardprop.ForwardAccumulator(x, 2.) as acc2:
y = array_ops.zeros_like(x)
self.assertIsNone(acc1.jvp(y))
self.assertIsNone(acc2.jvp(y))
def testRunFunctionsEagerly(self):
try:
original_setting = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(True)
x = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(x, 2.) as acc:
y = x * 3.
self.assertAllClose(6., acc.jvp(y))
finally:
def_function.run_functions_eagerly(original_setting)
def testJVPFunctionUsedByAccumulatorForOps(self):
previous_fn = forwardprop._jvp_dispatch
try:
x = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(x, 2.) as acc:
y = x + x
pywrap_tfe.TFE_Py_RegisterJVPFunction(
lambda *args, **kwargs: [constant_op.constant(-15.)])
z = x + x
self.assertAllClose(4., acc.jvp(y))
self.assertAllClose(-15., acc.jvp(z))
finally:
pywrap_tfe.TFE_Py_RegisterJVPFunction(previous_fn)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testFunctionCacheLimited(self):
# Every time this loop is executed, it will create a slightly larger Tensor
# and push it through Add's gradient.
# We run TRACE_COUNT_LIMIT x 2 so that it is tested with both
# experimental_relax_shapes on and off.
for execution_count in range(forwardprop._TRACE_COUNT_LIMIT*2):
x = array_ops.zeros([execution_count])
with forwardprop.ForwardAccumulator(x, array_ops.ones_like(x)) as acc:
y = x + x
self.assertAllClose(2. * array_ops.ones_like(x), acc.jvp(y))
def testVariableUnwatchedZero(self):
v = variables.Variable([[1.]])
x = constant_op.constant(1.)
xt = constant_op.constant(2.)
with forwardprop.ForwardAccumulator(x, xt) as acc:
pass
self.assertIsNone(acc.jvp(v))
self.assertAllClose([[0.]], acc.jvp(v, unconnected_gradients="zero"))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testFunctionReturnsResource(self):
v = variables.Variable([[1.]])
x = constant_op.constant(1.)
xt = constant_op.constant(2.)
@def_function.function
def f(a):
return a, v.handle
with forwardprop.ForwardAccumulator(x, xt) as acc:
y, _ = f(x)
self.assertAllClose(2., acc.jvp(y))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testMultipleWatchesAdd(self):
x = constant_op.constant(-2.)
with self.assertRaisesRegex(ValueError, "multiple times"):
with forwardprop.ForwardAccumulator([x, x], [1., 2.]):
pass
with forwardprop.ForwardAccumulator([x], [3.]) as acc:
self.assertAllClose(3., acc.jvp(x))
acc._watch(x, constant_op.constant(10.))
self.assertAllClose(13., acc.jvp(x))
acc._watch(x, constant_op.constant(11.))
self.assertAllClose(24., acc.jvp(x))
y = constant_op.constant(3.) * x
self.assertAllClose(24., acc.jvp(x))
self.assertAllClose(24. * 3., acc.jvp(y))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testReenter(self):
x = constant_op.constant(-2.)
with forwardprop.ForwardAccumulator(x, 1.5) as acc:
self.assertAllClose(1.5, acc.jvp(x))
y = 4. * x
self.assertAllClose(6., acc.jvp(y))
with self.assertRaisesRegex(ValueError, "already recording"):
with acc:
pass
z = 4. * x
self.assertIsNone(acc.jvp(z))
with acc:
yy = y * y
self.assertAllClose(6. * -8. * 2., acc.jvp(yy))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testDeadTensorsJVPCleared(self):
x = array_ops.ones([100])
x_weak = weakref.ref(x)
grad_tensor = constant_op.constant(array_ops.zeros([100]))
grad_tensor_weak = weakref.ref(grad_tensor)
with forwardprop.ForwardAccumulator(x, grad_tensor) as acc:
derived_tensor = constant_op.constant(2.) * x
del grad_tensor
self.assertAllClose(array_ops.zeros([100]), acc.jvp(x))
del x
self.assertIsNone(x_weak())
self.assertIsNone(grad_tensor_weak())
derived_tensor_weak = weakref.ref(derived_tensor)
derived_tensor_grad = acc.jvp(derived_tensor)
derived_tensor_grad_weak = weakref.ref(derived_tensor_grad)
del derived_tensor
del derived_tensor_grad
self.assertIsNone(derived_tensor_weak())
self.assertIsNone(derived_tensor_grad_weak())
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testJVPManual(self):
primal, tangent = _jvp(math_ops.sin, (constant_op.constant(0.1),),
(constant_op.constant(0.2),))
self.assertAllClose(math_ops.sin(0.1), primal)
self.assertAllClose(math_ops.cos(0.1) * 0.2, tangent)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testNumericHigherOrder(self):
def f(x):
pointwise = math_ops.sin(x) * math_ops.tan(x)
return math_ops.reduce_prod(
pointwise + math_ops.reduce_sum(pointwise), axis=1)
_test_gradients(
self,
f,
[constant_op.constant([[2.0, 3.0], [1.0, 4.0]])],
order=3,
srtol=1e-6,
satol=1e-3,
)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testNumericHigherOrderFloat64(self):
def f(x):
pointwise = math_ops.sin(x) * math_ops.tan(x)
return math_ops.reduce_prod(
pointwise + math_ops.reduce_sum(pointwise), axis=1)
_test_gradients(
self,
f,
[constant_op.constant([[2.0, 3.0], [1.0, 4.0]], dtype=dtypes.float64)],
order=3)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
def grad(dy):
return dy * math_ops.cos(x)
return np.sin(x.numpy()), grad
_test_gradients(self, f, [constant_op.constant([1., 2.])], order=3)
# TODO(allenl): investigate why assert_no_new_pyobjects_executing_eagerly()
# fails around this test?
def testExceptionCustomGradientRecomputeGradForward(self):
@custom_gradient.recompute_grad
def f(x):
return math_ops.reduce_prod(math_ops.tanh(x)**2)
with self.assertRaisesRegex(NotImplementedError,
"recompute_grad tried to transpose"):
primals = [constant_op.constant([1.])]
sym_jac_fwd = _jacfwd(f, primals)
def testExceptionInCustomGradientNotSwallowed(self):
@custom_gradient.custom_gradient
def f(unused_x):
def grad(unused_dy):
raise ValueError("test_error_string")
return 1., grad
c = constant_op.constant(1.)
d = constant_op.constant(2.)
with forwardprop.ForwardAccumulator(c, d):
with self.assertRaisesRegex(ValueError, "test_error_string"):
f(c)
@parameterized.named_parameters([("EluM5", -0.5, nn_ops.elu),
("EluP5", [0.5], nn_ops.elu),
("SwishP5", 0.5, nn_impl.swish),
("SwishM5", [-0.5], nn_impl.swish)])
def testElementwiseNNOps(self, value, op_fn):
_test_gradients(self, op_fn, [constant_op.constant(value)], order=3)
def testFusedBatchNormGradsInference(self):
x_shape = [4, 10, 10, 2]
increment = 3. / math_ops.reduce_prod(
constant_op.constant(x_shape, dtype=dtypes.float32))
x = array_ops.reshape(math_ops.range(-2., 1., increment), x_shape)
scale = constant_op.constant([1., 1.1])
offset = constant_op.constant([-0.5, -0.6])
mean = constant_op.constant([-1.3, 1.4])
variance = constant_op.constant([0.7, 0.9])
epsilon = 0.001
def _bn_fused(x_arg, scale_arg, offset_arg):
return nn_impl.fused_batch_norm(
x_arg,
scale_arg,
offset_arg,
mean,
variance,
epsilon=epsilon,
is_training=False)[0]
_test_gradients(self, _bn_fused, [x, scale, offset], order=2, atol=1e-2)
def testPushPopAccumulatorState(self):
# Note that this example is somewhat contrived. push_forwardprop_state is
# probably only useful in practice for building functions that compute jvps
# alongside their usual outputs.
c = constant_op.constant(1.)
d = constant_op.constant(2.)
with forwardprop.ForwardAccumulator(c, d) as acc:
@custom_gradient.custom_gradient
def f(x):
y = math_ops.sin(x.numpy())
def grad(dy):
with forwardprop_util.push_forwardprop_state():
x_copy = constant_op.constant(x.numpy())
acc._watch(x_copy, dy)
y_copy = math_ops.sin(x_copy)
return dy * acc.jvp(y_copy)
return y, grad
output = f(c)
self.assertAllClose(d * math_ops.cos(c), acc.jvp(output))
@parameterized.named_parameters([
("Order{}".format(order), order, expected)
for order, expected in enumerate(_X11_35_DERIVATIVES)
])
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testHigherOrderPureForward(self, order, expected):
def _forwardgrad(f):
def _compute_forwardgrad(primal):
tangent = constant_op.constant(1.)
with forwardprop.ForwardAccumulator(primal, tangent) as acc:
primal_out = f(primal)
return acc.jvp(primal_out)
return _compute_forwardgrad
def _forward(x):
return x**3.5
f = _forward
primal = constant_op.constant(1.1)
for _ in range(order):
f = _forwardgrad(f)
self.assertAllClose(expected, f(primal))
@parameterized.named_parameters([("Function", def_function.function),
("NoFunction", lambda f: f)])
def testGradPureForward(self, decorator):
@decorator
def f(x):
return x**3.5
primal = constant_op.constant(1.1)
with forwardprop.ForwardAccumulator(primal,
constant_op.constant(1.)) as outer_acc:
with forwardprop.ForwardAccumulator(primal,
constant_op.constant(1.)) as acc:
primal_out = f(primal)
inner_jvp = acc.jvp(primal_out)
outer_jvp = outer_acc.jvp(inner_jvp)
self.assertAllClose(1.1**3.5, primal_out)
self.assertAllClose(3.5 * 1.1**2.5, inner_jvp)
self.assertAllClose(3.5 * 2.5 * 1.1**1.5, outer_jvp)
self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out)))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testJVPPacking(self):
    """Checks `pack_tangents` index/tangent layout for nested accumulators."""
    two = constant_op.constant(2.)
    primal_in = constant_op.constant(1.)
    inner_jvp = constant_op.constant(3.)
    with forwardprop.ForwardAccumulator(
        [primal_in, inner_jvp],
        [constant_op.constant(2.),
         constant_op.constant(4.)]) as outer_acc:
      with forwardprop.ForwardAccumulator(primal_in, inner_jvp) as inner_acc:
        packed_input_indices, packed_input_tangents = (
            forwardprop_util.pack_tangents([primal_in]))
        # Inner tangent (3.) then the outer tangents for primal and inner JVP.
        self.assertAllClose([3., 2., 4.], packed_input_tangents)
        expected_indices = (
            # inner_acc watches primal_in
            (
                (0, 1),),
            # outer_acc watches primal_in and inner_jvp
            ((0, 2), (1, 3)))
        self.assertAllEqual(expected_indices, packed_input_indices)
        primal_out = primal_in * two
        self.assertAllClose(6., inner_acc.jvp(primal_out))
        self.assertAllClose(4., outer_acc.jvp(primal_out))
        # Second-order: outer accumulator's tangent of the inner JVP.
        self.assertAllClose(8., outer_acc.jvp(inner_acc.jvp(primal_out)))
        packed_output_indices, packed_output_tangents = (
            forwardprop_util.pack_tangents([primal_out]))
        self.assertAllClose([6., 4., 8.], packed_output_tangents)
        self.assertAllEqual(expected_indices, packed_output_indices)
  def testFunctionGradInFunctionPureForward(self):
    """Nested accumulators produce correct derivatives inside tf.function."""
    @def_function.function
    def take_gradients():
      @def_function.function
      def f(x):
        return x**3.5
      primal = constant_op.constant(1.1)
      with forwardprop.ForwardAccumulator(
          primal, constant_op.constant(1.)) as outer_acc:
        with forwardprop.ForwardAccumulator(primal,
                                            constant_op.constant(1.)) as acc:
          primal_out = f(primal)
        inner_jvp = acc.jvp(primal_out)
        outer_jvp = outer_acc.jvp(inner_jvp)
      self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out)))
      return primal_out, inner_jvp, outer_jvp
    primal_out, inner_jvp, outer_jvp = take_gradients()
    self.assertAllClose(1.1**3.5, primal_out)
    self.assertAllClose(3.5 * 1.1**2.5, inner_jvp)
    self.assertAllClose(3.5 * 2.5 * 1.1**1.5, outer_jvp)
  def testFunctionGrad(self):
    """Gradient correctness up to third order for a tf.function."""
    @def_function.function
    def f(x):
      return math_ops.reduce_prod(math_ops.tanh(x)**2)
    _test_gradients(self, f, [constant_op.constant([1., 2.])], order=3)
  def testReusingJVP(self):
    """One accumulator can track two primal/tangent pairs independently."""
    m1 = random_ops.random_uniform((256, 2096))
    m2 = array_ops.identity(m1)
    tangent1 = random_ops.random_uniform((256, 2096))
    tangent2 = random_ops.random_uniform((256, 2096))
    matmul = def_function.function(math_ops.matmul)
    with forwardprop.ForwardAccumulator(
        primals=[m1, m2], tangents=[tangent1, tangent2]) as acc:
      result1 = matmul(m1, m1, transpose_b=True)
      result2 = matmul(m2, m2, transpose_b=True)
    def _expected(mat, tangent):
      # Product-rule JVP of mat @ mat^T.
      return (math_ops.matmul(tangent, mat, transpose_b=True) +
              math_ops.matmul(mat, tangent, transpose_b=True))
    self.assertAllClose(result1, result2)
    self.assertAllClose(_expected(m1, tangent1), acc.jvp(result1))
    self.assertAllClose(_expected(m2, tangent2), acc.jvp(result2))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testHVPMemory(self):
    """Hessian-vector product leaks no Python objects (decorator asserts)."""
    def fun(x):
      return math_ops.reduce_prod(math_ops.tanh(x)**2)
    primals = constant_op.constant([1., 2., 3.])
    tangents = constant_op.constant([3., 4., 5.])
    _hvp(fun, (primals,), (tangents,))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testHVPCorrectness(self):
    """Forward-over-backward HVP matches an explicitly built Hessian."""
    def fun(x):
      return math_ops.reduce_prod(math_ops.tanh(x)**2)
    primals = constant_op.constant([1., 2., 3.])
    tangents = constant_op.constant([3., 4., 5.])
    forwardback_hvp_eager, = _hvp(fun, (primals,), (tangents,))
    forwardback_hvp_function, = def_function.function(_hvp)(fun, (primals,),
                                                            (tangents,))
    # Reference: full Hessian via nested backward tapes, then H @ tangents.
    with backprop.GradientTape(persistent=True) as g:
      g.watch(primals)
      with backprop.GradientTape() as gg:
        gg.watch(primals)
        out = fun(primals)
      grad = array_ops_stack.unstack(gg.gradient(out, primals))
    hessian = []
    for i in range(3):
      hessian.append(g.gradient(grad[i], primals))
    hessian = array_ops_stack.stack(hessian, axis=0)
    backback_hvp = math_ops.tensordot(hessian, tangents, axes=1)
    self.assertAllClose(backback_hvp, forwardback_hvp_eager)
    self.assertAllClose(backback_hvp, forwardback_hvp_function)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testShouldRecordAndStopRecord(self):
    """`stop_recording` disables both tape and accumulator for new ops."""
    c = constant_op.constant(1.)
    c_tangent = constant_op.constant(2.)
    with forwardprop.ForwardAccumulator(c, c_tangent) as acc:
      with backprop.GradientTape() as tape:
        self.assertFalse(record.should_record_backprop([c]))
        # 1 = accumulator only is watching c so far.
        self.assertEqual(1, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        tape.watch(c)
        # 2 = both tape and accumulator watching.
        self.assertEqual(2, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        self.assertTrue(record.should_record_backprop([c]))
        with record.stop_recording():
          self.assertEqual(0,
                           pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
          self.assertFalse(record.should_record_backprop([c]))
          d = c * 2.  # computed while recording is off
        self.assertEqual(2, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))
        self.assertTrue(record.should_record_backprop([c]))
        self.assertFalse(record.should_record_backprop([d]))
        self.assertIsNone(acc.jvp(d))
    self.assertIsNone(tape.gradient(d, c))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testRecordingSelectively(self):
    """Manually re-record ops for only the forward or only the backward pass."""
    c = constant_op.constant(1.)
    c_tangent = constant_op.constant(2.)
    with forwardprop.ForwardAccumulator(c, c_tangent) as acc:
      with backprop.GradientTape(persistent=True) as tape:
        tape.watch(c)
        with record.stop_recording():
          two = constant_op.constant(2.)
          d = c * two
          three = constant_op.constant(3.)
          e = c * three
        # Nothing recorded yet: neither JVPs nor gradients exist.
        self.assertIsNone(acc.jvp(d))
        self.assertIsNone(acc.jvp(e))
        self.assertIsNone(tape.gradient(d, c))
        self.assertIsNone(tape.gradient(e, c))
        # Register d only for forward-mode, e only for backward-mode.
        record.record_operation_forwardprop_only(
            "CustomForwardMul", [d], [c, two], lambda dd: (two * dd, c * dd),
            None)
        record.record_operation_backprop_only("CustomBackwardMul", [e],
                                              [c, three], lambda de:
                                              (three * de, c * de))
        self.assertAllClose(4., acc.jvp(d))  # 2 (tangent) * 2
        self.assertIsNone(acc.jvp(e))
        self.assertIsNone(tape.gradient(d, c))
        self.assertAllClose(3., tape.gradient(e, c))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testOpWithNoTrainableOutputs(self):
    """Watching a variable does not break ops with no trainable outputs."""
    v = variables.Variable(1.)
    with forwardprop.ForwardAccumulator(v, 11.):
      v.assign_sub(0.5)
      self.assertAllClose(0.5, self.evaluate(v))
  # TODO(b/141025187): Add a no_new_pyobjects decorator.
  def testVariableReadInFunction(self):
    """Variable reads inside a tf.function propagate the watched tangent."""
    v = variables.Variable(1.)
    with forwardprop.ForwardAccumulator(v, 11.) as acc:
      @def_function.function
      def f():
        return v.read_value(), 2. * v.read_value()
      result = f()
      self.assertAllClose((1.0, 2.), result)
    self.assertAllClose((11., 22.), acc.jvp(result))
  @parameterized.named_parameters([("ForwardPropFirst", True),
                                   ("TapeFirst", False)])
  def testForwardOverBackwardMemoryEfficiency(self, forward_prop_first):
    """JVP outputs inside a tape are not retained (no reference cycle).

    With gc disabled, the intermediate `e` must be freed as soon as it is
    deleted, proving the tape did not record ops on the accumulator's JVPs.
    """
    # Watching depends on nesting, not creation order
    c = constant_op.constant(1.)
    if forward_prop_first:
      forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
      gradient_tape = backprop.GradientTape()
    else:
      gradient_tape = backprop.GradientTape()
      forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
    try:
      gc.disable()  # so liveness is decided by refcounts alone
      with gradient_tape as tape:
        # Adding and removing the tape multiple times in different nesting
        # patterns does not affect watch ordering.
        pass
      with forward_accumulator as acc:
        with gradient_tape as tape:
          tape.watch(c)
          d = math_ops.cos(c)
          self.assertFalse(record.should_record_backprop((acc.jvp(d),)))
          e = math_ops.cos(acc.jvp(d))
          math_ops.cos(e)
          weak_e = weakref.ref(e)
          del e
          self.assertIsNone(weak_e())  # freed immediately: tape held no ref
      self.assertIsNone(tape.gradient(acc.jvp(d), c))
    finally:
      gc.enable()
  @parameterized.named_parameters([("ForwardPropFirst", True),
                                   ("TapeFirst", False)])
  def testBackwardOverForward(self, forward_prop_first):
    """A tape outside the accumulator differentiates through JVPs."""
    c = constant_op.constant(1.)
    # Watching depends on nesting, not creation order
    if forward_prop_first:
      forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
      gradient_tape = backprop.GradientTape()
    else:
      gradient_tape = backprop.GradientTape()
      forward_accumulator = forwardprop.ForwardAccumulator(c, .1)
    with gradient_tape as tape:
      with forward_accumulator as acc:
        tape.watch(c)
        d = math_ops.cos(c)
        # Unlike forward-over-backward, here the JVP is recordable.
        self.assertTrue(record.should_record_backprop((acc.jvp(d),)))
      self.assertAllClose(-.1 * math_ops.cos(1.), tape.gradient(acc.jvp(d), c))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testRecordingWithJVPIndices(self):
    """`record_operation_forwardprop_only` with explicit JVP index tuples."""
    c = constant_op.constant(1.)
    with forwardprop.ForwardAccumulator(c, 10.) as acc:
      packed_input_tangents = forwardprop_util.pack_tangents([c]).tangents
      self.assertAllClose([10.], packed_input_tangents)
      d = constant_op.constant(2.)
      d_tangent = constant_op.constant(3.)
      # (((0, 1),),) maps output 0's tangent to combined-input slot 1.
      record.record_operation_forwardprop_only("FunctionWithInlineJVPs",
                                               [d] + [d_tangent],
                                               [c] + packed_input_tangents,
                                               None, (((0, 1),),))
      self.assertAllClose(3., acc.jvp(d))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testSpecialForwardFunctionUsed(self):
    """A forward-only function passed to `record_operation` overrides the JVP."""
    c = constant_op.constant(1.)
    d = constant_op.constant(2.)
    e = constant_op.constant(3.)
    with forwardprop.ForwardAccumulator(c, 10.) as acc:
      record.record_operation("ForwardIsSpecial", [d], [c], None,
                              lambda jvp: [-2. * jvp])
      self.assertAllClose(-20., acc.jvp(d))
      # Zero-output and identity forward functions must also be accepted.
      record.record_operation("ForwardIsSpecial2", [], [], None, lambda: [])
      record.record_operation("ForwardIsSpecial3", [e], [d], None,
                              lambda x: [x])
      self.assertAllClose(-20., acc.jvp(e))
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testVariableWatched(self):
    """JVPs flow through elementwise ops on a watched variable."""
    v = variables.Variable([1., 2., 3.])
    with forwardprop.ForwardAccumulator(v, constant_op.constant([.1, -.2,
                                                                 .3])) as acc:
      self.assertAllClose([.1, -.2, .3], acc.jvp(v))
      x = v * 2.
      self.assertAllClose([.2, -.4, .6], acc.jvp(x))
      x2 = v + .1
      self.assertAllClose([.1, -.2, .3], acc.jvp(x2))
  def testUnconnectedGradients(self):
    """`unconnected_gradients` chooses zeros vs. None for unwatched tensors."""
    x = constant_op.constant(-1.)
    with forwardprop.ForwardAccumulator(x, 0.1) as acc:
      self.assertAllClose(0.1, acc.jvp(x, unconnected_gradients="zero"))
      self.assertAllClose(0.1, acc.jvp(x, unconnected_gradients="none"))
      y = constant_op.constant(-2.)  # never watched
      self.assertAllClose(0.0, acc.jvp(y, unconnected_gradients="zero"))
      self.assertIsNone(acc.jvp(y, unconnected_gradients="none"))
  # TODO(kkb): One weakref instance is created with warmup_iters=2,
  # investigate.
  @test_util.assert_no_new_pyobjects_executing_eagerly(warmup_iters=3)
  def testVariableWatchedFunction(self):
    """Accumulator created inside a tf.function watches a lazily made variable."""
    class _Model(module.Module):
      def __init__(self):
        self._v = None
      @def_function.function
      def compute_jvps(self):
        # Variable is created on first trace only.
        if self._v is None:
          self._v = variables.Variable([1., 2., 3.])
        with forwardprop.ForwardAccumulator(self._v,
                                            constant_op.constant([.1, -.2,
                                                                  .3])) as acc:
          x = self._v * 2.
          x2 = self._v + .1
        return acc.jvp((self._v, x, x2))
    model = _Model()
    v_jvp, x_jvp, x2_jvp = model.compute_jvps()
    self.assertAllClose([.1, -.2, .3], v_jvp)
    self.assertAllClose([.2, -.4, .6], x_jvp)
    self.assertAllClose([.1, -.2, .3], x2_jvp)
  def testIndexSlicesGrad(self):
    """Forward gradients through `gather` (IndexedSlices path), eagerly."""
    x = constant_op.constant([1.])
    with forwardprop.ForwardAccumulator(x, constant_op.constant([3.])) as acc:
      y = array_ops.gather(x, 0)
    self.assertAllClose(3., acc.jvp(y))
  def testIndexSlicesGradInFunction(self):
    """Forward gradients through `gather` inside a tf.function."""
    @def_function.function
    def f(a):
      return array_ops.gather(a, 0)
    x = constant_op.constant([1.])
    with forwardprop.ForwardAccumulator(x, constant_op.constant([3.])) as acc:
      y = f(x)
    self.assertAllClose(3., acc.jvp(y))
  # NOTE: assert_no_new_pyobjects_executing_eagerly fails flakily on this
  # test... could be something wrong with the test decorator, or some sort of
  # nondeterministic caching.
  def testMirroredVariableWatched(self):
    """Accumulators work per-replica on a MirroredStrategy variable."""
    def _replicated(input_tangent):
      with forwardprop.ForwardAccumulator(v, input_tangent) as acc:
        self.assertAllClose([.1, -.2, .3], acc.jvp(v))
        x = v * 2.
        self.assertAllClose([.2, -.4, .6], acc.jvp(x))
        x2 = v + .1
        self.assertAllClose([.1, -.2, .3], acc.jvp(x2))
    strategy = mirrored_strategy.MirroredStrategy()
    with strategy.scope():
      v = variables.Variable([1., 2., 3.])
      strategy.run(_replicated, args=(constant_op.constant([.1, -.2, .3]),))
  # TODO(b/141025187): Add a no_new_pyobjects decorator.
  def testArgumentUnused(self):
    """A tf.function that ignores its watched argument yields no JVP."""
    v = constant_op.constant(1.)
    with forwardprop.ForwardAccumulator(v, 11.) as acc:
      @def_function.function
      def _f(x):
        del x  # output does not depend on the watched input
        return constant_op.constant(1.)
      result = _f(v)
      self.assertAllClose(1.0, result)
      self.assertIsNone(acc.jvp(result))
@def_function.function
def _has_loop(iters, y):
  """Returns sum over i of y * i for i in [0, iters) (traced while_loop)."""
  ret = 0.
  for i in math_ops.range(iters):
    ret += y * math_ops.cast(i, dtypes.float32)
  return ret
@def_function.function
def _has_cond(k, y):
  """Returns 3*y when k > 1, else 0 (traced cond)."""
  if k > 1:
    ret = 3. * y
  else:
    ret = 0.
  return ret
@def_function.function
def _fprop_while(iters, y):
  """JVP w.r.t. y of the `_has_loop` computation, taken inside the function."""
  with forwardprop.ForwardAccumulator(y, 1.) as acc:
    ret = 0.
    for i in math_ops.range(iters):
      ret += y * math_ops.cast(i, dtypes.float32)
  return acc.jvp(ret)
@def_function.function
def _fprop_cond(k, y):
  """JVP w.r.t. y of the `_has_cond` computation, taken inside the function."""
  with forwardprop.ForwardAccumulator(y, 1.) as acc:
    if k > 1:
      ret = 3. * y
    else:
      ret = 0.
  return acc.jvp(ret)
| ForwardpropTest |
python | apache__airflow | task-sdk/tests/task_sdk/definitions/decorators/test_setup_teardown.py | {
"start": 1835,
"end": 54185
} | class ____:
    def test_marking_functions_as_setup_task(self):
        """@setup on a plain function creates a task flagged is_setup."""
        @setup
        def mytask():
            print("I am a setup task")
        with DAG("test_marking_functions_as_setup_task") as dag:
            mytask()
        assert len(dag.task_group.children) == 1
        setup_task = dag.task_group.children["mytask"]
        assert setup_task.is_setup
    def test_marking_functions_as_teardown_task(self):
        """@teardown on a plain function creates a task flagged is_teardown."""
        @teardown
        def mytask():
            print("I am a teardown task")
        with DAG("test_marking_functions_as_teardown_task") as dag:
            mytask()
        assert len(dag.task_group.children) == 1
        teardown_task = dag.task_group.children["mytask"]
        assert teardown_task.is_teardown
    def test_marking_decorated_functions_as_setup_task(self):
        """@setup stacks on top of @task and still marks the task as setup."""
        @setup
        @task
        def mytask():
            print("I am a setup task")
        with DAG("test_marking_decorated_functions_as_setup_task") as dag:
            mytask()
        assert len(dag.task_group.children) == 1
        setup_task = dag.task_group.children["mytask"]
        assert setup_task.is_setup
    def test_marking_operator_as_setup_task(self):
        """.as_setup() marks a classic operator instance as a setup task."""
        with DAG("test_marking_operator_as_setup_task") as dag:
            BashOperator(task_id="mytask", bash_command='echo "I am a setup task"').as_setup()
        assert len(dag.task_group.children) == 1
        setup_task = dag.task_group.children["mytask"]
        assert setup_task.is_setup
    def test_marking_decorated_functions_as_teardown_task(self):
        """@teardown stacks on top of @task and still marks the task as teardown."""
        @teardown
        @task
        def mytask():
            print("I am a teardown task")
        with DAG("test_marking_decorated_functions_as_teardown_task") as dag:
            mytask()
        assert len(dag.task_group.children) == 1
        teardown_task = dag.task_group.children["mytask"]
        assert teardown_task.is_teardown
    def test_marking_operator_as_teardown_task(self):
        """.as_teardown() marks a classic operator instance as a teardown task."""
        with DAG("test_marking_operator_as_teardown_task") as dag:
            BashOperator(task_id="mytask", bash_command='echo "I am a setup task"').as_teardown()
        assert len(dag.task_group.children) == 1
        teardown_task = dag.task_group.children["mytask"]
        assert teardown_task.is_teardown
    def test_setup_taskgroup_decorator(self):
        """@setup applied to a @task_group must raise AirflowException."""
        with DAG("test_setup_taskgroup_decorator"):
            with pytest.raises(  # noqa: PT012, check decorators required more than one line
                expected_exception=AirflowException,
                match="Task groups cannot be marked as setup or teardown.",
            ):
                @setup
                @task_group
                def mygroup():
                    @task
                    def mytask():
                        print("I am a setup task")
                    mytask()
                mygroup()
    def test_teardown_taskgroup_decorator(self):
        """@teardown applied to a @task_group must raise AirflowException."""
        with DAG("test_teardown_taskgroup_decorator"):
            with pytest.raises(  # noqa: PT012, check decorators required more than one line
                expected_exception=AirflowException,
                match="Task groups cannot be marked as setup or teardown.",
            ):
                @teardown
                @task_group
                def mygroup():
                    @task
                    def mytask():
                        print("I am a teardown task")
                    mytask()
                mygroup()
    @pytest.mark.parametrize("on_failure_fail_dagrun", [True, False])
    def test_teardown_task_decorators_works_with_on_failure_fail_dagrun(self, on_failure_fail_dagrun):
        """@teardown(on_failure_fail_dagrun=...) is carried onto the task."""
        @teardown(on_failure_fail_dagrun=on_failure_fail_dagrun)
        def mytask():
            print("I am a teardown task")
        with DAG("test_teardown_task_decorators_works_with_on_failure_fail_dagrun") as dag:
            mytask()
        teardown_task = dag.task_group.children["mytask"]
        assert teardown_task.is_teardown
        assert teardown_task.on_failure_fail_dagrun is on_failure_fail_dagrun
        assert len(dag.task_group.children) == 1
    @pytest.mark.parametrize("on_failure_fail_dagrun", [True, False])
    def test_classic_teardown_task_works_with_on_failure_fail_dagrun(self, on_failure_fail_dagrun):
        """as_teardown(on_failure_fail_dagrun=...) is carried onto the operator."""
        with DAG("test_classic_teardown_task_works_with_on_failure_fail_dagrun") as dag:
            BashOperator(
                task_id="mytask",
                bash_command='echo "I am a teardown task"',
            ).as_teardown(on_failure_fail_dagrun=on_failure_fail_dagrun)
        teardown_task = dag.task_group.children["mytask"]
        assert teardown_task.is_teardown
        assert teardown_task.on_failure_fail_dagrun is on_failure_fail_dagrun
        assert len(dag.task_group.children) == 1
    def test_setup_task_can_be_overridden(self):
        """.override() on a @setup task keeps the setup flag."""
        @setup
        def mytask():
            print("I am a setup task")
        with DAG("test_setup_task_can_be_overridden") as dag:
            mytask.override(task_id="mytask2")()
        assert len(dag.task_group.children) == 1
        setup_task = dag.task_group.children["mytask2"]
        assert setup_task.is_setup
    def test_teardown_on_failure_fail_dagrun_can_be_overridden(self):
        """.override(on_failure_fail_dagrun=True) flips the flag at call time."""
        @teardown
        def mytask():
            print("I am a teardown task")
        with DAG("test_teardown_on_failure_fail_dagrun_can_be_overridden") as dag:
            mytask.override(on_failure_fail_dagrun=True)()
        assert len(dag.task_group.children) == 1
        teardown_task = dag.task_group.children["mytask"]
        assert teardown_task.is_teardown
        assert teardown_task.on_failure_fail_dagrun
    def test_retain_on_failure_fail_dagrun_when_other_attrs_are_overridden(self):
        """Overriding an unrelated attr keeps on_failure_fail_dagrun=True."""
        @teardown(on_failure_fail_dagrun=True)
        def mytask():
            print("I am a teardown task")
        with DAG("test_retain_on_failure_fail_dagrun_when_other_attrs_are_overridden") as dag:
            mytask.override(task_id="mytask2")()
        assert len(dag.task_group.children) == 1
        teardown_task = dag.task_group.children["mytask2"]
        assert teardown_task.is_teardown
        assert teardown_task.on_failure_fail_dagrun
    def test_setup_teardown_mixed_up_in_a_dag(self):
        """Unrelated setup/teardown/normal tasks coexist with no implicit edges."""
        @setup
        def setuptask():
            print("setup")
        @setup
        def setuptask2():
            print("setup")
        @teardown
        def teardowntask():
            print("teardown")
        @teardown
        def teardowntask2():
            print("teardown")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_setup_teardown_mixed_up_in_a_dag") as dag:
            setuptask()
            teardowntask()
            setuptask2()
            teardowntask2()
            mytask()
            mytask2()
        assert len(dag.task_group.children) == 6
        # No task acquired any downstream dependency.
        assert sum(1 for x in dag.tasks if not x.downstream_list) == 6
        assert dag.task_group.children["setuptask"].is_setup
        assert dag.task_group.children["teardowntask"].is_teardown
        assert dag.task_group.children["setuptask2"].is_setup
        assert dag.task_group.children["teardowntask2"].is_teardown
        assert dag.task_group.children["mytask"].is_setup is False
        assert dag.task_group.children["mytask"].is_teardown is False
        assert dag.task_group.children["mytask2"].is_setup is False
        assert dag.task_group.children["mytask2"].is_teardown is False
    def test_setup_teardown_as_context_manager_normal_tasks_rel_set_downstream(self):
        """
        Test that setup >> teardown tasks are set up correctly when used as context managers
        and the normal tasks are set up with >> relations.
        """
        @setup
        def setuptask():
            print("setup")
        @setup
        def setuptask2():
            print("setup")
        @teardown
        def teardowntask():
            print("teardown")
        @teardown
        def teardowntask2():
            print("teardown")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_setup_teardown_as_context_manager_normal_tasks_rel_set_downstream") as dag:
            # Nested scopes: outer setup/teardown wraps the inner pair, which
            # wraps the work tasks.
            with setuptask() >> teardowntask():
                with setuptask2() >> teardowntask2():
                    mytask() >> mytask2()
        assert len(dag.task_group.children) == 6
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
        assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {"teardowntask2", "mytask"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask2"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"teardowntask2"}
        assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "teardowntask2"}
        assert not dag.task_group.children["teardowntask"].downstream_task_ids
        assert dag.task_group.children["teardowntask2"].upstream_task_ids == {"setuptask2", "mytask2"}
        assert dag.task_group.children["teardowntask2"].downstream_task_ids == {"teardowntask"}
    def test_setup_teardown_as_context_manager_normal_tasks_rel_set_upstream(self):
        """
        Test that setup >> teardown tasks are set up correctly when used as context managers
        and the normal tasks are set up with << relations.
        """
        @setup
        def setuptask():
            print("setup")
        @setup
        def setuptask2():
            print("setup")
        @teardown
        def teardowntask():
            print("teardown")
        @teardown
        def teardowntask2():
            print("teardown")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_setup_teardown_as_context_manager_normal_tasks_rel_set_upstream") as dag:
            with setuptask() >> teardowntask():
                with setuptask2() >> teardowntask2():
                    # << reverses the work-task ordering: mytask2 runs first.
                    mytask() << mytask2()
        assert len(dag.task_group.children) == 6
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
        assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {"teardowntask2", "mytask2"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"setuptask2"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask"}
        assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "teardowntask2"}
        assert not dag.task_group.children["teardowntask"].downstream_task_ids
        assert dag.task_group.children["teardowntask2"].upstream_task_ids == {"setuptask2", "mytask"}
        assert dag.task_group.children["teardowntask2"].downstream_task_ids == {"teardowntask"}
    def test_normal_task_raises_when_used_as_context_managers(self):
        """A plain @task instance is not a valid `with` context manager."""
        @task()
        def mytask():
            print("mytask")
        with DAG("test_setup_teardown_as_context_manager_normal_tasks_rel_set_up"):
            with pytest.raises(
                AirflowException, match="Only setup/teardown tasks can be used as context managers."
            ):
                with mytask():
                    pass
    def test_only_setup(self):
        """A lone setup context wires itself upstream of the work tasks."""
        @setup
        def setuptask():
            print("setup")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_only_setup") as dag:
            with setuptask():
                mytask() >> mytask2()
        assert len(dag.task_group.children) == 3
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
        assert not dag.task_group.children["mytask2"].downstream_task_ids
    def test_only_teardown(self):
        """A lone teardown context wires itself downstream of the work tasks."""
        @teardown
        def teardowntask():
            print("teardown")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_only_teardown") as dag:
            with teardowntask():
                mytask() >> mytask2()
        assert len(dag.task_group.children) == 3
        assert not dag.task_group.children["mytask"].upstream_task_ids
        assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"teardowntask"}
        assert dag.task_group.children["teardowntask"].upstream_task_ids == {"mytask2"}
        assert not dag.task_group.children["teardowntask"].downstream_task_ids
    def test_nested_only_setup(self):
        """An inner setup-only context nests under an outer setup >> teardown pair."""
        @setup
        def setuptask():
            print("setup")
        @setup
        def setuptask2():
            print("setup")
        @teardown
        def teardowntask():
            print("teardown")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_nested_only_setup") as dag:
            with setuptask() >> teardowntask():
                with setuptask2():
                    mytask() << mytask2()
        assert len(dag.task_group.children) == 5
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
        assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"setuptask2"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask"}
        assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "mytask"}
        assert not dag.task_group.children["teardowntask"].downstream_task_ids
    def test_task_in_different_setup_context(self):
        """Work tasks in sibling setup contexts keep their cross-context >> edges."""
        @setup
        def setuptask():
            print("setup")
        @setup
        def setuptask2():
            print("setup")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        @task()
        def mytask3():
            print("mytask")
        @task
        def mytask4():
            print("mytask")
        with DAG("test_task_in_different_setup_context") as dag:
            with setuptask():
                t1 = mytask()
                t2 = mytask2()
                t1 >> t2
            with setuptask2():
                t3 = mytask3()
                t4 = mytask4()
                # t2 lives in the first context; crossing edge set here.
                t2 >> t3 >> t4
        assert len(dag.task_group.children) == 6
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert not dag.task_group.children["setuptask2"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask3"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask3"}
        assert dag.task_group.children["mytask3"].upstream_task_ids == {"mytask2", "setuptask2"}
        assert dag.task_group.children["mytask3"].downstream_task_ids == {"mytask4"}
        assert dag.task_group.children["mytask4"].upstream_task_ids == {"mytask3"}
        assert not dag.task_group.children["mytask4"].downstream_task_ids
    def test_task_in_different_setup_context_2(self):
        """Variant where the second setup context is nested inside the first."""
        @setup
        def setuptask():
            print("setup")
        @setup
        def setuptask2():
            print("setup")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        @task()
        def mytask3():
            print("mytask")
        @task
        def mytask4():
            print("mytask")
        with DAG("test_task_in_different_setup_context_2") as dag:
            with setuptask():
                t1 = mytask()
                t2 = mytask2()
                t1 >> t2
                with setuptask2():
                    t3 = mytask3()
                    t4 = mytask4()
                    t2 >> t3 >> t4
        assert len(dag.task_group.children) == 6
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        # Nesting makes setuptask2 downstream of the outer setuptask.
        assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask", "setuptask2"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask3"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask3"}
        assert dag.task_group.children["mytask3"].upstream_task_ids == {"mytask2", "setuptask2"}
        assert dag.task_group.children["mytask3"].downstream_task_ids == {"mytask4"}
        assert dag.task_group.children["mytask4"].upstream_task_ids == {"mytask3"}
        assert not dag.task_group.children["mytask4"].downstream_task_ids
    def test_setup_teardown_as_context_manager_with_work_task_rel_not_set(self):
        """
        Test that setup >> teardown tasks are set up correctly when used as context managers
        and the normal tasks are set up even if they don't have a relationship set
        """
        @setup
        def setuptask():
            print("setup")
        @setup
        def setuptask2():
            print("setup")
        @teardown
        def teardowntask():
            print("teardown")
        @teardown
        def teardowntask2():
            print("teardown")
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_setup_teardown_as_context_manager") as dag:
            with setuptask() >> teardowntask():
                with setuptask2() >> teardowntask2():
                    # No >>/<< between work tasks: both hang directly off the
                    # inner setup and feed the inner teardown in parallel.
                    mytask()
                    mytask2()
        assert len(dag.task_group.children) == 6
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
        assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {
            "teardowntask2",
            "mytask",
            "mytask2",
        }
        assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask2"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"setuptask2"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"teardowntask2"}
        assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "teardowntask2"}
        assert not dag.task_group.children["teardowntask"].downstream_task_ids
        assert dag.task_group.children["teardowntask2"].upstream_task_ids == {
            "setuptask2",
            "mytask",
            "mytask2",
        }
        assert dag.task_group.children["teardowntask2"].downstream_task_ids == {"teardowntask"}
    def test_classic_setup_teardown_as_context_manager_normal_tasks_rel_set_downstream(self):
        """
        Test that setup >> teardown tasks are set up correctly when used as context managers
        and the normal tasks are set up with >> relations.
        """
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_setup_teardown_as_context_manager") as dag:
            # Classic operators flagged via as_setup()/as_teardown().
            setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
            setuptask2 = BashOperator(task_id="setuptask2", bash_command="echo 1").as_setup()
            teardowntask = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
            teardowntask2 = BashOperator(task_id="teardowntask2", bash_command="echo 1").as_teardown()
            with setuptask >> teardowntask:
                with setuptask2 >> teardowntask2:
                    mytask() >> mytask2()
        assert len(dag.task_group.children) == 6
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
        assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {"teardowntask2", "mytask"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask2"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"teardowntask2"}
        assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "teardowntask2"}
        assert not dag.task_group.children["teardowntask"].downstream_task_ids
        assert dag.task_group.children["teardowntask2"].upstream_task_ids == {"setuptask2", "mytask2"}
        assert dag.task_group.children["teardowntask2"].downstream_task_ids == {"teardowntask"}
    def test_classic_setup_teardown_as_context_manager_normal_tasks_rel_set_upstream(self):
        """
        Test that setup >> teardown tasks are set up correctly when used as context managers
        and the normal tasks are set up with << relations.
        """
        @task()
        def mytask():
            print("mytask")
        @task()
        def mytask2():
            print("mytask")
        with DAG("test_setup_teardown_as_context_manager") as dag:
            setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
            setuptask2 = BashOperator(task_id="setuptask2", bash_command="echo 1").as_setup()
            teardowntask = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
            teardowntask2 = BashOperator(task_id="teardowntask2", bash_command="echo 1").as_teardown()
            with setuptask >> teardowntask:
                with setuptask2 >> teardowntask2:
                    # << reverses the work-task ordering: mytask2 runs first.
                    mytask() << mytask2()
        assert len(dag.task_group.children) == 6
        assert not dag.task_group.children["setuptask"].upstream_task_ids
        assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
        assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
        assert dag.task_group.children["setuptask2"].downstream_task_ids == {"teardowntask2", "mytask2"}
        assert dag.task_group.children["mytask"].upstream_task_ids == {"mytask2"}
        assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask2"}
        assert dag.task_group.children["mytask2"].upstream_task_ids == {"setuptask2"}
        assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask"}
        assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "teardowntask2"}
        assert not dag.task_group.children["teardowntask"].downstream_task_ids
        assert dag.task_group.children["teardowntask2"].upstream_task_ids == {"setuptask2", "mytask"}
        assert dag.task_group.children["teardowntask2"].downstream_task_ids == {"teardowntask"}
def test_only_setup_classic(self):
    """A lone classic setup task used as a context manager precedes the work tasks."""

    @task()
    def mytask():
        print("mytask")

    @task()
    def mytask2():
        print("mytask")

    with DAG("test_only_setup_classic") as dag:
        setup_op = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        with setup_op:
            mytask() >> mytask2()

    children = dag.task_group.children
    assert len(children) == 3
    # Expected chain: setuptask -> mytask -> mytask2, with no teardown edges.
    assert not children["setuptask"].upstream_task_ids
    assert children["setuptask"].downstream_task_ids == {"mytask"}
    assert children["mytask"].upstream_task_ids == {"setuptask"}
    assert children["mytask"].downstream_task_ids == {"mytask2"}
    assert children["mytask2"].upstream_task_ids == {"mytask"}
    assert not children["mytask2"].downstream_task_ids
def test_only_teardown_classic(self):
    """A lone classic teardown task used as a context manager follows the work tasks."""

    @task()
    def mytask():
        print("mytask")

    @task()
    def mytask2():
        print("mytask")

    with DAG("test_only_teardown_classic") as dag:
        teardown_op = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
        with teardown_op:
            mytask() >> mytask2()

    children = dag.task_group.children
    assert len(children) == 3
    # Expected chain: mytask -> mytask2 -> teardowntask, with no setup edges.
    assert not children["mytask"].upstream_task_ids
    assert children["mytask"].downstream_task_ids == {"mytask2"}
    assert children["mytask2"].upstream_task_ids == {"mytask"}
    assert children["mytask2"].downstream_task_ids == {"teardowntask"}
    assert children["teardowntask"].upstream_task_ids == {"mytask2"}
    assert not children["teardowntask"].downstream_task_ids
def test_nested_only_setup_classic(self):
    """An inner scope with only a setup task nests inside a full setup/teardown scope.

    The inner setup has no matching teardown, so the inner work tasks connect
    straight to the outer teardown.
    """

    @task()
    def mytask():
        print("mytask")

    @task()
    def mytask2():
        print("mytask")

    with DAG("test_nested_only_setup_classic") as dag:
        setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        setuptask2 = BashOperator(task_id="setuptask2", bash_command="echo 1").as_setup()
        teardowntask = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
        with setuptask >> teardowntask:
            # Inner scope has a setup only; "<<" makes mytask2 the first work task.
            with setuptask2:
                mytask() << mytask2()

    assert len(dag.task_group.children) == 5
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
    assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask2"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"mytask2"}
    # With no inner teardown, the last work task feeds the OUTER teardown.
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
    assert dag.task_group.children["mytask2"].upstream_task_ids == {"setuptask2"}
    assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "mytask"}
    assert not dag.task_group.children["teardowntask"].downstream_task_ids
def test_classic_setup_teardown_task_in_different_setup_context(self):
    """Work tasks in two SIBLING setup scopes can be chained across the scopes.

    The two ``with`` blocks are sequential (not nested), so the two setups are
    independent roots; ``t2 >> t3`` links a task from the first scope into the
    second one.
    """

    @task()
    def mytask():
        print("mytask")

    @task()
    def mytask2():
        print("mytask")

    @task()
    def mytask3():
        print("mytask")

    @task
    def mytask4():
        print("mytask")

    with DAG("test_classic_setup_teardown_task_in_different_setup_context") as dag:
        setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        setuptask2 = BashOperator(task_id="setuptask2", bash_command="echo 1").as_setup()
        with setuptask:
            t1 = mytask()
            t2 = mytask2()
            t1 >> t2
        # Sibling scope: no relation between setuptask and setuptask2.
        with setuptask2:
            t3 = mytask3()
            t4 = mytask4()
            t2 >> t3 >> t4

    assert len(dag.task_group.children) == 6
    # Both setups are independent roots.
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert not dag.task_group.children["setuptask2"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask"}
    assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask3"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
    assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
    assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask3"}
    # mytask3 joins the cross-scope chain with its own scope's setup.
    assert dag.task_group.children["mytask3"].upstream_task_ids == {"mytask2", "setuptask2"}
    assert dag.task_group.children["mytask3"].downstream_task_ids == {"mytask4"}
    assert dag.task_group.children["mytask4"].upstream_task_ids == {"mytask3"}
    assert not dag.task_group.children["mytask4"].downstream_task_ids
def test_classic_setup_teardown_task_in_different_setup_context_2(self):
    """Same construction as the sibling-scope test, but with NESTED setup scopes.

    Here the second ``with`` block sits inside the first, so ``setuptask2``
    becomes downstream of ``setuptask`` (see the ``setuptask2`` upstream
    assertion below).
    """

    @task()
    def mytask():
        print("mytask")

    @task()
    def mytask2():
        print("mytask")

    @task()
    def mytask3():
        print("mytask")

    @task
    def mytask4():
        print("mytask")

    with DAG("test_classic_setup_teardown_task_in_different_setup_context_2") as dag:
        setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        setuptask2 = BashOperator(task_id="setuptask2", bash_command="echo 1").as_setup()
        with setuptask:
            t1 = mytask()
            t2 = mytask2()
            t1 >> t2
            # Nested scope: setuptask2 is entered while setuptask's scope is active.
            with setuptask2:
                t3 = mytask3()
                t4 = mytask4()
                t2 >> t3 >> t4

    assert len(dag.task_group.children) == 6
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    # Nesting creates the setuptask -> setuptask2 edge.
    assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask", "setuptask2"}
    assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask3"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"mytask2"}
    assert dag.task_group.children["mytask2"].upstream_task_ids == {"mytask"}
    assert dag.task_group.children["mytask2"].downstream_task_ids == {"mytask3"}
    assert dag.task_group.children["mytask3"].upstream_task_ids == {"mytask2", "setuptask2"}
    assert dag.task_group.children["mytask3"].downstream_task_ids == {"mytask4"}
    assert dag.task_group.children["mytask4"].upstream_task_ids == {"mytask3"}
    assert not dag.task_group.children["mytask4"].downstream_task_ids
def test_classic_setup_teardown_as_context_manager_with_work_task_rel_not_set(self):
    """
    Test that setup >> teardown tasks are set up correctly when used as context managers
    and the normal tasks are set up even if they don't have a relationship set.

    Both unrelated work tasks attach in parallel between the innermost setup and
    teardown.
    """

    @task()
    def mytask():
        print("mytask")

    @task()
    def mytask2():
        print("mytask")

    # dag_id matches the test name, consistent with the rest of this file
    # (was the placeholder "foo").
    with DAG("test_classic_setup_teardown_as_context_manager_with_work_task_rel_not_set") as dag:
        setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        setuptask2 = BashOperator(task_id="setuptask2", bash_command="echo 1").as_setup()
        teardowntask = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
        teardowntask2 = BashOperator(task_id="teardowntask2", bash_command="echo 1").as_teardown()
        with setuptask >> teardowntask:
            with setuptask2 >> teardowntask2:
                # No explicit relation between the work tasks.
                mytask()
                mytask2()

    assert len(dag.task_group.children) == 6
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"teardowntask", "setuptask2"}
    assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
    # Both work tasks hang directly off the inner setup...
    assert dag.task_group.children["setuptask2"].downstream_task_ids == {
        "teardowntask2",
        "mytask",
        "mytask2",
    }
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask2"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask2"}
    assert dag.task_group.children["mytask2"].upstream_task_ids == {"setuptask2"}
    assert dag.task_group.children["mytask2"].downstream_task_ids == {"teardowntask2"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {"setuptask", "teardowntask2"}
    assert not dag.task_group.children["teardowntask"].downstream_task_ids
    # ...and both feed the inner teardown in parallel.
    assert dag.task_group.children["teardowntask2"].upstream_task_ids == {
        "setuptask2",
        "mytask",
        "mytask2",
    }
    assert dag.task_group.children["teardowntask2"].downstream_task_ids == {"teardowntask"}
def test_setup_decorator_context_manager_with_list_on_left(self):
    """A plain list of decorated setup tasks on the LEFT of ">>" forms one scope.

    ``[setuptask(), setuptask2()] >> teardowntask()`` makes both setups parallel
    roots of the scope, each fanning out to the work task and the teardown.
    """

    @setup
    def setuptask():
        print("setup")

    @setup
    def setuptask2():
        print("setup")

    @task()
    def mytask():
        print("mytask")

    @teardown
    def teardowntask():
        print("teardown")

    with DAG("test_setup_decorator_context_manager_with_list_on_left") as dag:
        with [setuptask(), setuptask2()] >> teardowntask():
            mytask()

    assert len(dag.task_group.children) == 4
    # Both setups are independent roots.
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert not dag.task_group.children["setuptask2"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask", "teardowntask"}
    assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask", "teardowntask"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask", "setuptask2"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
    # The teardown waits for both setups and the work task.
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {
        "setuptask",
        "setuptask2",
        "mytask",
    }
def test_setup_decorator_context_manager_with_list_on_right(self):
    """A list of decorated setup tasks on the RIGHT of "<<" forms one scope.

    A plain list has no ``__rshift__``/``__lshift__`` hooks of its own, so it
    must be wrapped with ``context_wrapper`` when it appears on the right-hand
    side; the resulting graph is identical to the list-on-left variant.
    """

    @setup
    def setuptask():
        print("setup")

    @setup
    def setuptask2():
        print("setup")

    @task()
    def mytask():
        print("mytask")

    @teardown
    def teardowntask():
        print("teardown")

    with DAG("test_setup_decorator_context_manager_with_list_on_right") as dag:
        with teardowntask() << context_wrapper([setuptask(), setuptask2()]):
            mytask()

    assert len(dag.task_group.children) == 4
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert not dag.task_group.children["setuptask2"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask", "teardowntask"}
    assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask", "teardowntask"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask", "setuptask2"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {
        "setuptask",
        "setuptask2",
        "mytask",
    }
def test_setup_decorator_context_manager_errors_with_mixed_up_tasks(self):
    """Mixing setup and teardown tasks inside one ``context_wrapper`` list raises."""

    @setup
    def setuptask():
        print("setup")

    @setup
    def setuptask2():
        print("setup")

    @task()
    def mytask():
        print("mytask")

    @teardown
    def teardowntask():
        print("teardown")

    # The error is raised while building the scope, before any work task runs.
    with pytest.raises(  # noqa: PT012, check decorators required more than one line
        ValueError, match="All tasks in the list must be either setup or teardown tasks"
    ):
        with DAG("test_setup_decorator_context_manager_errors_with_mixed_up_tasks"):
            with setuptask() << context_wrapper([teardowntask(), setuptask2()]):
                mytask()
def test_teardown_decorator_context_manager_with_list_on_left(self):
    """A list of decorated teardown tasks on the LEFT of "<<" forms one scope.

    ``[teardowntask(), teardowntask2()] << setuptask()`` puts both teardowns
    downstream of the setup, and work tasks defined inside the scope run between
    the setup and every teardown.
    """

    @setup
    def setuptask():
        print("setup")

    @task()
    def mytask():
        print("mytask")

    @teardown
    def teardowntask():
        print("teardown")

    @teardown
    def teardowntask2():
        print("teardown")

    # dag_id matches the test name (was copied from the setup-decorator test).
    with DAG("test_teardown_decorator_context_manager_with_list_on_left") as dag:
        with [teardowntask(), teardowntask2()] << setuptask():
            mytask()

    assert len(dag.task_group.children) == 4
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {
        "mytask",
        "teardowntask",
        "teardowntask2",
    }
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    # The work task fans out to both teardowns.
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask", "teardowntask2"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
    assert dag.task_group.children["teardowntask2"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
def test_teardown_decorator_context_manager_with_list_on_right(self):
    """A list of decorated teardown tasks on the RIGHT of ">>" forms one scope.

    The list must be wrapped with ``context_wrapper`` on the right-hand side;
    the resulting graph is identical to the list-on-left variant.
    """

    @setup
    def setuptask():
        print("setup")

    @task()
    def mytask():
        print("mytask")

    @teardown
    def teardowntask():
        print("teardown")

    @teardown
    def teardowntask2():
        print("teardown")

    # dag_id matches the test name (was copied from the setup-decorator test).
    with DAG("test_teardown_decorator_context_manager_with_list_on_right") as dag:
        with setuptask() >> context_wrapper([teardowntask(), teardowntask2()]):
            mytask()

    assert len(dag.task_group.children) == 4
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {
        "mytask",
        "teardowntask",
        "teardowntask2",
    }
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask", "teardowntask2"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
    assert dag.task_group.children["teardowntask2"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
def test_classic_operator_context_manager_with_list_on_left(self):
    """A list of CLASSIC teardown operators on the LEFT of "<<" forms one scope.

    Mirrors the decorator-based list-on-left test, using BashOperator with
    ``as_setup()`` / ``as_teardown()`` instead of the @setup/@teardown decorators.
    """

    @task()
    def mytask():
        print("mytask")

    # dag_id matches the test name (was copied from the decorator-based test).
    with DAG("test_classic_operator_context_manager_with_list_on_left") as dag:
        teardowntask = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
        teardowntask2 = BashOperator(task_id="teardowntask2", bash_command="echo 1").as_teardown()
        setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        with [teardowntask, teardowntask2] << setuptask:
            mytask()

    assert len(dag.task_group.children) == 4
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {
        "mytask",
        "teardowntask",
        "teardowntask2",
    }
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask", "teardowntask2"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
    assert dag.task_group.children["teardowntask2"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
def test_classic_operator_context_manager_with_list_on_right(self):
    """A list of CLASSIC teardown operators on the RIGHT of ">>" forms one scope.

    The list must be wrapped with ``context_wrapper`` on the right-hand side;
    the resulting graph is identical to the list-on-left variant.
    """

    @task()
    def mytask():
        print("mytask")

    # dag_id matches the test name (was copied from the decorator-based test).
    with DAG("test_classic_operator_context_manager_with_list_on_right") as dag:
        teardowntask = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
        teardowntask2 = BashOperator(task_id="teardowntask2", bash_command="echo 1").as_teardown()
        setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        with setuptask >> context_wrapper([teardowntask, teardowntask2]):
            mytask()

    assert len(dag.task_group.children) == 4
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {
        "mytask",
        "teardowntask",
        "teardowntask2",
    }
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask", "teardowntask2"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
    assert dag.task_group.children["teardowntask2"].upstream_task_ids == {
        "setuptask",
        "mytask",
    }
def test_tasks_decorators_called_outside_context_manager_can_link_up_with_scope(self):
    """A decorated task instantiated BEFORE the scope can be added via ``scope.add_task``."""

    @setup
    def setuptask():
        print("setup")

    @task()
    def mytask():
        print("mytask")

    @teardown
    def teardowntask():
        print("teardown")

    with DAG("test_tasks_decorators_called_outside_context_manager_can_link_up_with_scope") as dag:
        # Instantiated outside the scope: no automatic wiring yet.
        task1 = mytask()
        with setuptask() >> teardowntask() as scope:
            # Explicitly attach the pre-built task to the scope.
            scope.add_task(task1)

    assert len(dag.task_group.children) == 3
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask", "teardowntask"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {"mytask", "setuptask"}
    assert not dag.task_group.children["teardowntask"].downstream_task_ids
def test_classic_tasks_called_outside_context_manager_can_link_up_with_scope(self):
    """A classic operator created before the scope can be attached via ``scope.add_task``."""

    with DAG("classic_tasks_called_outside_context_manager") as dag:
        setup_op = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        teardown_op = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
        work_op = BashOperator(task_id="mytask", bash_command="echo 1")
        with setup_op >> teardown_op as scope:
            scope.add_task(work_op)

    children = dag.task_group.children
    assert len(children) == 3
    # Expected wiring: setuptask -> mytask -> teardowntask, plus setup -> teardown.
    assert not children["setuptask"].upstream_task_ids
    assert children["setuptask"].downstream_task_ids == {"mytask", "teardowntask"}
    assert children["mytask"].upstream_task_ids == {"setuptask"}
    assert children["mytask"].downstream_task_ids == {"teardowntask"}
    assert children["teardowntask"].upstream_task_ids == {"mytask", "setuptask"}
    assert not children["teardowntask"].downstream_task_ids
def test_tasks_decorators_called_outside_context_manager_can_link_up_with_scope_op(self):
    """Here we test that ``scope.add_task`` can take a classic Operator as argument."""

    @setup
    def setuptask():
        print("setup")

    @teardown
    def teardowntask():
        print("teardown")

    with DAG("test_tasks_decorators_called_outside_context_manager_can_link_up_with_scope_op") as dag:
        # A plain BaseOperator (not an XComArg) created outside the scope.
        task1 = BashOperator(task_id="mytask", bash_command="echo 1")
        with setuptask() >> teardowntask() as scope:
            scope.add_task(task1)

    assert len(dag.task_group.children) == 3
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask", "teardowntask"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {"mytask", "setuptask"}
    assert not dag.task_group.children["teardowntask"].downstream_task_ids
def test_classic_tasks_called_outside_context_manager_can_link_up_with_scope_xcomarg(self):
    """Here we test that ``scope.add_task`` can take an XComArg (decorated task) as argument."""

    @task
    def mytask():
        return 1

    with DAG("test_classic_tasks_called_outside_context_manager_can_link_up_with_scope_xcomarg") as dag:
        setuptask = BashOperator(task_id="setuptask", bash_command="echo 1").as_setup()
        teardowntask = BashOperator(task_id="teardowntask", bash_command="echo 1").as_teardown()
        with setuptask >> teardowntask as scope:
            # mytask() returns an XComArg, not a BaseOperator.
            scope.add_task(mytask())

    assert len(dag.task_group.children) == 3
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    assert dag.task_group.children["setuptask"].downstream_task_ids == {"mytask", "teardowntask"}
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {"mytask", "setuptask"}
    assert not dag.task_group.children["teardowntask"].downstream_task_ids
def test_add_tasks_to_context_for_different_context_level(self):
    """``scope.add_task`` wires a task to the scope level where it is added.

    Two nested scopes each adopt one pre-built task; the inner scope's setup and
    teardown are themselves wrapped by the outer scope.
    """

    @setup
    def setuptask():
        print("setup")

    @teardown
    def teardowntask():
        print("teardown")

    @task
    def mytask():
        return 1

    with DAG("test_add_tasks_to_context_for_different_context_level") as dag:
        # All four tasks are created before any scope is opened.
        task1 = mytask()
        setuptask2 = BashOperator(task_id="setuptask2", bash_command="echo 1").as_setup()
        teardowntask2 = BashOperator(task_id="teardowntask2", bash_command="echo 1").as_teardown()
        task2 = BashOperator(task_id="mytask2", bash_command="echo 1")
        with setuptask() >> teardowntask() as scope:
            scope.add_task(task1)
            # Nested scope: setuptask2/teardowntask2 sit inside the outer scope.
            with setuptask2 >> teardowntask2 as scope2:
                scope2.add_task(task2)

    assert len(dag.task_group.children) == 6
    assert not dag.task_group.children["setuptask"].upstream_task_ids
    # The outer setup precedes its work task, its teardown, and the inner setup.
    assert dag.task_group.children["setuptask"].downstream_task_ids == {
        "setuptask2",
        "mytask",
        "teardowntask",
    }
    assert dag.task_group.children["mytask"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["mytask"].downstream_task_ids == {"teardowntask"}
    # The outer teardown waits for the inner teardown as well.
    assert dag.task_group.children["teardowntask"].upstream_task_ids == {
        "mytask",
        "setuptask",
        "teardowntask2",
    }
    assert dag.task_group.children["setuptask2"].upstream_task_ids == {"setuptask"}
    assert dag.task_group.children["setuptask2"].downstream_task_ids == {"mytask2", "teardowntask2"}
    assert dag.task_group.children["mytask2"].upstream_task_ids == {"setuptask2"}
    assert dag.task_group.children["mytask2"].downstream_task_ids == {"teardowntask2"}
    assert dag.task_group.children["teardowntask2"].upstream_task_ids == {"mytask2", "setuptask2"}
def test_check_for_circular_dependency(self):
    """Using two teardowns as a joint context must not create a dependency cycle.

    ``dag.validate()`` would raise if the scope wiring produced a cycle between
    the teardowns; instead t2 is ordered before t1.
    """

    with DAG("test_check_for_circular_dependency") as dag:
        s1 = make_task("s1", type_="classic", setup_=True)
        s2 = make_task("s2", type_="classic", setup_=True)
        t1 = make_task("t1", type_="classic", teardown_=True)
        t2 = make_task("t2", type_="classic", teardown_=True)
        # Both teardowns depend on both setups.
        s1 >> s2
        s1 >> t1
        s2 >> t1
        s1 >> t2
        s2 >> t2
        # Enter both teardown scopes at once.
        with t1, t2:
            make_task("work_task", type_="classic")
        # Raises if the resulting graph has a cycle.
        dag.validate()

    assert dag.task_group.children.keys() == {"s1", "s2", "t1", "t2", "work_task"}
    assert dag.task_group.children["s1"].downstream_task_ids == {"s2", "work_task", "t1", "t2"}
    assert dag.task_group.children["s2"].downstream_task_ids == {"work_task", "t1", "t2"}
    # The teardowns are serialized (t2 before t1) instead of forming a cycle.
    assert dag.task_group.children["t2"].downstream_task_ids == {"t1"}
def test_mixing_construct_with_add_task(self):
    """A work task can join one scope implicitly and another via ``scope.add_task``.

    ``work`` is created inside t1's scope, then explicitly added to t2's scope,
    so it ends up upstream of both teardowns.
    """

    with DAG("test_mixing_construct_with_add_task") as dag:
        s1 = make_task("s1", type_="classic")
        s2 = make_task("s2", type_="classic")
        t1 = make_task("t1", type_="classic")
        t2 = make_task("t2", type_="classic")
        # Convert the plain tasks into teardowns with their paired setups.
        t1.as_teardown(setups=s1)
        t2.as_teardown(setups=s2)
        with t1:
            work = make_task("work", type_="classic")
        with t2 as scope:
            # Re-attach the same work task to the second scope.
            scope.add_task(work)

    assert dag.task_group.children.keys() == {"s1", "s2", "t1", "t2", "work"}
    assert dag.task_group.children["s1"].downstream_task_ids == {"work", "t1"}
    assert dag.task_group.children["s2"].downstream_task_ids == {"work", "t2"}
    assert not dag.task_group.children["t1"].downstream_task_ids
    assert not dag.task_group.children["t2"].downstream_task_ids
    # work feeds both teardowns, one per scope membership.
    assert dag.task_group.children["work"].downstream_task_ids == {"t1", "t2"}
| TestSetupTearDownTask |
python | getsentry__sentry | src/sentry/tasks/weekly_escalating_forecast.py | {
"start": 633,
"end": 2405
} | class ____(TypedDict):
intervals: list[str]
data: list[int]
ParsedGroupsCount = dict[int, GroupCount]
logger = logging.getLogger(__name__)
ITERATOR_CHUNK = 10_000
@instrumented_task(
name="sentry.tasks.weekly_escalating_forecast.run_escalating_forecast",
namespace=issues_tasks,
processing_deadline_duration=60 * 2,
silo_mode=SiloMode.REGION,
)
def run_escalating_forecast() -> None:
"""
Run the escalating forecast algorithm on archived until escalating issues.
"""
logger.info("Starting task for sentry.tasks.weekly_escalating_forecast.run_escalating_forecast")
for project_ids in chunked(
RangeQuerySetWrapper(
Project.objects.filter(status=ObjectStatus.ACTIVE).values_list("id", flat=True),
result_value_getter=lambda item: item,
step=ITERATOR_CHUNK,
),
ITERATOR_CHUNK,
):
generate_forecasts_for_projects.delay(project_ids=project_ids)
@instrumented_task(
name="sentry.tasks.weekly_escalating_forecast.generate_forecasts_for_projects",
namespace=issues_tasks,
processing_deadline_duration=60 * 2,
retry=Retry(times=3, delay=60),
silo_mode=SiloMode.REGION,
)
@retry
def generate_forecasts_for_projects(project_ids: list[int]) -> None:
for until_escalating_groups in chunked(
RangeQuerySetWrapper(
Group.objects.filter(
status=GroupStatus.IGNORED,
substatus=GroupSubStatus.UNTIL_ESCALATING,
project_id__in=project_ids,
last_seen__gte=datetime.now(UTC) - timedelta(days=7),
),
step=ITERATOR_CHUNK,
),
ITERATOR_CHUNK,
):
generate_and_save_forecasts(groups=until_escalating_groups)
| GroupCount |
python | doocs__leetcode | lcci/16.25.LRU Cache/Solution.py | {
"start": 148,
"end": 1677
} | class ____:
def __init__(self, capacity: int):
self.cache = {}
self.head = Node()
self.tail = Node()
self.capacity = capacity
self.size = 0
self.head.next = self.tail
self.tail.prev = self.head
def get(self, key: int) -> int:
if key not in self.cache:
return -1
node = self.cache[key]
self.move_to_head(node)
return node.val
def put(self, key: int, value: int) -> None:
if key in self.cache:
node = self.cache[key]
node.val = value
self.move_to_head(node)
else:
node = Node(key, value)
self.cache[key] = node
self.add_to_head(node)
self.size += 1
if self.size > self.capacity:
node = self.remove_tail()
self.cache.pop(node.key)
self.size -= 1
def move_to_head(self, node):
self.remove_node(node)
self.add_to_head(node)
def remove_node(self, node):
node.prev.next = node.next
node.next.prev = node.prev
def add_to_head(self, node):
node.next = self.head.next
node.prev = self.head
self.head.next = node
node.next.prev = node
def remove_tail(self):
node = self.tail.prev
self.remove_node(node)
return node
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| LRUCache |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/components.py | {
"start": 14705,
"end": 15730
} | class ____(SchemaLoader):
"""
Creates custom report schema based on provided reporting columns.
"""
reporting_columns: List[str]
report_aggregation: str
def get_json_schema(self) -> Mapping[str, Any]:
if self.report_aggregation == "DayOfWeek":
self.reporting_columns = self.reporting_columns + ["DayOfWeek", "StartOfTimePeriod", "EndOfTimePeriod"]
if self.report_aggregation == "HourOfDay":
self.reporting_columns = self.reporting_columns + ["HourOfDay", "StartOfTimePeriod", "EndOfTimePeriod"]
self.reporting_columns = list(frozenset(self.reporting_columns))
columns_schema = {col: {"type": ["null", "string"]} for col in self.reporting_columns}
schema: Mapping[str, Any] = {
"$schema": "https://json-schema.org/draft-07/schema#",
"type": ["null", "object"],
"additionalProperties": True,
"properties": columns_schema,
}
return schema
@dataclass
| CustomReportSchemaLoader |
python | graphql-python__graphene | graphene/types/definitions.py | {
"start": 971,
"end": 1510
} | class ____(GrapheneGraphQLType, GraphQLEnumType):
def serialize(self, value):
if not isinstance(value, PyEnum):
enum = self.graphene_type._meta.enum
try:
# Try and get enum by value
value = enum(value)
except ValueError:
# Try and get enum by name
try:
value = enum[value]
except KeyError:
pass
return super(GrapheneEnumType, self).serialize(value)
| GrapheneEnumType |
python | dask__distributed | distributed/http/scheduler/api.py | {
"start": 2219,
"end": 2981
} | class ____(RequestHandler):
def get(self):
self.set_header("Content-Type", "application/json")
scheduler = self.server
try:
idle_since = scheduler.check_idle()
response = {
"idle_since": idle_since,
}
self.write(json.dumps(response))
except Exception as e:
self.set_status(500, str(e))
self.write(json.dumps({"Error": "Internal Server Error"}))
routes: list[tuple] = [
("/api/v1", APIHandler, {}),
("/api/v1/retire_workers", RetireWorkersHandler, {}),
("/api/v1/get_workers", GetWorkersHandler, {}),
("/api/v1/adaptive_target", AdaptiveTargetHandler, {}),
("/api/v1/check_idle", CheckIdleHandler, {}),
]
| CheckIdleHandler |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_cbook.py | {
"start": 6336,
"end": 6380
} | class ____:
def dummy(self): pass
| Hashable |
python | sanic-org__sanic | sanic/mixins/routes.py | {
"start": 714,
"end": 31588
} | class ____(BaseMixin, metaclass=SanicMeta):
def __init__(self, *args, **kwargs) -> None:
self._future_routes: set[FutureRoute] = set()
self._future_statics: set[FutureStatic] = set()
def _apply_route(self, route: FutureRoute) -> list[Route]:
raise NotImplementedError # noqa
def route(
self,
uri: str,
methods: Optional[Iterable[str]] = None,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
stream: bool = False,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
ignore_body: bool = False,
apply: bool = True,
subprotocols: Optional[list[str]] = None,
websocket: bool = False,
unquote: bool = False,
static: bool = False,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteWrapper:
"""Decorate a function to be registered as a route.
Args:
uri (str): Path of the URL.
methods (Optional[Iterable[str]]): List or tuple of
methods allowed.
host (Optional[Union[str, List[str]]]): The host, if required.
strict_slashes (Optional[bool]): Whether to apply strict slashes
to the route.
stream (bool): Whether to allow the request to stream its body.
version (Optional[Union[int, str, float]]): Route specific
versioning.
name (Optional[str]): User-defined route name for url_for.
ignore_body (bool): Whether the handler should ignore request
body (e.g. `GET` requests).
apply (bool): Apply middleware to the route.
subprotocols (Optional[List[str]]): List of subprotocols.
websocket (bool): Enable WebSocket support.
unquote (bool): Unquote special characters in the URL path.
static (bool): Enable static route.
version_prefix (str): URL path that should be before the version
value; default: `"/v"`.
error_format (Optional[str]): Error format for the route.
ctx_kwargs (Any): Keyword arguments that begin with a `ctx_*`
prefix will be appended to the route context (`route.ctx`).
Returns:
RouteWrapper: Tuple of routes, decorated function.
Examples:
Using the method to define a GET endpoint:
```python
@app.route("/hello")
async def hello(request: Request):
return text("Hello, World!")
```
Adding context kwargs to the route:
```python
@app.route("/greet", ctx_name="World")
async def greet(request: Request):
name = request.route.ctx.name
return text(f"Hello, {name}!")
```
"""
# Fix case where the user did not prefix the URL with a /
# and will probably get confused as to why it's not working
if not uri.startswith("/") and (uri or hasattr(self, "router")):
uri = "/" + uri
if strict_slashes is None:
strict_slashes = self.strict_slashes
if not methods and not websocket:
methods = frozenset({"GET"})
route_context = self._build_route_context(ctx_kwargs)
def decorator(handler):
nonlocal uri
nonlocal methods
nonlocal host
nonlocal strict_slashes
nonlocal stream
nonlocal version
nonlocal name
nonlocal ignore_body
nonlocal subprotocols
nonlocal websocket
nonlocal static
nonlocal version_prefix
nonlocal error_format
if isinstance(handler, tuple):
# if a handler fn is already wrapped in a route, the handler
# variable will be a tuple of (existing routes, handler fn)
_, handler = handler
name = self.generate_name(name, handler)
if isinstance(host, str):
host = frozenset([host])
elif host and not isinstance(host, frozenset):
try:
host = frozenset(host)
except TypeError:
raise ValueError(
"Expected either string or Iterable of host strings, "
"not %s" % host
)
if isinstance(subprotocols, list):
# Ordered subprotocols, maintain order
subprotocols = tuple(subprotocols)
elif isinstance(subprotocols, set):
# subprotocol is unordered, keep it unordered
subprotocols = frozenset(subprotocols)
if not error_format or error_format == "auto":
error_format = self._determine_error_format(handler)
route = FutureRoute(
handler,
uri,
None if websocket else frozenset([x.upper() for x in methods]),
host,
strict_slashes,
stream,
version,
name,
ignore_body,
websocket,
subprotocols,
unquote,
static,
version_prefix,
error_format,
route_context,
)
overwrite = getattr(self, "_allow_route_overwrite", False)
if overwrite:
self._future_routes = set(
filter(lambda x: x.uri != uri, self._future_routes)
)
self._future_routes.add(route)
args = list(signature(handler).parameters.keys())
if websocket and len(args) < 2:
handler_name = handler.__name__
raise ValueError(
f"Required parameter `request` and/or `ws` missing "
f"in the {handler_name}() route?"
)
elif not args:
handler_name = handler.__name__
raise ValueError(
f"Required parameter `request` missing "
f"in the {handler_name}() route?"
)
if not websocket and stream:
handler.is_stream = stream
if apply:
self._apply_route(route, overwrite=overwrite)
if static:
return route, handler
return handler
return decorator
def add_route(
self,
handler: RouteHandler,
uri: str,
methods: Iterable[str] = frozenset({"GET"}),
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
stream: bool = False,
version_prefix: str = "/v",
error_format: Optional[str] = None,
unquote: bool = False,
**ctx_kwargs: Any,
) -> RouteHandler:
"""A helper method to register class-based view or functions as a handler to the application url routes.
Args:
handler (RouteHandler): Function or class-based view used as a route handler.
uri (str): Path of the URL.
methods (Iterable[str]): List or tuple of methods allowed; these are overridden if using an HTTPMethodView.
host (Optional[Union[str, List[str]]]): Hostname or hostnames to match for this route.
strict_slashes (Optional[bool]): If set, a route's slashes will be strict. E.g. `/foo` will not match `/foo/`.
version (Optional[Union[int, str, float]]): Version of the API for this route.
name (Optional[str]): User-defined route name for `url_for`.
stream (bool): Boolean specifying if the handler is a stream handler.
version_prefix (str): URL path that should be before the version value; default: ``/v``.
error_format (Optional[str]): Custom error format string.
unquote (bool): Boolean specifying if the handler requires unquoting.
ctx_kwargs (Any): Keyword arguments that begin with a `ctx_*` prefix will be appended to the route context (``route.ctx``). See below for examples.
Returns:
RouteHandler: The route handler.
Examples:
```python
from sanic import Sanic, text
app = Sanic("test")
async def handler(request):
return text("OK")
app.add_route(handler, "/test", methods=["GET", "POST"])
```
You can use `ctx_kwargs` to add custom context to the route. This
can often be useful when wanting to add metadata to a route that
can be used by other parts of the application (like middleware).
```python
from sanic import Sanic, text
app = Sanic("test")
async def handler(request):
return text("OK")
async def custom_middleware(request):
if request.route.ctx.monitor:
do_some_monitoring()
app.add_route(handler, "/test", methods=["GET", "POST"], ctx_monitor=True)
app.register_middleware(custom_middleware)
""" # noqa: E501
# Handle HTTPMethodView differently
if hasattr(handler, "view_class"):
methods = set()
for method in HTTP_METHODS:
view_class = getattr(handler, "view_class")
_handler = getattr(view_class, method.lower(), None)
if _handler:
methods.add(method)
if hasattr(_handler, "is_stream"):
stream = True
if strict_slashes is None:
strict_slashes = self.strict_slashes
self.route(
uri=uri,
methods=methods,
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
version_prefix=version_prefix,
error_format=error_format,
unquote=unquote,
**ctx_kwargs,
)(handler)
return handler
# Shorthand method decorators
def get(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
ignore_body: bool = True,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteHandler:
"""Decorate a function handler to create a route definition using the **GET** HTTP method.
Args:
uri (str): URL to be tagged to GET method of HTTP.
host (Optional[Union[str, List[str]]]): Host IP or FQDN for
the service to use.
strict_slashes (Optional[bool]): Instruct Sanic to check if the
request URLs need to terminate with a `/`.
version (Optional[Union[int, str, float]]): API Version.
name (Optional[str]): Unique name that can be used to identify
the route.
ignore_body (bool): Whether the handler should ignore request
body. This means the body of the request, if sent, will not
be consumed. In that instance, you will see a warning in
the logs. Defaults to `True`, meaning do not consume the body.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with a
`ctx_* prefix` will be appended to the route
context (`route.ctx`).
Returns:
RouteHandler: Object decorated with route method.
""" # noqa: E501
return cast(
RouteHandler,
self.route(
uri,
methods=frozenset({"GET"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
ignore_body=ignore_body,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
),
)
def post(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
stream: bool = False,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteHandler:
"""Decorate a function handler to create a route definition using the **POST** HTTP method.
Args:
uri (str): URL to be tagged to POST method of HTTP.
host (Optional[Union[str, List[str]]]): Host IP or FQDN for
the service to use.
strict_slashes (Optional[bool]): Instruct Sanic to check if the
request URLs need to terminate with a `/`.
stream (bool): Whether or not to stream the request body.
Defaults to `False`.
version (Optional[Union[int, str, float]]): API Version.
name (Optional[str]): Unique name that can be used to identify
the route.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with a
`ctx_*` prefix will be appended to the route
context (`route.ctx`).
Returns:
RouteHandler: Object decorated with route method.
""" # noqa: E501
return cast(
RouteHandler,
self.route(
uri,
methods=frozenset({"POST"}),
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
),
)
def put(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
stream: bool = False,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteHandler:
"""Decorate a function handler to create a route definition using the **PUT** HTTP method.
Args:
uri (str): URL to be tagged to PUT method of HTTP.
host (Optional[Union[str, List[str]]]): Host IP or FQDN for
the service to use.
strict_slashes (Optional[bool]): Instruct Sanic to check if the
request URLs need to terminate with a `/`.
stream (bool): Whether or not to stream the request body.
Defaults to `False`.
version (Optional[Union[int, str, float]]): API Version.
name (Optional[str]): Unique name that can be used to identify
the route.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with a
`ctx_*` prefix will be appended to the route
context (`route.ctx`).
Returns:
RouteHandler: Object decorated with route method.
""" # noqa: E501
return cast(
RouteHandler,
self.route(
uri,
methods=frozenset({"PUT"}),
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
),
)
def head(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
ignore_body: bool = True,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteHandler:
"""Decorate a function handler to create a route definition using the **HEAD** HTTP method.
Args:
uri (str): URL to be tagged to HEAD method of HTTP.
host (Optional[Union[str, List[str]]]): Host IP or FQDN for
the service to use.
strict_slashes (Optional[bool]): Instruct Sanic to check if the
request URLs need to terminate with a `/`.
version (Optional[Union[int, str, float]]): API Version.
name (Optional[str]): Unique name that can be used to identify
the route.
ignore_body (bool): Whether the handler should ignore request
body. This means the body of the request, if sent, will not
be consumed. In that instance, you will see a warning in
the logs. Defaults to `True`, meaning do not consume the body.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with a
`ctx_*` prefix will be appended to the route
context (`route.ctx`).
Returns:
RouteHandler: Object decorated with route method.
""" # noqa: E501
return cast(
RouteHandler,
self.route(
uri,
methods=frozenset({"HEAD"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
ignore_body=ignore_body,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
),
)
def options(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
ignore_body: bool = True,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteHandler:
"""Decorate a function handler to create a route definition using the **OPTIONS** HTTP method.
Args:
uri (str): URL to be tagged to OPTIONS method of HTTP.
host (Optional[Union[str, List[str]]]): Host IP or FQDN for
the service to use.
strict_slashes (Optional[bool]): Instruct Sanic to check if the
request URLs need to terminate with a `/`.
version (Optional[Union[int, str, float]]): API Version.
name (Optional[str]): Unique name that can be used to identify
the route.
ignore_body (bool): Whether the handler should ignore request
body. This means the body of the request, if sent, will not
be consumed. In that instance, you will see a warning in
the logs. Defaults to `True`, meaning do not consume the body.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with a
`ctx_*` prefix will be appended to the route
context (`route.ctx`).
Returns:
RouteHandler: Object decorated with route method.
""" # noqa: E501
return cast(
RouteHandler,
self.route(
uri,
methods=frozenset({"OPTIONS"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
ignore_body=ignore_body,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
),
)
def patch(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
stream=False,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteHandler:
"""Decorate a function handler to create a route definition using the **PATCH** HTTP method.
Args:
uri (str): URL to be tagged to PATCH method of HTTP.
host (Optional[Union[str, List[str]]]): Host IP or FQDN for
the service to use.
strict_slashes (Optional[bool]): Instruct Sanic to check if the
request URLs need to terminate with a `/`.
stream (bool): Set to `True` if full request streaming is needed,
`False` otherwise. Defaults to `False`.
version (Optional[Union[int, str, float]]): API Version.
name (Optional[str]): Unique name that can be used to identify
the route.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with a
`ctx_*` prefix will be appended to the route
context (`route.ctx`).
Returns:
RouteHandler: Object decorated with route method.
""" # noqa: E501
return cast(
RouteHandler,
self.route(
uri,
methods=frozenset({"PATCH"}),
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
),
)
def delete(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
ignore_body: bool = False,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
) -> RouteHandler:
"""Decorate a function handler to create a route definition using the **DELETE** HTTP method.
Args:
uri (str): URL to be tagged to the DELETE method of HTTP.
host (Optional[Union[str, List[str]]]): Host IP or FQDN for the
service to use.
strict_slashes (Optional[bool]): Instruct Sanic to check if the
request URLs need to terminate with a */*.
version (Optional[Union[int, str, float]]): API Version.
name (Optional[str]): Unique name that can be used to identify
the Route.
ignore_body (bool): Whether or not to ignore the body in the
request. Defaults to `False`.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with a `ctx_*`
prefix will be appended to the route context (`route.ctx`).
Returns:
RouteHandler: Object decorated with route method.
""" # noqa: E501
return cast(
RouteHandler,
self.route(
uri,
methods=frozenset({"DELETE"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
ignore_body=ignore_body,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
),
)
def websocket(
self,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
subprotocols: Optional[list[str]] = None,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
apply: bool = True,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
):
"""Decorate a function to be registered as a websocket route.
Args:
uri (str): Path of the URL.
host (Optional[Union[str, List[str]]]): Host IP or FQDN details.
strict_slashes (Optional[bool]): If the API endpoint needs to
terminate with a `"/"` or not.
subprotocols (Optional[List[str]]): Optional list of str with
supported subprotocols.
version (Optional[Union[int, str, float]]): WebSocket
protocol version.
name (Optional[str]): A unique name assigned to the URL so that
it can be used with url_for.
apply (bool): If set to False, it doesn't apply the route to the
app. Default is `True`.
version_prefix (str): URL path that should be before the version
value. Defaults to `"/v"`.
error_format (Optional[str]): Custom error format string.
**ctx_kwargs (Any): Keyword arguments that begin with
a `ctx_* prefix` will be appended to the route
context (`route.ctx`).
Returns:
tuple: Tuple of routes, decorated function.
"""
return self.route(
uri=uri,
host=host,
methods=None,
strict_slashes=strict_slashes,
version=version,
name=name,
apply=apply,
subprotocols=subprotocols,
websocket=True,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
)
def add_websocket_route(
self,
handler,
uri: str,
host: Optional[Union[str, list[str]]] = None,
strict_slashes: Optional[bool] = None,
subprotocols=None,
version: Optional[Union[int, str, float]] = None,
name: Optional[str] = None,
version_prefix: str = "/v",
error_format: Optional[str] = None,
**ctx_kwargs: Any,
):
"""A helper method to register a function as a websocket route.
Args:
handler (Callable): A callable function or instance of a class
that can handle the websocket request.
uri (str): URL path that will be mapped to the websocket handler.
host (Optional[Union[str, List[str]]]): Host IP or FQDN details.
strict_slashes (Optional[bool]): If the API endpoint needs to
terminate with a `"/"` or not.
subprotocols (Optional[List[str]]): Subprotocols to be used with
websocket handshake.
version (Optional[Union[int, str, float]]): Versioning information.
name (Optional[str]): A unique name assigned to the URL.
version_prefix (str): URL path before the version value.
Defaults to `"/v"`.
error_format (Optional[str]): Format for error handling.
**ctx_kwargs (Any): Keyword arguments beginning with `ctx_*`
prefix will be appended to the route context (`route.ctx`).
Returns:
Callable: Object passed as the handler.
"""
return self.websocket(
uri=uri,
host=host,
strict_slashes=strict_slashes,
subprotocols=subprotocols,
version=version,
name=name,
version_prefix=version_prefix,
error_format=error_format,
**ctx_kwargs,
)(handler)
def _determine_error_format(self, handler) -> str:
with suppress(OSError, TypeError):
src = dedent(getsource(handler))
tree = parse(src)
http_response_types = self._get_response_types(tree)
if len(http_response_types) == 1:
return next(iter(http_response_types))
return ""
def _get_response_types(self, node):
types = set()
class HttpResponseVisitor(NodeVisitor):
def visit_Return(self, node: Return) -> Any:
nonlocal types
with suppress(AttributeError):
checks = [node.value.func.id] # type: ignore
if node.value.keywords: # type: ignore
checks += [
k.value
for k in node.value.keywords # type: ignore
if k.arg == "content_type"
]
for check in checks:
if check in RESPONSE_MAPPING:
types.add(RESPONSE_MAPPING[check])
HttpResponseVisitor().visit(node)
return types
def _build_route_context(self, raw: dict[str, Any]) -> HashableDict:
ctx_kwargs = {
key.replace("ctx_", ""): raw.pop(key)
for key in {**raw}.keys()
if key.startswith("ctx_")
}
if raw:
unexpected_arguments = ", ".join(raw.keys())
raise TypeError(
f"Unexpected keyword arguments: {unexpected_arguments}"
)
return HashableDict(ctx_kwargs)
| RouteMixin |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/GLPainterItem.py | {
"start": 225,
"end": 2793
} | class ____(GLGraphicsItem.GLGraphicsItem):
def __init__(self, **kwds):
super().__init__()
glopts = kwds.pop('glOptions', 'additive')
self.setGLOptions(glopts)
def compute_projection(self):
# note that QRectF.bottom() != QRect.bottom()
rect = QtCore.QRectF(self.view().rect())
ndc_to_viewport = QtGui.QMatrix4x4()
ndc_to_viewport.viewport(rect.left(), rect.bottom(), rect.width(), -rect.height())
return ndc_to_viewport * self.mvpMatrix()
def paint(self):
self.setupGLState()
painter = QtGui.QPainter(self.view())
self.draw(painter)
painter.end()
def draw(self, painter):
painter.setPen(QtCore.Qt.GlobalColor.white)
painter.setRenderHints(QtGui.QPainter.RenderHint.Antialiasing | QtGui.QPainter.RenderHint.TextAntialiasing)
rect = self.view().rect()
af = QtCore.Qt.AlignmentFlag
painter.drawText(rect, af.AlignTop | af.AlignRight, 'TR')
painter.drawText(rect, af.AlignBottom | af.AlignLeft, 'BL')
painter.drawText(rect, af.AlignBottom | af.AlignRight, 'BR')
opts = self.view().cameraParams()
lines = []
center = opts['center']
lines.append(f"center : ({center.x():.1f}, {center.y():.1f}, {center.z():.1f})")
for key in ['distance', 'fov', 'elevation', 'azimuth']:
lines.append(f"{key} : {opts[key]:.1f}")
xyz = self.view().cameraPosition()
lines.append(f"xyz : ({xyz.x():.1f}, {xyz.y():.1f}, {xyz.z():.1f})")
info = "\n".join(lines)
painter.drawText(rect, af.AlignTop | af.AlignLeft, info)
project = self.compute_projection()
hsize = SIZE // 2
for xi in range(-hsize, hsize+1):
for yi in range(-hsize, hsize+1):
if xi == -hsize and yi == -hsize:
# skip one corner for visual orientation
continue
vec3 = QtGui.QVector3D(xi, yi, 0)
pos = project.map(vec3).toPointF()
painter.drawEllipse(pos, 1, 1)
pg.mkQApp("GLPainterItem Example")
glv = GLViewWidget()
glv.show()
glv.setWindowTitle('pyqtgraph example: GLPainterItem')
glv.setCameraPosition(distance=50, elevation=90, azimuth=0)
griditem = GLGridItem()
griditem.setSize(SIZE, SIZE)
griditem.setSpacing(1, 1)
glv.addItem(griditem)
axisitem = GLAxisItem()
axisitem.setSize(SIZE/2, SIZE/2, 1)
glv.addItem(axisitem)
paintitem = GLPainterItem()
glv.addItem(paintitem)
if __name__ == '__main__':
pg.exec()
| GLPainterItem |
python | gevent__gevent | src/gevent/tests/test__local.py | {
"start": 1407,
"end": 1601
} | class ____(local):
def __getattr__(self, name):
if name == 'foo':
return 42
return super(WithGetattr, self).__getattr__(name) # pylint:disable=no-member
| WithGetattr |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/expression7.py | {
"start": 707,
"end": 994
} | class ____: ...
# This should generate an error because binary operators are not allowed
# in type annotations.
def func4(a: ClassA and ClassB): ...
# This should generate an error because binary operators are not allowed
# in type annotations.
def func5(a: ClassA or ClassB): ...
| ClassB |
python | django-debug-toolbar__django-debug-toolbar | tests/test_utils.py | {
"start": 298,
"end": 974
} | class ____(unittest.TestCase):
def test_func(self):
def x():
return 1
res = get_name_from_obj(x)
self.assertEqual(
res, "tests.test_utils.GetNameFromObjTestCase.test_func.<locals>.x"
)
def test_lambda(self):
res = get_name_from_obj(lambda: 1)
self.assertEqual(
res, "tests.test_utils.GetNameFromObjTestCase.test_lambda.<locals>.<lambda>"
)
def test_class(self):
class A:
pass
res = get_name_from_obj(A)
self.assertEqual(
res, "tests.test_utils.GetNameFromObjTestCase.test_class.<locals>.A"
)
| GetNameFromObjTestCase |
python | django__django | tests/middleware_exceptions/middleware.py | {
"start": 3807,
"end": 3886
} | class ____(BaseMiddleware):
pass
@sync_only_middleware
| SyncAndAsyncMiddleware |
python | python-poetry__poetry | src/poetry/utils/env/site_packages.py | {
"start": 456,
"end": 7221
} | class ____:
def __init__(
self,
purelib: Path,
platlib: Path | None = None,
fallbacks: list[Path] | None = None,
) -> None:
self._purelib = purelib
self._platlib = platlib or purelib
if platlib and platlib.resolve() == purelib.resolve():
self._platlib = purelib
self._fallbacks = fallbacks or []
self._candidates: list[Path] = []
for path in itertools.chain([self._purelib, self._platlib], self._fallbacks):
if path not in self._candidates:
self._candidates.append(path)
self._writable_candidates: list[Path] | None = None
@property
def path(self) -> Path:
return self._purelib
@property
def purelib(self) -> Path:
return self._purelib
@property
def platlib(self) -> Path:
return self._platlib
@property
def candidates(self) -> list[Path]:
return self._candidates
@property
def writable_candidates(self) -> list[Path]:
if self._writable_candidates is not None:
return self._writable_candidates
self._writable_candidates = []
for candidate in self._candidates:
if not is_dir_writable(path=candidate, create=True):
continue
self._writable_candidates.append(candidate)
return self._writable_candidates
def make_candidates(
self, path: Path, writable_only: bool = False, strict: bool = False
) -> list[Path]:
candidates = self._candidates if not writable_only else self.writable_candidates
if path.is_absolute():
for candidate in candidates:
with contextlib.suppress(ValueError):
path.relative_to(candidate)
return [path]
site_type = "writable " if writable_only else ""
raise ValueError(
f"{path} is not relative to any discovered {site_type}sites"
)
results = [candidate / path for candidate in candidates]
if not results and strict:
raise RuntimeError(
f'Unable to find a suitable destination for "{path}" in'
f" {paths_csv(self._candidates)}"
)
return results
def distributions(
self, name: str | None = None, writable_only: bool = False
) -> Iterable[metadata.Distribution]:
path = list(
map(
str, self._candidates if not writable_only else self.writable_candidates
)
)
yield from metadata.PathDistribution.discover(name=name, path=path)
def find_distribution(
self, name: str, writable_only: bool = False
) -> metadata.Distribution | None:
for distribution in self.distributions(name=name, writable_only=writable_only):
return distribution
return None
def find_distribution_files_with_name(
self, distribution_name: str, name: str, writable_only: bool = False
) -> Iterable[Path]:
for distribution in self.distributions(
name=distribution_name, writable_only=writable_only
):
files = [] if distribution.files is None else distribution.files
for file in files:
if file.name == name:
path = distribution.locate_file(file)
assert isinstance(path, Path)
yield path
def find_distribution_direct_url_json_files(
self, distribution_name: str, writable_only: bool = False
) -> Iterable[Path]:
return self.find_distribution_files_with_name(
distribution_name=distribution_name,
name="direct_url.json",
writable_only=writable_only,
)
def remove_distribution_files(self, distribution_name: str) -> list[Path]:
paths = []
for distribution in self.distributions(
name=distribution_name, writable_only=True
):
files = [] if distribution.files is None else distribution.files
for file in files:
path = distribution.locate_file(file)
assert isinstance(path, Path)
path.unlink(missing_ok=True)
distribution_path: Path = distribution._path # type: ignore[attr-defined]
if distribution_path.exists():
remove_directory(distribution_path, force=True)
paths.append(distribution_path)
return paths
@overload
def _path_method_wrapper(
self,
path: Path,
method: str,
*args: Any,
return_first: Literal[False],
writable_only: bool = False,
**kwargs: Any,
) -> list[tuple[Path, Any]]: ...
@overload
def _path_method_wrapper(
self,
path: Path,
method: str,
*args: Any,
return_first: bool = True,
writable_only: bool = False,
**kwargs: Any,
) -> tuple[Path, Any]: ...
def _path_method_wrapper(
self,
path: Path,
method: str,
*args: Any,
return_first: bool = True,
writable_only: bool = False,
**kwargs: Any,
) -> tuple[Path, Any] | list[tuple[Path, Any]]:
candidates = self.make_candidates(
path, writable_only=writable_only, strict=True
)
results = []
for candidate in candidates:
with contextlib.suppress(OSError):
result = candidate, getattr(candidate, method)(*args, **kwargs)
if return_first:
return result
results.append(result)
if results:
return results
raise OSError(f"Unable to access any of {paths_csv(candidates)}")
def write_text(self, path: Path, *args: Any, **kwargs: Any) -> Path:
paths: tuple[Path, Any] = self._path_method_wrapper(
path, "write_text", *args, **kwargs
)
return paths[0]
def mkdir(self, path: Path, *args: Any, **kwargs: Any) -> Path:
paths: tuple[Path, Any] = self._path_method_wrapper(
path, "mkdir", *args, **kwargs
)
return paths[0]
def exists(self, path: Path) -> bool:
return any(
value[-1]
for value in self._path_method_wrapper(path, "exists", return_first=False)
)
def find(
self,
path: Path,
writable_only: bool = False,
) -> list[Path]:
return [
value[0]
for value in self._path_method_wrapper(
path, "exists", return_first=False, writable_only=writable_only
)
if value[-1] is True
]
| SitePackages |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 13152,
"end": 13538
} | class ____(DagsterUserCodeExecutionError):
"""Indicates an error occurred while loading an input for a step."""
def __init__(self, *args, **kwargs):
self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")
self.input_name = check.str_param(kwargs.pop("input_name"), "input_name")
super().__init__(*args, **kwargs)
| DagsterExecutionLoadInputError |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py | {
"start": 29303,
"end": 29817
} | class ____(MetadataValue[TableSchema]):
"""Representation of a schema for arbitrary tabular data.
Args:
schema (TableSchema): The dictionary containing the schema representation.
"""
schema: PublicAttr[TableSchema]
@public
@property
def value(self) -> TableSchema:
"""TableSchema: The wrapped :py:class:`TableSchema`."""
return self.schema
@public
@whitelist_for_serdes
@record_custom(field_to_new_mapping={"lineage": "column_lineage"})
| TableSchemaMetadataValue |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/index1.py | {
"start": 930,
"end": 1164
} | class ____:
def __setitem__(self, index: int, value: "ClassC"): ...
B_or_C = TypeVar("B_or_C", ClassB, ClassC)
def func1(container: B_or_C):
a = container
a[1] = container
TD = TypeVar("TD", bound="ClassD[Any]")
| ClassC |
python | Lightning-AI__lightning | tests/tests_pytorch/utilities/test_auto_restart.py | {
"start": 1027,
"end": 5575
} | class ____(BoringModel):
def __init__(self, should_signal: bool, failure_on_step: bool, failure_on_training: bool, on_last_batch: bool):
super().__init__()
self.should_signal = should_signal
self.failure_on_step = failure_on_step
self.failure_on_training = failure_on_training
self.on_last_batch = on_last_batch
self.seen_train_batches = []
def _signal(self):
if self.should_signal:
# simulate `os.kill(os.getpid(), signal.SIGTERM)`
self.trainer._signal_connector.received_sigterm = True
def training_step(self, batch, batch_idx):
self.seen_train_batches.append(batch)
should_signal = self.trainer.fit_loop.epoch_loop._is_training_done if self.on_last_batch else batch_idx == 2
if self.failure_on_step and self.failure_on_training and should_signal:
self._signal()
return super().training_step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
should_signal = (
self.trainer.fit_loop.epoch_loop.val_loop.batch_progress.is_last_batch
if self.on_last_batch
else batch_idx == 2
)
if self.failure_on_step and not self.failure_on_training and should_signal:
self._signal()
return super().validation_step(batch, batch_idx)
def on_train_epoch_end(self):
if not self.failure_on_step and self.failure_on_training:
self._signal()
def on_validation_epoch_end(self):
if not self.failure_on_step and not self.failure_on_training:
self._signal()
def train_dataloader(self):
return DataLoader(RandomDataset(32, 4))
def val_dataloader(self):
return DataLoader(RandomDataset(32, 4))
def _fit_model(
tmp_path, should_signal, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=None
):
seed_everything(42)
model = TestAutoRestartModelUnderSignal(should_signal, failure_on_step, failure_on_training, on_last_batch)
class MyTestCallback(Callback):
raising_function = None
def on_exception(self, trainer, pl_module, exception):
if isinstance(exception, SIGTERMException):
caller = inspect.trace()[-1]
class_name = caller[0].f_locals["self"].__class__.__name__
self.raising_method = f"{class_name}:{caller.function}"
test_callback = MyTestCallback()
trainer = Trainer(
default_root_dir=tmp_path,
max_epochs=1,
limit_train_batches=4,
limit_val_batches=4,
val_check_interval=val_check_interval,
num_sanity_val_steps=0,
callbacks=[test_callback, OnExceptionCheckpoint(tmp_path)],
)
if should_signal:
with pytest.raises(SIGTERMException):
trainer.fit(model)
assert test_callback.raising_method == status
else:
trainer.fit(model)
assert trainer.received_sigterm == should_signal
return model
@pytest.mark.parametrize("on_last_batch", [False, True])
@pytest.mark.parametrize("val_check_interval", [0.5, 1.0])
@pytest.mark.parametrize("failure_on_training", [False, True])
@pytest.mark.parametrize("failure_on_step", [False, True])
@RunIf(skip_windows=True)
def test_auto_restart_under_signal(on_last_batch, val_check_interval, failure_on_training, failure_on_step, tmp_path):
if failure_on_step:
if on_last_batch:
if failure_on_training:
# Breaking on first validation batch.
# This is done to capture the random state of the validation dataloader.
status = "_EvaluationLoop:_evaluation_step"
else:
# when breaking on last batch of validation, we should exist on `run_end` val_check_interval == 1.0
status = "_FitLoop:on_advance_end" if val_check_interval == 1.0 else "_TrainingEpochLoop:on_advance_end"
else:
status = "_TrainingEpochLoop:on_advance_end" if failure_on_training else "_EvaluationLoop:_evaluation_step"
else:
if val_check_interval == 1.0:
status = "_FitLoop:on_advance_end"
else:
# `on_train_epoch_end` happens after `on_validation_epoch_end` since Lightning v1.4
status = "_FitLoop:on_advance_end" if failure_on_training else "_TrainingEpochLoop:on_advance_end"
_fit_model(tmp_path, True, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=status)
| TestAutoRestartModelUnderSignal |
python | PyCQA__pylint | pylint/config/config_file_parser.py | {
"start": 664,
"end": 3876
} | class ____:
"""Class to parse various formats of configuration files."""
@staticmethod
def parse_ini_file(file_path: Path) -> PylintConfigFileData:
"""Parse and handle errors of an ini configuration file.
Raises ``configparser.Error``.
"""
parser = configparser.ConfigParser(inline_comment_prefixes=("#", ";"))
# Use this encoding in order to strip the BOM marker, if any.
with open(file_path, encoding="utf_8_sig") as fp:
parser.read_file(fp)
config_content: dict[str, str] = {}
options: list[str] = []
ini_file_with_sections = _RawConfParser._ini_file_with_sections(file_path)
for section in parser.sections():
if ini_file_with_sections and not section.startswith("pylint"):
continue
for option, value in parser[section].items():
config_content[option] = value
options += [f"--{option}", value]
return config_content, options
@staticmethod
def _ini_file_with_sections(file_path: Path) -> bool:
"""Return whether the file uses sections."""
if "setup.cfg" in file_path.parts:
return True
if "tox.ini" in file_path.parts:
return True
return False
@staticmethod
def parse_toml_file(file_path: Path) -> PylintConfigFileData:
"""Parse and handle errors of a toml configuration file.
Raises ``tomllib.TOMLDecodeError``.
"""
with open(file_path, mode="rb") as fp:
content = tomllib.load(fp)
try:
sections_values = content["tool"]["pylint"]
except KeyError:
return {}, []
config_content: dict[str, str] = {}
options: list[str] = []
for opt, values in sections_values.items():
if isinstance(values, dict):
for config, value in values.items():
value = _parse_rich_type_value(value)
config_content[config] = value
options += [f"--{config}", value]
else:
values = _parse_rich_type_value(values)
config_content[opt] = values
options += [f"--{opt}", values]
return config_content, options
@staticmethod
def parse_config_file(
file_path: Path | None, verbose: bool
) -> PylintConfigFileData:
"""Parse a config file and return str-str pairs.
Raises ``tomllib.TOMLDecodeError``, ``configparser.Error``.
"""
if file_path is None:
if verbose:
print(
"No config file found, using default configuration", file=sys.stderr
)
return {}, []
file_path = Path(os.path.expandvars(file_path)).expanduser()
if not file_path.exists():
raise OSError(f"The config file {file_path} doesn't exist!")
if verbose:
print(f"Using config file {file_path}", file=sys.stderr)
if file_path.suffix == ".toml":
return _RawConfParser.parse_toml_file(file_path)
return _RawConfParser.parse_ini_file(file_path)
| _RawConfParser |
python | py-pdf__pypdf | tests/__init__.py | {
"start": 4294,
"end": 5022
} | class ____:
"""Allow changing the PIL/Pillow configuration for some limited scope."""
def __init__(self) -> None:
self._saved_load_truncated_images = False
def __enter__(self) -> Self:
# Allow loading incomplete images.
from PIL import ImageFile # noqa: PLC0415
self._saved_load_truncated_images = ImageFile.LOAD_TRUNCATED_IMAGES
ImageFile.LOAD_TRUNCATED_IMAGES = True
return self
def __exit__(self, type_, value, traceback) -> Optional[bool]:
from PIL import ImageFile # noqa: PLC0415
ImageFile.LOAD_TRUNCATED_IMAGES = self._saved_load_truncated_images
if type_:
# Error.
return None
return True
| PILContext |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_contrast_test.py | {
"start": 164,
"end": 4901
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomContrast,
init_kwargs={
"factor": 0.75,
"value_range": (0, 255),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
self.run_layer_test(
layers.RandomContrast,
init_kwargs={
"factor": 0.75,
"value_range": (0, 255),
"seed": 1,
"data_format": "channels_first",
},
input_shape=(8, 3, 4, 4),
supports_masking=False,
expected_output_shape=(8, 3, 4, 4),
)
def test_random_contrast_with_value_range_0_to_255(self):
seed = 9809
np.random.seed(seed)
data_format = backend.config.image_data_format()
if data_format == "channels_last":
inputs = np.random.random((12, 8, 16, 3))
height_axis = -3
width_axis = -2
else:
inputs = np.random.random((12, 3, 8, 16))
height_axis = -2
width_axis = -1
inputs = backend.convert_to_tensor(inputs, dtype="float32")
layer = layers.RandomContrast(
factor=0.5, value_range=(0, 255), seed=seed
)
transformation = layer.get_random_transformation(inputs, training=True)
outputs = layer.transform_images(inputs, transformation, training=True)
# Actual contrast arithmetic
np.random.seed(seed)
factor = backend.convert_to_numpy(transformation["contrast_factor"])
inputs = backend.convert_to_numpy(inputs)
inp_mean = np.mean(inputs, axis=height_axis, keepdims=True)
inp_mean = np.mean(inp_mean, axis=width_axis, keepdims=True)
actual_outputs = (inputs - inp_mean) * factor + inp_mean
outputs = backend.convert_to_numpy(outputs)
actual_outputs = np.clip(actual_outputs, 0, 255)
self.assertAllClose(outputs, actual_outputs)
def test_random_contrast_with_value_range_0_to_1(self):
seed = 9809
np.random.seed(seed)
data_format = backend.config.image_data_format()
if data_format == "channels_last":
inputs = np.random.random((12, 8, 16, 3))
height_axis = -3
width_axis = -2
else:
inputs = np.random.random((12, 3, 8, 16))
height_axis = -2
width_axis = -1
inputs = backend.convert_to_tensor(inputs, dtype="float32")
layer = layers.RandomContrast(factor=0.5, value_range=(0, 1), seed=seed)
transformation = layer.get_random_transformation(inputs, training=True)
outputs = layer.transform_images(inputs, transformation, training=True)
# Actual contrast arithmetic
np.random.seed(seed)
factor = backend.convert_to_numpy(transformation["contrast_factor"])
inputs = backend.convert_to_numpy(inputs)
inp_mean = np.mean(inputs, axis=height_axis, keepdims=True)
inp_mean = np.mean(inp_mean, axis=width_axis, keepdims=True)
actual_outputs = (inputs - inp_mean) * factor + inp_mean
outputs = backend.convert_to_numpy(outputs)
actual_outputs = np.clip(actual_outputs, 0, 1)
self.assertAllClose(outputs, actual_outputs)
def test_tf_data_compatibility(self):
layer = layers.RandomContrast(factor=0.5, seed=1337)
input_data = np.random.random((2, 8, 8, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
next(iter(ds)).numpy()
def test_dict_input(self):
layer = layers.RandomContrast(factor=0.1, bounding_box_format="xyxy")
data = {
"images": np.random.random((2, 4, 5, 3)),
"labels": np.random.random((2, 7)),
"segmentation_masks": np.random.random((2, 4, 5, 7)),
"bounding_boxes": {
"boxes": np.array([[1, 2, 2, 3]]),
"labels": np.array([0]),
},
}
transformed_data = layer(data)
self.assertEqual(
data["images"].shape[:-1],
transformed_data["segmentation_masks"].shape[:-1],
)
self.assertAllClose(data["labels"], transformed_data["labels"])
self.assertAllClose(
data["bounding_boxes"]["boxes"],
transformed_data["bounding_boxes"]["boxes"],
)
self.assertAllClose(
data["bounding_boxes"]["labels"],
transformed_data["bounding_boxes"]["labels"],
)
| RandomContrastTest |
python | getsentry__sentry | src/sentry/flags/providers.py | {
"start": 16690,
"end": 17531
} | class ____(TypedDict):
"""A simplified type which is easier to work with than the row definition."""
action: str
flag: str
created_at: datetime.datetime
created_by: str
tags: dict[str, str]
def handle_flag_pole_event_internal(items: list[FlagAuditLogItem], organization_id: int) -> None:
write(
[
{
"action": ACTION_MAP[item["action"]],
"created_at": item["created_at"],
"created_by": item["created_by"],
"created_by_type": CREATED_BY_TYPE_MAP["name"],
"flag": item["flag"],
"organization_id": organization_id,
"provider": PROVIDER_MAP["flagpole"],
"tags": item["tags"],
}
for item in items
]
)
"""Helpers."""
| FlagAuditLogItem |
python | Textualize__textual | src/textual/drivers/win32.py | {
"start": 2472,
"end": 2631
} | class ____(Structure):
"""https://docs.microsoft.com/en-us/windows/console/menu-event-record-str"""
_fields_ = [("dwCommandId", UINT)]
| MENU_EVENT_RECORD |
python | django__django | tests/admin_views/models.py | {
"start": 7989,
"end": 8213
} | class ____(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __str__(self):
return "%s (%s)" % (self.name, self.email)
| Subscriber |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 135047,
"end": 136274
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
access_token: str,
api_key: str,
client_id: str,
plaid_env: str,
start_date: Optional[str] = None,
):
"""Airbyte Source for Plaid.
Documentation can be found at https://plaid.com/docs/api/
Args:
name (str): The name of the destination.
access_token (str): The end-user's Link access token.
api_key (str): The Plaid API key to use to hit the API.
client_id (str): The Plaid client id
plaid_env (str): The Plaid environment
start_date (Optional[str]): The date from which you'd like to replicate data for Plaid in the format YYYY-MM-DD. All data generated after this date will be replicated.
"""
self.access_token = check.str_param(access_token, "access_token")
self.api_key = check.str_param(api_key, "api_key")
self.client_id = check.str_param(client_id, "client_id")
self.plaid_env = check.str_param(plaid_env, "plaid_env")
self.start_date = check.opt_str_param(start_date, "start_date")
super().__init__("Plaid", name)
| PlaidSource |
python | sympy__sympy | sympy/tensor/index_methods.py | {
"start": 560,
"end": 15428
} | class ____(Exception):
pass
def _unique_and_repeated(inds):
"""
Returns the unique and repeated indices. Also note, from the examples given below
that the order of indices is maintained as given in the input.
Examples
========
>>> from sympy.tensor.index_methods import _unique_and_repeated
>>> _unique_and_repeated([2, 3, 1, 3, 0, 4, 0])
([2, 1, 4], [3, 0])
"""
uniq = OrderedDict()
for i in inds:
if i in uniq:
uniq[i] = 0
else:
uniq[i] = 1
return sift(uniq, lambda x: uniq[x], binary=True)
def _remove_repeated(inds):
"""
Removes repeated objects from sequences
Returns a set of the unique objects and a tuple of all that have been
removed.
Examples
========
>>> from sympy.tensor.index_methods import _remove_repeated
>>> l1 = [1, 2, 3, 2]
>>> _remove_repeated(l1)
({1, 3}, (2,))
"""
u, r = _unique_and_repeated(inds)
return set(u), tuple(r)
def _get_indices_Mul(expr, return_dummies=False):
"""Determine the outer indices of a Mul object.
Examples
========
>>> from sympy.tensor.index_methods import _get_indices_Mul
>>> from sympy.tensor.indexed import IndexedBase, Idx
>>> i, j, k = map(Idx, ['i', 'j', 'k'])
>>> x = IndexedBase('x')
>>> y = IndexedBase('y')
>>> _get_indices_Mul(x[i, k]*y[j, k])
({i, j}, {})
>>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True)
({i, j}, {}, (k,))
"""
inds = list(map(get_indices, expr.args))
inds, syms = list(zip(*inds))
inds = list(map(list, inds))
inds = list(reduce(lambda x, y: x + y, inds))
inds, dummies = _remove_repeated(inds)
symmetry = {}
for s in syms:
for pair in s:
if pair in symmetry:
symmetry[pair] *= s[pair]
else:
symmetry[pair] = s[pair]
if return_dummies:
return inds, symmetry, dummies
else:
return inds, symmetry
def _get_indices_Pow(expr):
"""Determine outer indices of a power or an exponential.
A power is considered a universal function, so that the indices of a Pow is
just the collection of indices present in the expression. This may be
viewed as a bit inconsistent in the special case:
x[i]**2 = x[i]*x[i] (1)
The above expression could have been interpreted as the contraction of x[i]
with itself, but we choose instead to interpret it as a function
lambda y: y**2
applied to each element of x (a universal function in numpy terms). In
order to allow an interpretation of (1) as a contraction, we need
contravariant and covariant Idx subclasses. (FIXME: this is not yet
implemented)
Expressions in the base or exponent are subject to contraction as usual,
but an index that is present in the exponent, will not be considered
contractable with its own base. Note however, that indices in the same
exponent can be contracted with each other.
Examples
========
>>> from sympy.tensor.index_methods import _get_indices_Pow
>>> from sympy import Pow, exp, IndexedBase, Idx
>>> A = IndexedBase('A')
>>> x = IndexedBase('x')
>>> i, j, k = map(Idx, ['i', 'j', 'k'])
>>> _get_indices_Pow(exp(A[i, j]*x[j]))
({i}, {})
>>> _get_indices_Pow(Pow(x[i], x[i]))
({i}, {})
>>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i]))
({i}, {})
"""
base, exp = expr.as_base_exp()
binds, bsyms = get_indices(base)
einds, esyms = get_indices(exp)
inds = binds | einds
# FIXME: symmetries from power needs to check special cases, else nothing
symmetries = {}
return inds, symmetries
def _get_indices_Add(expr):
"""Determine outer indices of an Add object.
In a sum, each term must have the same set of outer indices. A valid
expression could be
x(i)*y(j) - x(j)*y(i)
But we do not allow expressions like:
x(i)*y(j) - z(j)*z(j)
FIXME: Add support for Numpy broadcasting
Examples
========
>>> from sympy.tensor.index_methods import _get_indices_Add
>>> from sympy.tensor.indexed import IndexedBase, Idx
>>> i, j, k = map(Idx, ['i', 'j', 'k'])
>>> x = IndexedBase('x')
>>> y = IndexedBase('y')
>>> _get_indices_Add(x[i] + x[k]*y[i, k])
({i}, {})
"""
inds = list(map(get_indices, expr.args))
inds, syms = list(zip(*inds))
# allow broadcast of scalars
non_scalars = [x for x in inds if x != set()]
if not non_scalars:
return set(), {}
if not all(x == non_scalars[0] for x in non_scalars[1:]):
raise IndexConformanceException(f"Indices are not consistent: {expr}")
if not reduce(lambda x, y: x != y or y, syms):
symmetries = syms[0]
else:
# FIXME: search for symmetries
symmetries = {}
return non_scalars[0], symmetries
def get_indices(expr):
"""Determine the outer indices of expression ``expr``
By *outer* we mean indices that are not summation indices. Returns a set
and a dict. The set contains outer indices and the dict contains
information about index symmetries.
Examples
========
>>> from sympy.tensor.index_methods import get_indices
>>> from sympy import symbols
>>> from sympy.tensor import IndexedBase
>>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
>>> i, j, a, z = symbols('i j a z', integer=True)
The indices of the total expression is determined, Repeated indices imply a
summation, for instance the trace of a matrix A:
>>> get_indices(A[i, i])
(set(), {})
In the case of many terms, the terms are required to have identical
outer indices. Else an IndexConformanceException is raised.
>>> get_indices(x[i] + A[i, j]*y[j])
({i}, {})
:Exceptions:
An IndexConformanceException means that the terms ar not compatible, e.g.
>>> get_indices(x[i] + y[j]) #doctest: +SKIP
(...)
IndexConformanceException: Indices are not consistent: x(i) + y(j)
.. warning::
The concept of *outer* indices applies recursively, starting on the deepest
level. This implies that dummies inside parenthesis are assumed to be
summed first, so that the following expression is handled gracefully:
>>> get_indices((x[i] + A[i, j]*y[j])*x[j])
({i, j}, {})
This is correct and may appear convenient, but you need to be careful
with this as SymPy will happily .expand() the product, if requested. The
resulting expression would mix the outer ``j`` with the dummies inside
the parenthesis, which makes it a different expression. To be on the
safe side, it is best to avoid such ambiguities by using unique indices
for all contractions that should be held separate.
"""
# We call ourself recursively to determine indices of sub expressions.
# break recursion
if isinstance(expr, Indexed):
c = expr.indices
inds, dummies = _remove_repeated(c)
return inds, {}
elif expr is None:
return set(), {}
elif isinstance(expr, Idx):
return {expr}, {}
elif expr.is_Atom:
return set(), {}
# recurse via specialized functions
else:
if expr.is_Mul:
return _get_indices_Mul(expr)
elif expr.is_Add:
return _get_indices_Add(expr)
elif expr.is_Pow or isinstance(expr, exp):
return _get_indices_Pow(expr)
elif isinstance(expr, Piecewise):
# FIXME: No support for Piecewise yet
return set(), {}
elif isinstance(expr, Function):
# Support ufunc like behaviour by returning indices from arguments.
# Functions do not interpret repeated indices across arguments
# as summation
ind0 = set()
for arg in expr.args:
ind, sym = get_indices(arg)
ind0 |= ind
return ind0, sym
# this test is expensive, so it should be at the end
elif not expr.has(Indexed):
return set(), {}
raise NotImplementedError(
f"FIXME: No specialized handling of type {type(expr)}")
def get_contraction_structure(expr):
"""Determine dummy indices of ``expr`` and describe its structure
By *dummy* we mean indices that are summation indices.
The structure of the expression is determined and described as follows:
1) A conforming summation of Indexed objects is described with a dict where
the keys are summation indices and the corresponding values are sets
containing all terms for which the summation applies. All Add objects
in the SymPy expression tree are described like this.
2) For all nodes in the SymPy expression tree that are *not* of type Add, the
following applies:
If a node discovers contractions in one of its arguments, the node
itself will be stored as a key in the dict. For that key, the
corresponding value is a list of dicts, each of which is the result of a
recursive call to get_contraction_structure(). The list contains only
dicts for the non-trivial deeper contractions, omitting dicts with None
as the one and only key.
.. Note:: The presence of expressions among the dictionary keys indicates
multiple levels of index contractions. A nested dict displays nested
contractions and may itself contain dicts from a deeper level. In
practical calculations the summation in the deepest nested level must be
calculated first so that the outer expression can access the resulting
indexed object.
Examples
========
>>> from sympy.tensor.index_methods import get_contraction_structure
>>> from sympy import default_sort_key
>>> from sympy.tensor import IndexedBase, Idx
>>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
>>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l'])
>>> get_contraction_structure(x[i]*y[i] + A[j, j])
{(i,): {x[i]*y[i]}, (j,): {A[j, j]}}
>>> get_contraction_structure(x[i]*y[j])
{None: {x[i]*y[j]}}
A multiplication of contracted factors results in nested dicts representing
the internal contractions.
>>> d = get_contraction_structure(x[i, i]*y[j, j])
>>> sorted(d.keys(), key=default_sort_key)
[None, x[i, i]*y[j, j]]
In this case, the product has no contractions:
>>> d[None]
{x[i, i]*y[j, j]}
Factors are contracted "first":
>>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key)
[{(i,): {x[i, i]}}, {(j,): {y[j, j]}}]
A parenthesized Add object is also returned as a nested dictionary. The
term containing the parenthesis is a Mul with a contraction among the
arguments, so it will be found as a key in the result. It stores the
dictionary resulting from a recursive call on the Add expression.
>>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j]))
>>> sorted(d.keys(), key=default_sort_key)
[(A[i, j]*x[j] + y[i])*x[i], (i,)]
>>> d[(i,)]
{(A[i, j]*x[j] + y[i])*x[i]}
>>> d[x[i]*(A[i, j]*x[j] + y[i])]
[{None: {y[i]}, (j,): {A[i, j]*x[j]}}]
Powers with contractions in either base or exponent will also be found as
keys in the dictionary, mapping to a list of results from recursive calls:
>>> d = get_contraction_structure(A[j, j]**A[i, i])
>>> d[None]
{A[j, j]**A[i, i]}
>>> nested_contractions = d[A[j, j]**A[i, i]]
>>> nested_contractions[0]
{(j,): {A[j, j]}}
>>> nested_contractions[1]
{(i,): {A[i, i]}}
The description of the contraction structure may appear complicated when
represented with a string in the above examples, but it is easy to iterate
over:
>>> from sympy import Expr
>>> for key in d:
... if isinstance(key, Expr):
... continue
... for term in d[key]:
... if term in d:
... # treat deepest contraction first
... pass
... # treat outermost contactions here
"""
# We call ourself recursively to inspect sub expressions.
if isinstance(expr, Indexed):
junk, key = _remove_repeated(expr.indices)
return {key or None: {expr}}
elif expr.is_Atom:
return {None: {expr}}
elif expr.is_Mul:
junk, junk, key = _get_indices_Mul(expr, return_dummies=True)
result = {key or None: {expr}}
# recurse on every factor
nested = []
for fac in expr.args:
facd = get_contraction_structure(fac)
if not (None in facd and len(facd) == 1):
nested.append(facd)
if nested:
result[expr] = nested
return result
elif expr.is_Pow or isinstance(expr, exp):
# recurse in base and exp separately. If either has internal
# contractions we must include ourselves as a key in the returned dict
b, e = expr.as_base_exp()
dbase = get_contraction_structure(b)
dexp = get_contraction_structure(e)
dicts = []
for d in dbase, dexp:
if not (None in d and len(d) == 1):
dicts.append(d)
result = {None: {expr}}
if dicts:
result[expr] = dicts
return result
elif expr.is_Add:
# Note: we just collect all terms with identical summation indices, We
# do nothing to identify equivalent terms here, as this would require
# substitutions or pattern matching in expressions of unknown
# complexity.
result = {}
for term in expr.args:
# recurse on every term
d = get_contraction_structure(term)
for key in d:
if key in result:
result[key] |= d[key]
else:
result[key] = d[key]
return result
elif isinstance(expr, Piecewise):
# FIXME: No support for Piecewise yet
return {None: expr}
elif isinstance(expr, Function):
# Collect non-trivial contraction structures in each argument
# We do not report repeated indices in separate arguments as a
# contraction
deeplist = []
for arg in expr.args:
deep = get_contraction_structure(arg)
if not (None in deep and len(deep) == 1):
deeplist.append(deep)
d = {None: {expr}}
if deeplist:
d[expr] = deeplist
return d
# this test is expensive, so it should be at the end
elif not expr.has(Indexed):
return {None: {expr}}
raise NotImplementedError(
f"FIXME: No specialized handling of type {type(expr)}")
| IndexConformanceException |
python | Pylons__pyramid | tests/test_config/__init__.py | {
"start": 92,
"end": 547
} | class ____(Interface):
pass
def dummy_tween_factory(handler, registry): # pragma: no cover
pass
def dummy_tween_factory2(handler, registry): # pragma: no cover
pass
def dummy_include(config):
config.registry.included = True
config.action('discrim', None, config.package)
def dummy_include2(config):
config.registry.also_included = True
config.action('discrim', None, config.package)
includeme = dummy_include
| IFactory |
python | walkccc__LeetCode | solutions/957. Prison Cells After N Days/957.py | {
"start": 0,
"end": 461
} | class ____:
def prisonAfterNDays(self, cells: list[int], n: int) -> list[int]:
nextDayCells = [0] * len(cells)
day = 0
while n > 0:
n -= 1
for i in range(1, len(cells) - 1):
nextDayCells[i] = 1 if cells[i - 1] == cells[i + 1] else 0
if day == 0:
firstDayCells = nextDayCells.copy()
elif nextDayCells == firstDayCells:
n %= day
cells = nextDayCells.copy()
day += 1
return cells
| Solution |
python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_v1_correctness_test.py | {
"start": 1153,
"end": 4702
} | class ____(tpu_embedding_base_test.TPUEmbeddingBaseTest
):
def _get_strategy(self):
if hasattr(self, 'strategy'):
return self.strategy
return super(TPUEmbeddingV0CorrectnessTest, self)._get_strategy()
def _create_mid_level(self, optimizer=None):
# Create `TPUEmbedding` object.
if optimizer is None:
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
return tpu_embedding_v1.TPUEmbeddingV0(
feature_config=self.feature_config, optimizer=optimizer)
def _create_strategy_and_mid_level(self, optimizer_name):
strategy = self._get_strategy()
with strategy.scope():
if optimizer_name == 'sgd':
embedding_optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
elif optimizer_name == 'adagrad':
embedding_optimizer = tpu_embedding_v2_utils.Adagrad(
learning_rate=0.1)
elif optimizer_name == 'adam':
embedding_optimizer = tpu_embedding_v2_utils.Adam(
learning_rate=0.1)
elif optimizer_name == 'ftrl':
embedding_optimizer = tpu_embedding_v2_utils.FTRL(
learning_rate=0.1)
else:
raise ValueError('optimizer is not recognized: ', optimizer_name)
mid_level_api = self._create_mid_level(optimizer=embedding_optimizer)
return strategy, mid_level_api
@parameterized.parameters(True, False)
def test_enqueue_with_weights(self, ragged):
strategy, mid_level_api = self._create_strategy_and_mid_level('sgd')
weight = 0.5
if ragged:
dataset = self._create_ragged_dataset(
strategy, include_weights=True, weight=weight)
else:
dataset = self._create_sparse_dataset(
strategy, include_weights=True, weight=weight)
dataset_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def embedding_lookup(features, weights):
def step(features, weights):
return mid_level_api(features, weights)
return strategy.run(step, args=(features, weights))
features, weights = next(dataset_iter)
# Replace the weight for the second feature by None to test.
weights = (weights[0], None, weights[2])
no_weights_activations = embedding_lookup(features, weights=None)
weights_activations = embedding_lookup(features, weights=weights)
no_weights0 = (self._unpack(strategy, no_weights_activations[0]),
self._unpack(strategy, no_weights_activations[1]),
self._unpack(strategy, no_weights_activations[2]))
weights0 = (self._unpack(strategy, weights_activations[0]),
self._unpack(strategy, weights_activations[1]),
self._unpack(strategy, weights_activations[2]))
# videos table has sum combiner and users table has mean combiner.
# i.e. users table lookups isn't affected by the weights as all the weights
# are the same.
# Tuple entry 0 and 1 are the watched and favorited features from the videos
# table and entry 2 is the friends feature from the users table.
# Note that None was passed as a weight for entry 1 so weight should have no
# effect.
weight = (0.5, 1.0, 1.0)
golden = tuple([no_weight * w for no_weight, w in zip(no_weights0, weight)])
self.assertAllClose(golden, weights0)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| TPUEmbeddingV0CorrectnessTest |
python | pypa__warehouse | tests/common/db/packaging.py | {
"start": 3809,
"end": 3983
} | class ____(WarehouseFactory):
class Meta:
model = File.Event
source = factory.SubFactory(FileFactory)
additional = {"publisher_url": None}
| FileEventFactory |
python | django__django | django/db/models/functions/datetime.py | {
"start": 4378,
"end": 4492
} | class ____(Extract):
"""Return the ISO-8601 week-numbering year."""
lookup_name = "iso_year"
| ExtractIsoYear |
python | tensorflow__tensorflow | tensorflow/python/framework/weak_tensor.py | {
"start": 6360,
"end": 7455
} | class ____(core.Value, WeakTensor):
"""A weakly typed Eager Tensor."""
__name__ = "tf.EagerWeakTensor"
# Methods that are only available for EagerTensor.
def numpy(self):
"""Copy of the contents of this EagerWeakTensor into a NumPy array or scalar."""
if not isinstance(self.tensor, ops.EagerTensor):
raise ValueError("WeakTensor.numpy() is only supported in eager mode.")
return self.tensor.numpy()
def __complex__(self):
return self.tensor.__complex__()
def __int__(self):
return self.tensor.__int__()
def __float__(self):
return self.tensor.__float__()
def __index__(self):
return self.tensor.__index__()
def __format__(self, format_spec):
return f"{self.tensor.__format__(format_spec)} weakly typed"
def __array__(self, dtype=None):
# We need to explicitly call np.array() because
# self_tensor.__array__() for scalars raise:
# ValueError: object __array__ method not producing an array
# resource_variable_ops also follows the same pattern.
return np.array(self.tensor.__array__(dtype))
| EagerWeakTensor |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 546,
"end": 1064
} | class ____:
@Descriptor
def size(self):
"""This is the size"""
# Testing function and object default values
def default_func():
pass
default_obj = object()
var_with_default_obj = default_obj
"""this shouldn't render the object address"""
var_with_default_func = default_func
"""this just renders like a normal function"""
def func_with_defaults(a=default_obj, b=default_func):
"""this shouldn't render object or function addresses"""
pass
# Testing classmethod links in code
| Issue226 |
python | numba__numba | numba/core/typeinfer.py | {
"start": 36307,
"end": 70908
} | class ____(object):
"""
Operates on block that shares the same ir.Scope.
"""
def __init__(self, context, func_ir, warnings):
self.context = context
# sort based on label, ensure iteration order!
self.blocks = OrderedDict()
for k in sorted(func_ir.blocks.keys()):
self.blocks[k] = func_ir.blocks[k]
self.generator_info = func_ir.generator_info
self.func_id = func_ir.func_id
self.func_ir = func_ir
self.typevars = TypeVarMap()
self.typevars.set_context(context)
self.constraints = ConstraintNetwork()
self.warnings = warnings
# { index: mangled name }
self.arg_names = {}
# self.return_type = None
# Set of assumed immutable globals
self.assumed_immutables = set()
# Track all calls and associated constraints
self.calls = []
# The inference result of the above calls
self.calltypes = utils.UniqueDict()
# Target var -> constraint with refine hook
self.refine_map = {}
if config.DEBUG or config.DEBUG_TYPEINFER:
self.debug = TypeInferDebug(self)
else:
self.debug = NullDebug()
self._skip_recursion = False
def copy(self, skip_recursion=False):
clone = TypeInferer(self.context, self.func_ir, self.warnings)
clone.arg_names = self.arg_names.copy()
clone._skip_recursion = skip_recursion
for k, v in self.typevars.items():
if not v.locked and v.defined:
clone.typevars[k].add_type(v.getone(), loc=v.define_loc)
return clone
def _mangle_arg_name(self, name):
# Disambiguise argument name
return "arg.%s" % (name,)
def _get_return_vars(self):
rets = []
for blk in self.blocks.values():
inst = blk.terminator
if isinstance(inst, ir.Return):
rets.append(inst.value)
return rets
def get_argument_types(self):
return [self.typevars[k].getone() for k in self.arg_names.values()]
def seed_argument(self, name, index, typ):
name = self._mangle_arg_name(name)
self.seed_type(name, typ)
self.arg_names[index] = name
def seed_type(self, name, typ):
"""All arguments should be seeded.
"""
self.lock_type(name, typ, loc=None)
def seed_return(self, typ):
"""Seeding of return value is optional.
"""
for var in self._get_return_vars():
self.lock_type(var.name, typ, loc=None)
def build_constraint(self):
for blk in self.blocks.values():
for inst in blk.body:
self.constrain_statement(inst)
def return_types_from_partial(self):
"""
Resume type inference partially to deduce the return type.
Note: No side-effect to `self`.
Returns the inferred return type or None if it cannot deduce the return
type.
"""
# Clone the typeinferer and disable typing recursive calls
cloned = self.copy(skip_recursion=True)
# rebuild constraint network
cloned.build_constraint()
# propagate without raising
cloned.propagate(raise_errors=False)
# get return types
rettypes = set()
for retvar in cloned._get_return_vars():
if retvar.name in cloned.typevars:
typevar = cloned.typevars[retvar.name]
if typevar and typevar.defined:
rettypes.add(types.unliteral(typevar.getone()))
if not rettypes:
return
# unify return types
return cloned._unify_return_types(rettypes)
def propagate(self, raise_errors=True):
newtoken = self.get_state_token()
oldtoken = None
# Since the number of types are finite, the typesets will eventually
# stop growing.
while newtoken != oldtoken:
self.debug.propagate_started()
oldtoken = newtoken
# Errors can appear when the type set is incomplete; only
# raise them when there is no progress anymore.
errors = self.constraints.propagate(self)
newtoken = self.get_state_token()
self.debug.propagate_finished()
if errors:
if raise_errors:
force_lit_args = [e for e in errors
if isinstance(e, ForceLiteralArg)]
if not force_lit_args:
raise errors[0]
else:
raise reduce(operator.or_, force_lit_args)
else:
return errors
def add_type(self, var, tp, loc, unless_locked=False):
assert isinstance(var, str), type(var)
tv = self.typevars[var]
if unless_locked and tv.locked:
return
oldty = tv.type
unified = tv.add_type(tp, loc=loc)
if unified != oldty:
self.propagate_refined_type(var, unified)
def add_calltype(self, inst, signature):
assert signature is not None
self.calltypes[inst] = signature
def copy_type(self, src_var, dest_var, loc):
self.typevars[dest_var].union(self.typevars[src_var], loc=loc)
def lock_type(self, var, tp, loc, literal_value=NOTSET):
tv = self.typevars[var]
tv.lock(tp, loc=loc, literal_value=literal_value)
def propagate_refined_type(self, updated_var, updated_type):
source_constraint = self.refine_map.get(updated_var)
if source_constraint is not None:
source_constraint.refine(self, updated_type)
def unify(self, raise_errors=True):
"""
Run the final unification pass over all inferred types, and
catch imprecise types.
"""
typdict = utils.UniqueDict()
def find_offender(name, exhaustive=False):
# finds the offending variable definition by name
# if exhaustive is set it will try and trace through temporary
# variables to find a concrete offending definition.
offender = None
for block in self.func_ir.blocks.values():
offender = block.find_variable_assignment(name)
if offender is not None:
if not exhaustive:
break
try: # simple assignment
hasattr(offender.value, 'name')
offender_value = offender.value.name
except (AttributeError, KeyError):
break
orig_offender = offender
if offender_value.startswith('$'):
offender = find_offender(offender_value,
exhaustive=exhaustive)
if offender is None:
offender = orig_offender
break
return offender
def diagnose_imprecision(offender):
# helper for diagnosing imprecise types
list_msg = """\n
For Numba to be able to compile a list, the list must have a known and
precise type that can be inferred from the other variables. Whilst sometimes
the type of empty lists can be inferred, this is not always the case, see this
documentation for help:
https://numba.readthedocs.io/en/stable/user/troubleshoot.html#my-code-has-an-untyped-list-problem
"""
if offender is not None:
# This block deals with imprecise lists
if hasattr(offender, 'value'):
if hasattr(offender.value, 'op'):
# might be `foo = []`
if offender.value.op == 'build_list':
return list_msg
# or might be `foo = list()`
elif offender.value.op == 'call':
try: # assignment involving a call
call_name = offender.value.func.name
# find the offender based on the call name
offender = find_offender(call_name)
if isinstance(offender.value, ir.Global):
if offender.value.name == 'list':
return list_msg
except (AttributeError, KeyError):
pass
return "" # no help possible
def check_var(name):
tv = self.typevars[name]
if not tv.defined:
if raise_errors:
offender = find_offender(name)
val = getattr(offender, 'value', 'unknown operation')
loc = getattr(offender, 'loc', ir.unknown_loc)
msg = ("Type of variable '%s' cannot be determined, "
"operation: %s, location: %s")
raise TypingError(msg % (var, val, loc), loc)
else:
typdict[var] = types.unknown
return
tp = tv.getone()
if isinstance(tp, types.UndefinedFunctionType):
tp = tp.get_precise()
if not tp.is_precise():
offender = find_offender(name, exhaustive=True)
msg = ("Cannot infer the type of variable '%s'%s, "
"have imprecise type: %s. %s")
istmp = " (temporary variable)" if var.startswith('$') else ""
loc = getattr(offender, 'loc', ir.unknown_loc)
# is this an untyped list? try and provide help
extra_msg = diagnose_imprecision(offender)
if raise_errors:
raise TypingError(msg % (var, istmp, tp, extra_msg), loc)
else:
typdict[var] = types.unknown
return
else: # type is precise, hold it
typdict[var] = tp
# For better error display, check first user-visible vars, then
# temporaries
temps = set(k for k in self.typevars if not k[0].isalpha())
others = set(self.typevars) - temps
for var in sorted(others):
check_var(var)
for var in sorted(temps):
check_var(var)
try:
retty = self.get_return_type(typdict)
except Exception as e:
# partial type inference may raise e.g. attribute error if a
# constraint has no computable signature, ignore this as needed
if raise_errors:
raise e
else:
retty = None
else:
typdict = utils.UniqueDict(
typdict, **{v.name: retty for v in self._get_return_vars()})
try:
fntys = self.get_function_types(typdict)
except Exception as e:
# partial type inference may raise e.g. attribute error if a
# constraint has no computable signature, ignore this as needed
if raise_errors:
raise e
else:
fntys = None
if self.generator_info:
retty = self.get_generator_type(typdict, retty,
raise_errors=raise_errors)
def check_undef_var_in_calls():
# Check for undefined variables in the call arguments.
for callnode, calltype in self.calltypes.items():
if calltype is not None:
for i, v in enumerate(calltype.args, start=1):
if v is types._undef_var:
m = f"undefined variable used in call argument #{i}"
raise TypingError(m, loc=callnode.loc)
check_undef_var_in_calls()
self.debug.unify_finished(typdict, retty, fntys)
return typdict, retty, fntys
def get_generator_type(self, typdict, retty, raise_errors=True):
gi = self.generator_info
arg_types = [None] * len(self.arg_names)
for index, name in self.arg_names.items():
arg_types[index] = typdict[name]
state_types = None
try:
state_types = [typdict[var_name] for var_name in gi.state_vars]
except KeyError:
msg = "Cannot type generator: state variable types cannot be found"
if raise_errors:
raise TypingError(msg)
state_types = [types.unknown for _ in gi.state_vars]
yield_types = None
try:
yield_types = [typdict[y.inst.value.name]
for y in gi.get_yield_points()]
except KeyError:
msg = "Cannot type generator: yield type cannot be found"
if raise_errors:
raise TypingError(msg)
if not yield_types:
msg = "Cannot type generator: it does not yield any value"
if raise_errors:
raise TypingError(msg)
yield_types = [types.unknown for _ in gi.get_yield_points()]
if not yield_types or all(yield_types) == types.unknown:
# unknown yield, probably partial type inference, escape
return types.Generator(self.func_id.func, types.unknown, arg_types,
state_types, has_finalizer=True)
yield_type = self.context.unify_types(*yield_types)
if yield_type is None or isinstance(yield_type, types.Optional):
msg = "Cannot type generator: cannot unify yielded types %s"
yp_highlights = []
for y in gi.get_yield_points():
msg = (_termcolor.errmsg("Yield of: IR '%s', type '%s', "
"location: %s"))
yp_highlights.append(msg % (str(y.inst),
typdict[y.inst.value.name],
y.inst.loc.strformat()))
explain_ty = set()
for ty in yield_types:
if isinstance(ty, types.Optional):
explain_ty.add(ty.type)
explain_ty.add(types.NoneType('none'))
else:
explain_ty.add(ty)
if raise_errors:
raise TypingError("Can't unify yield type from the "
"following types: %s"
% ", ".join(sorted(map(str, explain_ty))) +
"\n\n" + "\n".join(yp_highlights))
return types.Generator(self.func_id.func, yield_type, arg_types,
state_types, has_finalizer=True)
def get_function_types(self, typemap):
"""
Fill and return the calltypes map.
"""
# XXX why can't this be done on the fly?
calltypes = self.calltypes
for call, constraint in self.calls:
calltypes[call] = constraint.get_call_signature()
return calltypes
def _unify_return_types(self, rettypes):
if rettypes:
unified = self.context.unify_types(*rettypes)
if isinstance(unified, types.FunctionType):
# unified is allowed to be UndefinedFunctionType
# instance (that is imprecise).
return unified
if unified is None or not unified.is_precise():
def check_type(atype):
lst = []
for k, v in self.typevars.items():
if atype == v.type:
lst.append(k)
returns = {}
for x in reversed(lst):
for block in self.func_ir.blocks.values():
for instr in block.find_insts(ir.Return):
value = instr.value
if isinstance(value, ir.Var):
name = value.name
else:
pass
if x == name:
returns[x] = instr
break
interped = ""
for name, offender in returns.items():
loc = getattr(offender, 'loc', ir.unknown_loc)
msg = ("Return of: IR name '%s', type '%s', "
"location: %s")
interped = msg % (name, atype, loc.strformat())
return interped
problem_str = []
for xtype in rettypes:
problem_str.append(_termcolor.errmsg(check_type(xtype)))
raise TypingError("Can't unify return type from the "
"following types: %s"
% ", ".join(sorted(map(str, rettypes))) +
"\n" + "\n".join(problem_str))
return unified
else:
# Function without a successful return path
return types.none
def get_return_type(self, typemap):
rettypes = set()
for var in self._get_return_vars():
rettypes.add(typemap[var.name])
retty = self._unify_return_types(rettypes)
# Check return value is not undefined
if retty is types._undef_var:
raise TypingError("return value is undefined")
return retty
def get_state_token(self):
"""The algorithm is monotonic. It can only grow or "refine" the
typevar map.
"""
return [tv.type for name, tv in sorted(self.typevars.items())]
def constrain_statement(self, inst):
if isinstance(inst, ir.Assign):
self.typeof_assign(inst)
elif isinstance(inst, ir.SetItem):
self.typeof_setitem(inst)
elif isinstance(inst, ir.StaticSetItem):
self.typeof_static_setitem(inst)
elif isinstance(inst, ir.DelItem):
self.typeof_delitem(inst)
elif isinstance(inst, ir.SetAttr):
self.typeof_setattr(inst)
elif isinstance(inst, ir.Print):
self.typeof_print(inst)
elif isinstance(inst, ir.StoreMap):
self.typeof_storemap(inst)
elif isinstance(inst, (ir.Jump, ir.Branch, ir.Return, ir.Del)):
pass
elif isinstance(inst, (ir.DynamicRaise, ir.DynamicTryRaise)):
pass
elif isinstance(inst, (ir.StaticRaise, ir.StaticTryRaise)):
pass
elif isinstance(inst, ir.PopBlock):
pass # It's a marker statement
elif type(inst) in typeinfer_extensions:
# let external calls handle stmt if type matches
f = typeinfer_extensions[type(inst)]
f(inst, self)
else:
msg = "Unsupported constraint encountered: %s" % inst
raise UnsupportedError(msg, loc=inst.loc)
def typeof_setitem(self, inst):
constraint = SetItemConstraint(target=inst.target, index=inst.index,
value=inst.value, loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst, constraint))
def typeof_storemap(self, inst):
constraint = SetItemConstraint(target=inst.dct, index=inst.key,
value=inst.value, loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst, constraint))
def typeof_static_setitem(self, inst):
constraint = StaticSetItemConstraint(target=inst.target,
index=inst.index,
index_var=inst.index_var,
value=inst.value, loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst, constraint))
def typeof_delitem(self, inst):
constraint = DelItemConstraint(target=inst.target, index=inst.index,
loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst, constraint))
def typeof_setattr(self, inst):
constraint = SetAttrConstraint(target=inst.target, attr=inst.attr,
value=inst.value, loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst, constraint))
def typeof_print(self, inst):
constraint = PrintConstraint(args=inst.args, vararg=inst.vararg,
loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst, constraint))
def typeof_assign(self, inst):
value = inst.value
if isinstance(value, ir.Const):
self.typeof_const(inst, inst.target, value.value)
elif isinstance(value, ir.Var):
self.constraints.append(Propagate(dst=inst.target.name,
src=value.name, loc=inst.loc))
elif isinstance(value, (ir.Global, ir.FreeVar)):
self.typeof_global(inst, inst.target, value)
elif isinstance(value, ir.Arg):
self.typeof_arg(inst, inst.target, value)
elif isinstance(value, ir.Expr):
self.typeof_expr(inst, inst.target, value)
elif isinstance(value, ir.Yield):
self.typeof_yield(inst, inst.target, value)
else:
msg = ("Unsupported assignment encountered: %s %s" %
(type(value), str(value)))
raise UnsupportedError(msg, loc=inst.loc)
def resolve_value_type(self, inst, val):
"""
Resolve the type of a simple Python value, such as can be
represented by literals.
"""
try:
return self.context.resolve_value_type(val)
except ValueError as e:
msg = str(e)
raise TypingError(msg, loc=inst.loc)
def typeof_arg(self, inst, target, arg):
src_name = self._mangle_arg_name(arg.name)
self.constraints.append(ArgConstraint(dst=target.name,
src=src_name,
loc=inst.loc))
def typeof_const(self, inst, target, const):
ty = self.resolve_value_type(inst, const)
if inst.value.use_literal_type:
lit = types.maybe_literal(value=const)
else:
lit = None
self.add_type(target.name, lit or ty, loc=inst.loc)
def typeof_yield(self, inst, target, yield_):
# Sending values into generators isn't supported.
self.add_type(target.name, types.none, loc=inst.loc)
def sentry_modified_builtin(self, inst, gvar):
"""
Ensure that builtins are not modified.
"""
if gvar.name == 'range' and gvar.value is not range:
bad = True
elif gvar.name == 'slice' and gvar.value is not slice:
bad = True
elif gvar.name == 'len' and gvar.value is not len:
bad = True
else:
bad = False
if bad:
raise TypingError("Modified builtin '%s'" % gvar.name,
loc=inst.loc)
def resolve_call(self, fnty, pos_args, kw_args):
"""
Resolve a call to a given function type. A signature is returned.
"""
if isinstance(fnty, types.FunctionType):
return fnty.get_call_type(self, pos_args, kw_args)
if isinstance(fnty, types.RecursiveCall) and not self._skip_recursion:
# Recursive call
disp = fnty.dispatcher_type.dispatcher
pysig, args = disp.fold_argument_types(pos_args, kw_args)
frame = self.context.callstack.match(disp.py_func, args)
# If the signature is not being compiled
if frame is None:
sig = self.context.resolve_function_type(fnty.dispatcher_type,
pos_args, kw_args)
fndesc = disp.overloads[args].fndesc
qual = qualifying_prefix(fndesc.modname, fndesc.qualname)
fnty.add_overloads(args, qual, fndesc.uid)
return sig
fnid = frame.func_id
qual = qualifying_prefix(fnid.modname, fnid.func_qualname)
fnty.add_overloads(args, qual, fnid.unique_id)
# Resume propagation in parent frame
return_type = frame.typeinfer.return_types_from_partial()
# No known return type
if return_type is None:
raise TypingError("cannot type infer runaway recursion")
sig = typing.signature(return_type, *args)
sig = sig.replace(pysig=pysig)
# Keep track of unique return_type
frame.add_return_type(return_type)
return sig
else:
# Normal non-recursive call
return self.context.resolve_function_type(fnty, pos_args, kw_args)
def typeof_global(self, inst, target, gvar):
try:
typ = self.resolve_value_type(inst, gvar.value)
except TypingError as e:
if (gvar.name == self.func_id.func_name
and gvar.name in _temporary_dispatcher_map):
# Self-recursion case where the dispatcher is not (yet?) known
# as a global variable
typ = types.Dispatcher(_temporary_dispatcher_map[gvar.name])
else:
from numba.misc import special
nm = gvar.name
# check if the problem is actually a name error
func_glbls = self.func_id.func.__globals__
if (nm not in func_glbls.keys() and
nm not in special.__all__ and
nm not in __builtins__.keys() and
nm not in self.func_id.code.co_freevars):
errstr = "NameError: name '%s' is not defined"
msg = _termcolor.errmsg(errstr % nm)
e.patch_message(msg)
raise
else:
msg = _termcolor.errmsg("Untyped global name '%s':" % nm)
msg += " %s" # interps the actual error
# if the untyped global is a numba internal function then add
# to the error message asking if it's been imported.
if nm in special.__all__:
tmp = ("\n'%s' looks like a Numba internal function, has "
"it been imported (i.e. 'from numba import %s')?\n" %
(nm, nm))
msg += _termcolor.errmsg(tmp)
e.patch_message(msg % e)
raise
if isinstance(typ, types.Dispatcher) and typ.dispatcher.is_compiling:
# Recursive call
callstack = self.context.callstack
callframe = callstack.findfirst(typ.dispatcher.py_func)
if callframe is not None:
typ = types.RecursiveCall(typ)
else:
raise NotImplementedError(
"call to %s: unsupported recursion"
% typ.dispatcher)
if isinstance(typ, types.Array):
# Global array in nopython mode is constant
typ = typ.copy(readonly=True)
if isinstance(typ, types.BaseAnonymousTuple):
# if it's a tuple of literal types, swap the type for the more
# specific literal version
literaled = [types.maybe_literal(x) for x in gvar.value]
if all(literaled):
typ = types.Tuple(literaled)
# if any of the items in the tuple are arrays, they need to be
# typed as readonly, mutating an array in a global container
# is not supported (should be compile time constant etc).
def mark_array_ro(tup):
newtup = []
for item in tup.types:
if isinstance(item, types.Array):
item = item.copy(readonly=True)
elif isinstance(item, types.BaseAnonymousTuple):
item = mark_array_ro(item)
newtup.append(item)
return types.BaseTuple.from_types(newtup)
typ = mark_array_ro(typ)
self.sentry_modified_builtin(inst, gvar)
# Setting literal_value for globals because they are handled
# like const value in numba
lit = types.maybe_literal(gvar.value)
# The user may have provided the type for this variable already.
# In this case, call add_type() to make sure the value type is
# consistent. See numba.tests.test_array_reductions
# TestArrayReductions.test_array_cumsum for examples.
# Variable type locked by using the locals dict.
tv = self.typevars[target.name]
if tv.locked:
tv.add_type(lit or typ, loc=inst.loc)
else:
self.lock_type(target.name, lit or typ, loc=inst.loc)
self.assumed_immutables.add(inst)
def typeof_expr(self, inst, target, expr):
if expr.op == 'call':
self.typeof_call(inst, target, expr)
elif expr.op in ('getiter', 'iternext'):
self.typeof_intrinsic_call(inst, target, expr.op, expr.value)
elif expr.op == 'exhaust_iter':
constraint = ExhaustIterConstraint(target.name, count=expr.count,
iterator=expr.value,
loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == 'pair_first':
constraint = PairFirstConstraint(target.name, pair=expr.value,
loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == 'pair_second':
constraint = PairSecondConstraint(target.name, pair=expr.value,
loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == 'binop':
self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs,
expr.rhs)
elif expr.op == 'inplace_binop':
self.typeof_intrinsic_call(inst, target, expr.fn,
expr.lhs, expr.rhs)
elif expr.op == 'unary':
self.typeof_intrinsic_call(inst, target, expr.fn, expr.value)
elif expr.op == 'static_getitem':
constraint = StaticGetItemConstraint(target.name, value=expr.value,
index=expr.index,
index_var=expr.index_var,
loc=expr.loc)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
elif expr.op == 'getitem':
self.typeof_intrinsic_call(inst, target, operator.getitem,
expr.value, expr.index,)
elif expr.op == 'typed_getitem':
constraint = TypedGetItemConstraint(target.name, value=expr.value,
dtype=expr.dtype,
index=expr.index,
loc=expr.loc)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
elif expr.op == 'getattr':
constraint = GetAttrConstraint(target.name, attr=expr.attr,
value=expr.value, loc=inst.loc,
inst=inst)
self.constraints.append(constraint)
elif expr.op == 'build_tuple':
constraint = BuildTupleConstraint(target.name, items=expr.items,
loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == 'build_list':
constraint = BuildListConstraint(target.name, items=expr.items,
loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == 'build_set':
constraint = BuildSetConstraint(target.name, items=expr.items,
loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == 'build_map':
constraint = BuildMapConstraint(
target.name,
items=expr.items,
special_value=expr.literal_value,
value_indexes=expr.value_indexes,
loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == 'cast':
self.constraints.append(Propagate(dst=target.name,
src=expr.value.name,
loc=inst.loc))
elif expr.op == 'phi':
for iv in expr.incoming_values:
if iv is not ir.UNDEFINED:
self.constraints.append(Propagate(dst=target.name,
src=iv.name,
loc=inst.loc))
elif expr.op == 'make_function':
self.lock_type(target.name, types.MakeFunctionLiteral(expr),
loc=inst.loc, literal_value=expr)
elif expr.op == 'undef':
self.add_type(target.name, types._undef_var, loc=inst.loc)
else:
msg = "Unsupported op-code encountered: %s" % expr
raise UnsupportedError(msg, loc=inst.loc)
def typeof_call(self, inst, target, call):
constraint = CallConstraint(target.name, call.func.name, call.args,
call.kws, call.vararg, loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
def typeof_intrinsic_call(self, inst, target, func, *args):
constraint = IntrinsicCallConstraint(target.name, func, args,
kws=(), vararg=None, loc=inst.loc)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
| TypeInferer |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 3503,
"end": 4148
} | class ____(TorchDynamoException):
def __init__(
self, *args: Any, first_useful_frame: Optional[types.FrameType], **kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self.first_useful_frame = first_useful_frame
def remove_dynamo_frames(self) -> typing.Self:
tb = self.__traceback__
if self.first_useful_frame is None or tb is None or config.verbose:
return self
while tb.tb_frame is not self.first_useful_frame:
tb = tb.tb_next
assert tb is not None, "internal error, please report a bug"
return self.with_traceback(tb)
| ShortenTraceback |
python | kamyu104__LeetCode-Solutions | Python/add-two-polynomials-represented-as-linked-lists.py | {
"start": 33,
"end": 109
} | class ____:
def __init__(self, x=0, y=0, next=None):
pass
| PolyNode |
python | streamlit__streamlit | lib/tests/streamlit/elements/layouts_test.py | {
"start": 23636,
"end": 29303
} | class ____(DeltaGeneratorTestCase):
def test_label_required(self):
"""Test that label is required"""
with pytest.raises(TypeError):
st.popover()
def test_just_label(self):
"""Test that it correctly applies label param."""
popover = st.popover("label")
with popover:
# Noop
pass
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.popover.label == "label"
assert not popover_block.add_block.popover.disabled
assert popover_block.add_block.popover.help == ""
assert popover_block.add_block.allow_empty
# Default width should be "content"
assert popover_block.add_block.width_config.use_content
def test_use_container_width_true(self):
"""Test use_container_width=True is mapped to width='stretch'."""
test_widths = [200, "content", "stretch", None]
for width in test_widths:
with self.subTest(width=width):
if width is None:
st.popover("label", use_container_width=True)
else:
st.popover("label", use_container_width=True, width=width)
popover_block = self.get_delta_from_queue()
assert (
popover_block.add_block.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert popover_block.add_block.width_config.use_stretch is True
def test_use_container_width_false(self):
"""Test use_container_width=False is mapped to width='content'."""
test_widths = [200, "stretch", "content", None]
for width in test_widths:
with self.subTest(width=width):
if width is None:
st.popover("label", use_container_width=False)
else:
st.popover("label", use_container_width=False, width=width)
popover_block = self.get_delta_from_queue()
assert (
popover_block.add_block.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert popover_block.add_block.width_config.use_content is True
def test_disabled(self):
"""Test that it correctly applies disabled param."""
popover = st.popover("label", disabled=True)
with popover:
# Noop
pass
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.popover.label == "label"
assert popover_block.add_block.popover.disabled
def test_help(self):
"""Test that it correctly applies help param."""
popover = st.popover("label", help="help text")
with popover:
# Noop
pass
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.popover.label == "label"
assert popover_block.add_block.popover.help == "help text"
def test_valid_emoji_icon(self):
"""Test that it can be called with an emoji icon"""
popover = st.popover("label", icon="🦄")
with popover:
# Noop
pass
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.popover.label == "label"
assert popover_block.add_block.popover.icon == "🦄"
def test_valid_material_icon(self):
"""Test that it can be called with a material icon"""
popover = st.popover("label", icon=":material/download:")
with popover:
# Noop
pass
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.popover.label == "label"
assert popover_block.add_block.popover.icon == ":material/download:"
def test_invalid_emoji_icon(self):
"""Test that it throws an error on invalid emoji icon"""
with pytest.raises(StreamlitAPIException) as e:
st.popover("label", icon="invalid")
assert (
str(e.value)
== 'The value "invalid" is not a valid emoji. Shortcodes are not allowed, '
"please use a single character instead."
)
def test_invalid_material_icon(self):
"""Test that it throws an error on invalid material icon"""
icon = ":material/invalid:"
with pytest.raises(StreamlitAPIException) as e:
st.popover("label", icon=icon)
assert "is not a valid Material icon" in str(e.value)
def test_width_pixel_value(self):
"""Test that pixel width configuration works correctly"""
st.popover("label", width=200)
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.width_config.pixel_width == 200
def test_width_stretch(self):
"""Test that stretch width configuration works correctly"""
st.popover("label", width="stretch")
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.width_config.use_stretch
def test_width_content(self):
"""Test that content width configuration works correctly"""
st.popover("label", width="content")
popover_block = self.get_delta_from_queue()
assert popover_block.add_block.width_config.use_content
@parameterized.expand(["invalid", -100, 0])
def test_invalid_width(self, invalid_width):
"""Test that invalid width values raise an error"""
with pytest.raises(StreamlitAPIException):
st.popover("label", width=invalid_width)
| PopoverContainerTest |
python | jazzband__django-oauth-toolkit | tests/test_authorization_code.py | {
"start": 71967,
"end": 75242
} | class ____(BaseAuthorizationCodeTokenView):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.application.algorithm = Application.RS256_ALGORITHM
cls.application.save()
def test_id_token_public(self):
"""
Request an access token using client_type: public
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
authorization_code = self.get_auth(scope="openid")
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
"client_id": self.application.client_id,
"scope": "openid",
}
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "openid")
self.assertIn("access_token", content)
self.assertIn("id_token", content)
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_id_token_code_exchange_succeed_when_redirect_uri_match_with_multiple_query_params(
self,
):
"""
Tests code exchange succeed when redirect uri matches the one used for code request
"""
self.client.login(username="test_user", password="123456")
self.application.redirect_uris = "http://localhost http://example.com?foo=bar"
self.application.save()
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.com?bar=baz&foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.com?bar=baz&foo=bar",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content["token_type"], "Bearer")
self.assertEqual(content["scope"], "openid")
self.assertIn("access_token", content)
self.assertIn("id_token", content)
self.assertEqual(content["expires_in"], self.oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RW)
| TestOIDCAuthorizationCodeTokenView |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/tests/test_tests/test_common.py | {
"start": 644,
"end": 13066
} | class ____:
@staticmethod
def get_dummy_cat_container(dagger_client: dagger.Client, exit_code: int, secret_file_paths: List, stdout: str, stderr: str):
secret_file_paths = secret_file_paths or []
container = (
dagger_client.container()
.from_("bash:latest")
.with_exec(["mkdir", "-p", common.AcceptanceTests.CONTAINER_TEST_INPUT_DIRECTORY], use_entrypoint=True)
.with_exec(["mkdir", "-p", common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY], use_entrypoint=True)
)
for secret_file_path in secret_file_paths:
secret_dir_name = str(pathlib.Path(secret_file_path).parent)
container = container.with_exec(["mkdir", "-p", secret_dir_name], use_entrypoint=True)
container = container.with_exec(["sh", "-c", f"echo foo > {secret_file_path}"], use_entrypoint=True)
return container.with_new_file("/stupid_bash_script.sh", contents=f"echo {stdout}; echo {stderr} >&2; exit {exit_code}")
@pytest.fixture
def test_context_ci(self, current_platform, dagger_client):
secret_store = InMemorySecretStore()
secret_store.add_secret("SECRET_SOURCE-FAKER_CREDS", "bar")
context = ConnectorContext(
pipeline_name="test",
connector=ConnectorWithModifiedFiles("source-faker", frozenset()),
git_branch="test",
git_revision="test",
diffed_branch="test",
git_repo_url="test",
report_output_prefix="test",
is_local=False,
targeted_platforms=[current_platform],
secret_stores={"airbyte-connector-testing-secret-store": secret_store},
)
context.dagger_client = dagger_client
return context
@pytest.fixture
def dummy_connector_under_test_container(self, dagger_client) -> dagger.Container:
return dagger_client.container().from_("airbyte/source-faker:latest")
@pytest.fixture
def another_dummy_connector_under_test_container(self, dagger_client) -> dagger.File:
return dagger_client.container().from_("airbyte/source-pokeapi:latest")
async def test_skipped_when_no_acceptance_test_config(self, mocker, test_context_ci):
test_context_ci.connector = mocker.MagicMock(acceptance_test_config=None)
acceptance_test_step = common.AcceptanceTests(test_context_ci, secrets=[])
step_result = await acceptance_test_step._run(None)
assert step_result.status == StepStatus.SKIPPED
@pytest.mark.parametrize(
"exit_code,expected_status,secrets_file_names,expect_updated_secrets",
[
(0, StepStatus.SUCCESS, [], False),
(1, StepStatus.FAILURE, [], False),
(2, StepStatus.FAILURE, [], False),
(common.AcceptanceTests.skipped_exit_code, StepStatus.SKIPPED, [], False),
(0, StepStatus.SUCCESS, [f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json"], False),
(1, StepStatus.FAILURE, [f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json"], False),
(2, StepStatus.FAILURE, [f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json"], False),
(
common.AcceptanceTests.skipped_exit_code,
StepStatus.SKIPPED,
[f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json"],
False,
),
(
0,
StepStatus.SUCCESS,
[
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json",
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/updated_configurations/updated_config.json",
],
True,
),
(
1,
StepStatus.FAILURE,
[
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json",
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/updated_configurations/updated_config.json",
],
True,
),
(
2,
StepStatus.FAILURE,
[
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json",
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/updated_configurations/updated_config.json",
],
True,
),
(
common.AcceptanceTests.skipped_exit_code,
StepStatus.SKIPPED,
[
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/config.json",
f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}/updated_configurations/updated_config.json",
],
True,
),
],
)
async def test__run(
self,
test_context_ci,
mocker,
exit_code: int,
expected_status: StepStatus,
secrets_file_names: List,
expect_updated_secrets: bool,
test_input_dir: dagger.Directory,
):
"""Test the behavior of the run function using a dummy container."""
cat_container = self.get_dummy_cat_container(
test_context_ci.dagger_client, exit_code, secrets_file_names, stdout="hello", stderr="world"
)
async_mock = mocker.AsyncMock(return_value=cat_container)
mocker.patch.object(common.AcceptanceTests, "_build_connector_acceptance_test", side_effect=async_mock)
mocker.patch.object(common.AcceptanceTests, "get_cat_command", return_value=["bash", "/stupid_bash_script.sh"])
test_context_ci.get_connector_dir = mocker.AsyncMock(return_value=test_input_dir)
acceptance_test_step = common.AcceptanceTests(test_context_ci, secrets=[])
step_result = await acceptance_test_step._run(None)
assert step_result.status == expected_status
assert step_result.stdout.strip() == "hello"
assert step_result.stderr.strip() == "world"
if expect_updated_secrets:
assert (
await test_context_ci.updated_secrets_dir.entries()
== await cat_container.directory(f"{common.AcceptanceTests.CONTAINER_SECRETS_DIRECTORY}").entries()
)
assert any("updated_configurations" in str(file_name) for file_name in await test_context_ci.updated_secrets_dir.entries())
@pytest.fixture
def test_input_dir(self, dagger_client, tmpdir):
with open(tmpdir / "acceptance-test-config.yml", "w") as f:
yaml.safe_dump({"connector_image": "airbyte/connector_under_test_image:dev"}, f)
return dagger_client.host().directory(str(tmpdir))
def get_patched_acceptance_test_step(self, mocker, test_context_ci, test_input_dir):
in_memory_secret_store = InMemorySecretStore()
in_memory_secret_store.add_secret("config.json", "connector_secret")
secrets = [Secret("config.json", in_memory_secret_store, file_name="config.json")]
test_context_ci.get_connector_dir = mocker.AsyncMock(return_value=test_input_dir)
test_context_ci.connector_acceptance_test_image = "bash:latest"
mocker.patch.object(docker, "load_image_to_docker_host", return_value="image_sha")
mocker.patch.object(docker, "with_bound_docker_host", lambda _, cat_container: cat_container)
return common.AcceptanceTests(test_context_ci, secrets=secrets)
async def test_cat_container_provisioning(
self, dagger_client, mocker, test_context_ci, test_input_dir, dummy_connector_under_test_container
):
"""Check that the acceptance test container is correctly provisioned.
We check that:
- the test input and secrets are correctly mounted.
- the cache buster and image sha are correctly set as environment variables.
- that the entrypoint is correctly set.
- the current working directory is correctly set.
"""
# The mounted_connector_secrets behaves differently when the test is run locally or in CI.
# It is not masking the secrets when run locally.
# We want to confirm that the secrets are correctly masked when run in CI.
acceptance_test_step = self.get_patched_acceptance_test_step(mocker, test_context_ci, test_input_dir)
cat_container = await acceptance_test_step._build_connector_acceptance_test(dummy_connector_under_test_container, test_input_dir)
assert (
await cat_container.with_exec(["pwd"], use_entrypoint=True).stdout()
).strip() == acceptance_test_step.CONTAINER_TEST_INPUT_DIRECTORY
test_input_ls_result = await cat_container.with_exec(["ls"], use_entrypoint=True).stdout()
assert all(
file_or_directory in test_input_ls_result.splitlines() for file_or_directory in ["secrets", "acceptance-test-config.yml"]
)
assert (
await cat_container.with_exec(
["cat", f"{acceptance_test_step.CONTAINER_SECRETS_DIRECTORY}/config.json"], use_entrypoint=True
).stdout()
== "***"
)
env_vars = {await env_var.name(): await env_var.value() for env_var in await cat_container.env_variables()}
assert "CACHEBUSTER" in env_vars
@pytest.mark.flaky
# This test has shown some flakiness in CI
# This should be investigated and fixed
# https://github.com/airbytehq/airbyte-internal-issues/issues/6304
async def test_cat_container_caching(
self,
dagger_client,
mocker,
test_context_ci,
test_input_dir,
dummy_connector_under_test_container,
another_dummy_connector_under_test_container,
):
"""Check that the acceptance test container caching behavior is correct."""
initial_datetime = datetime.datetime(year=1992, month=6, day=19, hour=13, minute=1, second=0)
with freeze_time(initial_datetime) as frozen_datetime:
acceptance_test_step = self.get_patched_acceptance_test_step(mocker, test_context_ci, test_input_dir)
first_cat_container = await acceptance_test_step._build_connector_acceptance_test(
dummy_connector_under_test_container, test_input_dir
)
fist_date_result = await first_cat_container.with_exec(["date"], use_entrypoint=True).stdout()
frozen_datetime.tick(delta=datetime.timedelta(hours=5))
# Check that cache is used in the same day
second_cat_container = await acceptance_test_step._build_connector_acceptance_test(
dummy_connector_under_test_container, test_input_dir
)
second_date_result = await second_cat_container.with_exec(["date"], use_entrypoint=True).stdout()
assert fist_date_result == second_date_result
# Check that cache bursted after a day
frozen_datetime.tick(delta=datetime.timedelta(days=1, minutes=10))
third_cat_container = await acceptance_test_step._build_connector_acceptance_test(
dummy_connector_under_test_container, test_input_dir
)
third_date_result = await third_cat_container.with_exec(["date"], use_entrypoint=True).stdout()
assert third_date_result != second_date_result
time.sleep(1)
# Check that changing the container invalidates the cache
fourth_cat_container = await acceptance_test_step._build_connector_acceptance_test(
another_dummy_connector_under_test_container, test_input_dir
)
fourth_date_result = await fourth_cat_container.with_exec(["date"], use_entrypoint=True).stdout()
assert fourth_date_result != third_date_result
async def test_params(self, dagger_client, mocker, test_context_ci, test_input_dir):
acceptance_test_step = self.get_patched_acceptance_test_step(mocker, test_context_ci, test_input_dir)
assert set(acceptance_test_step.params_as_cli_options) == {"-ra", "--disable-warnings", "--durations=3"}
acceptance_test_step.extra_params = {"--durations": ["5"], "--collect-only": []}
assert set(acceptance_test_step.params_as_cli_options) == {"-ra", "--disable-warnings", "--durations=5", "--collect-only"}
| TestAcceptanceTests |
python | django__django | tests/test_exceptions/test_validation_error.py | {
"start": 96,
"end": 12403
} | class ____(unittest.TestCase):
def test_messages_concatenates_error_dict_values(self):
message_dict = {}
exception = ValidationError(message_dict)
self.assertEqual(sorted(exception.messages), [])
message_dict["field1"] = ["E1", "E2"]
exception = ValidationError(message_dict)
self.assertEqual(sorted(exception.messages), ["E1", "E2"])
message_dict["field2"] = ["E3", "E4"]
exception = ValidationError(message_dict)
self.assertEqual(sorted(exception.messages), ["E1", "E2", "E3", "E4"])
def test_eq(self):
error1 = ValidationError("message")
error2 = ValidationError("message", code="my_code1")
error3 = ValidationError("message", code="my_code2")
error4 = ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm1": "val1", "parm2": "val2"},
)
error5 = ValidationError({"field1": "message", "field2": "other"})
error6 = ValidationError({"field1": "message"})
error7 = ValidationError(
[
ValidationError({"field1": "field error", "field2": "other"}),
"message",
]
)
self.assertEqual(error1, ValidationError("message"))
self.assertNotEqual(error1, ValidationError("message2"))
self.assertNotEqual(error1, error2)
self.assertNotEqual(error1, error4)
self.assertNotEqual(error1, error5)
self.assertNotEqual(error1, error6)
self.assertNotEqual(error1, error7)
self.assertEqual(error1, mock.ANY)
self.assertEqual(error2, ValidationError("message", code="my_code1"))
self.assertNotEqual(error2, ValidationError("other", code="my_code1"))
self.assertNotEqual(error2, error3)
self.assertNotEqual(error2, error4)
self.assertNotEqual(error2, error5)
self.assertNotEqual(error2, error6)
self.assertNotEqual(error2, error7)
self.assertEqual(
error4,
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm1": "val1", "parm2": "val2"},
),
)
self.assertNotEqual(
error4,
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code2",
params={"parm1": "val1", "parm2": "val2"},
),
)
self.assertNotEqual(
error4,
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm2": "val2"},
),
)
self.assertNotEqual(
error4,
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm2": "val1", "parm1": "val2"},
),
)
self.assertNotEqual(
error4,
ValidationError(
"error val1 val2",
code="my_code1",
),
)
# params ordering is ignored.
self.assertEqual(
error4,
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm2": "val2", "parm1": "val1"},
),
)
self.assertEqual(
error5,
ValidationError({"field1": "message", "field2": "other"}),
)
self.assertNotEqual(
error5,
ValidationError({"field1": "message", "field2": "other2"}),
)
self.assertNotEqual(
error5,
ValidationError({"field1": "message", "field3": "other"}),
)
self.assertNotEqual(error5, error6)
# fields ordering is ignored.
self.assertEqual(
error5,
ValidationError({"field2": "other", "field1": "message"}),
)
self.assertNotEqual(error7, ValidationError(error7.error_list[1:]))
self.assertNotEqual(
ValidationError(["message"]),
ValidationError([ValidationError("message", code="my_code")]),
)
# messages ordering is ignored.
self.assertEqual(
error7,
ValidationError(list(reversed(error7.error_list))),
)
self.assertNotEqual(error4, ValidationError([error4]))
self.assertNotEqual(ValidationError([error4]), error4)
self.assertNotEqual(error4, ValidationError({"field1": error4}))
self.assertNotEqual(ValidationError({"field1": error4}), error4)
def test_eq_nested(self):
error_dict = {
"field1": ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code",
params={"parm1": "val1", "parm2": "val2"},
),
"field2": "other",
}
error = ValidationError(error_dict)
self.assertEqual(error, ValidationError(dict(error_dict)))
self.assertEqual(
error,
ValidationError(
{
"field1": ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code",
params={"parm2": "val2", "parm1": "val1"},
),
"field2": "other",
}
),
)
self.assertNotEqual(
error,
ValidationError(
{**error_dict, "field2": "message"},
),
)
self.assertNotEqual(
error,
ValidationError(
{
"field1": ValidationError(
"error %(parm1)s val2",
code="my_code",
params={"parm1": "val1"},
),
"field2": "other",
}
),
)
def test_hash(self):
error1 = ValidationError("message")
error2 = ValidationError("message", code="my_code1")
error3 = ValidationError("message", code="my_code2")
error4 = ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm1": "val1", "parm2": "val2"},
)
error5 = ValidationError({"field1": "message", "field2": "other"})
error6 = ValidationError({"field1": "message"})
error7 = ValidationError(
[
ValidationError({"field1": "field error", "field2": "other"}),
"message",
]
)
self.assertEqual(hash(error1), hash(ValidationError("message")))
self.assertNotEqual(hash(error1), hash(ValidationError("message2")))
self.assertNotEqual(hash(error1), hash(error2))
self.assertNotEqual(hash(error1), hash(error4))
self.assertNotEqual(hash(error1), hash(error5))
self.assertNotEqual(hash(error1), hash(error6))
self.assertNotEqual(hash(error1), hash(error7))
self.assertEqual(
hash(error2),
hash(ValidationError("message", code="my_code1")),
)
self.assertNotEqual(
hash(error2),
hash(ValidationError("other", code="my_code1")),
)
self.assertNotEqual(hash(error2), hash(error3))
self.assertNotEqual(hash(error2), hash(error4))
self.assertNotEqual(hash(error2), hash(error5))
self.assertNotEqual(hash(error2), hash(error6))
self.assertNotEqual(hash(error2), hash(error7))
self.assertEqual(
hash(error4),
hash(
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm1": "val1", "parm2": "val2"},
)
),
)
self.assertNotEqual(
hash(error4),
hash(
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code2",
params={"parm1": "val1", "parm2": "val2"},
)
),
)
self.assertNotEqual(
hash(error4),
hash(
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm2": "val2"},
)
),
)
self.assertNotEqual(
hash(error4),
hash(
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm2": "val1", "parm1": "val2"},
)
),
)
self.assertNotEqual(
hash(error4),
hash(
ValidationError(
"error val1 val2",
code="my_code1",
)
),
)
# params ordering is ignored.
self.assertEqual(
hash(error4),
hash(
ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code1",
params={"parm2": "val2", "parm1": "val1"},
)
),
)
self.assertEqual(
hash(error5),
hash(ValidationError({"field1": "message", "field2": "other"})),
)
self.assertNotEqual(
hash(error5),
hash(ValidationError({"field1": "message", "field2": "other2"})),
)
self.assertNotEqual(
hash(error5),
hash(ValidationError({"field1": "message", "field3": "other"})),
)
self.assertNotEqual(error5, error6)
# fields ordering is ignored.
self.assertEqual(
hash(error5),
hash(ValidationError({"field2": "other", "field1": "message"})),
)
self.assertNotEqual(
hash(error7),
hash(ValidationError(error7.error_list[1:])),
)
self.assertNotEqual(
hash(ValidationError(["message"])),
hash(ValidationError([ValidationError("message", code="my_code")])),
)
# messages ordering is ignored.
self.assertEqual(
hash(error7),
hash(ValidationError(list(reversed(error7.error_list)))),
)
self.assertNotEqual(hash(error4), hash(ValidationError([error4])))
self.assertNotEqual(hash(ValidationError([error4])), hash(error4))
self.assertNotEqual(
hash(error4),
hash(ValidationError({"field1": error4})),
)
def test_hash_nested(self):
error_dict = {
"field1": ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code",
params={"parm2": "val2", "parm1": "val1"},
),
"field2": "other",
}
error = ValidationError(error_dict)
self.assertEqual(hash(error), hash(ValidationError(dict(error_dict))))
self.assertEqual(
hash(error),
hash(
ValidationError(
{
"field1": ValidationError(
"error %(parm1)s %(parm2)s",
code="my_code",
params={"parm1": "val1", "parm2": "val2"},
),
"field2": "other",
}
)
),
)
self.assertNotEqual(
hash(error),
hash(
ValidationError(
{**error_dict, "field2": "message"},
)
),
)
self.assertNotEqual(
hash(error),
hash(
ValidationError(
{
"field1": ValidationError(
"error %(parm1)s val2",
code="my_code",
params={"parm1": "val1"},
),
"field2": "other",
}
)
),
)
| TestValidationError |
python | pandas-dev__pandas | pandas/tests/tslibs/test_timedeltas.py | {
"start": 3110,
"end": 4813
} | class ____:
def test_array_to_timedelta64_string_with_unit_2d_raises(self):
# check the 'unit is not None and errors != "coerce"' path
# in array_to_timedelta64 raises correctly with 2D values
values = np.array([["1", 2], [3, "4"]], dtype=object)
with pytest.raises(ValueError, match="unit must not be specified"):
array_to_timedelta64(values, unit="s")
def test_array_to_timedelta64_non_object_raises(self):
# check we raise, not segfault
values = np.arange(5)
msg = "'values' must have object dtype"
with pytest.raises(TypeError, match=msg):
array_to_timedelta64(values)
@pytest.mark.parametrize("unit", ["s", "ms", "us"])
def test_ints_to_pytimedelta(unit):
# tests for non-nanosecond cases
arr = np.arange(6, dtype=np.int64).view(f"m8[{unit}]")
res = ints_to_pytimedelta(arr, box=False)
# For non-nanosecond, .astype(object) gives pytimedelta objects
# instead of integers
expected = arr.astype(object)
tm.assert_numpy_array_equal(res, expected)
res = ints_to_pytimedelta(arr, box=True)
expected = np.array([Timedelta(x) for x in arr], dtype=object)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("unit", ["Y", "M", "ps", "fs", "as"])
def test_ints_to_pytimedelta_unsupported(unit):
arr = np.arange(6, dtype=np.int64).view(f"m8[{unit}]")
with pytest.raises(NotImplementedError, match=r"\d{1,2}"):
ints_to_pytimedelta(arr, box=False)
msg = "Only resolutions 's', 'ms', 'us', 'ns' are supported"
with pytest.raises(NotImplementedError, match=msg):
ints_to_pytimedelta(arr, box=True)
| TestArrayToTimedelta64 |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-google/llama_index/embeddings/google/gemini.py | {
"start": 591,
"end": 3665
} | class ____(BaseEmbedding):
"""
Google Gemini embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
title: Optional[str] = Field(
default="",
description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid.",
)
task_type: Optional[str] = Field(
default="retrieval_document",
description="The task for embedding model.",
)
def __init__(
self,
model_name: str = "models/embedding-001",
task_type: Optional[str] = "retrieval_document",
api_key: Optional[str] = None,
title: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
title=title,
task_type=task_type,
**kwargs,
)
gemini.configure(api_key=api_key)
self._model = gemini
@classmethod
def class_name(cls) -> str:
return "GeminiEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.embed_content(
model=self.model_name,
content=query,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return [
self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
for text in texts
]
### Async methods ###
# need to wait async calls from Gemini side to be implemented.
# Issue: https://github.com/google/generative-ai-python/issues/125
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._get_text_embedding(text)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return self._get_text_embeddings(texts)
| GeminiEmbedding |
python | tiangolo__fastapi | docs_src/header_param_models/tutorial003_py39.py | {
"start": 112,
"end": 415
} | class ____(BaseModel):
host: str
save_data: bool
if_modified_since: Union[str, None] = None
traceparent: Union[str, None] = None
x_tag: list[str] = []
@app.get("/items/")
async def read_items(headers: CommonHeaders = Header(convert_underscores=False)):
return headers
| CommonHeaders |
python | huggingface__transformers | src/transformers/models/kosmos2_5/modeling_kosmos2_5.py | {
"start": 30586,
"end": 35888
} | class ____(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.__init__
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.make_weights
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, "weights"):
# in forward put the weights on the correct dtype and device of the param
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer("weights", emb_weights, persistent=False)
@staticmethod
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.get_embedding
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
past_key_values_length: int = 0,
position_ids: Optional[torch.Tensor] = None,
):
if input_ids is not None:
bsz, seq_len = input_ids.size()
if position_ids is None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
).to(input_ids.device)
else:
bsz, seq_len = inputs_embeds.size()[:-1]
if position_ids is None:
position_ids = self.create_position_ids_from_inputs_embeds(
inputs_embeds, past_key_values_length, self.padding_idx
)
# expand embeddings if needed
max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
@staticmethod
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding.create_position_ids_from_inputs_embeds
def create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
@staticmethod
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
# Copied from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextFFN with Kosmos2->Kosmos2_5
| Kosmos2_5TextSinusoidalPositionalEmbedding |
python | crytic__slither | slither/detectors/statements/pyth_unchecked_confidence.py | {
"start": 143,
"end": 1637
} | class ____(PythUnchecked):
"""
Documentation: This detector finds when the confidence level of a Pyth price is not checked
"""
ARGUMENT = "pyth-unchecked-confidence"
HELP = "Detect when the confidence level of a Pyth price is not checked"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#pyth-unchecked-confidence-level"
WIKI_TITLE = "Pyth unchecked confidence level"
WIKI_DESCRIPTION = "Detect when the confidence level of a Pyth price is not checked"
WIKI_RECOMMENDATION = "Check the confidence level of a Pyth price. Visit https://docs.pyth.network/price-feeds/best-practices#confidence-intervals for more information."
WIKI_EXPLOIT_SCENARIO = """
```solidity
import "@pythnetwork/pyth-sdk-solidity/IPyth.sol";
import "@pythnetwork/pyth-sdk-solidity/PythStructs.sol";
contract C {
IPyth pyth;
constructor(IPyth _pyth) {
pyth = _pyth;
}
function bad(bytes32 id, uint256 age) public {
PythStructs.Price memory price = pyth.getEmaPriceNoOlderThan(id, age);
// Use price
}
}
```
The function `A` uses the price without checking its confidence level.
"""
PYTH_FUNCTIONS = [
"getEmaPrice",
"getEmaPriceNoOlderThan",
"getEmaPriceUnsafe",
"getPrice",
"getPriceNoOlderThan",
"getPriceUnsafe",
]
PYTH_FIELD = "conf"
| PythUncheckedConfidence |
python | nedbat__coveragepy | tests/helpers.py | {
"start": 5206,
"end": 11021
} | class ____:
"""Asserts the uniqueness of file names passed to a function."""
def __init__(self, wrapped: Callable[..., Any]) -> None:
self.filenames: set[str] = set()
self.wrapped = wrapped
@classmethod
def hook(cls, obj: Any, method_name: str) -> CheckUniqueFilenames:
"""Replace a method with our checking wrapper.
The method must take a string as a first argument. That argument
will be checked for uniqueness across all the calls to this method.
The values don't have to be file names actually, just strings, but
we only use it for filename arguments.
"""
method = getattr(obj, method_name)
hook = cls(method)
setattr(obj, method_name, hook.wrapper)
return hook
def wrapper(self, filename: str, *args: Any, **kwargs: Any) -> Any:
"""The replacement method. Check that we don't have dupes."""
assert filename not in self.filenames, (
f"File name {filename!r} passed to {self.wrapped!r} twice"
)
self.filenames.add(filename)
return self.wrapped(filename, *args, **kwargs)
def re_lines(pat: str, text: str, match: bool = True) -> list[str]:
"""Return a list of lines selected by `pat` in the string `text`.
If `match` is false, the selection is inverted: only the non-matching
lines are included.
Returns a list, the selected lines, without line endings.
"""
assert len(pat) < 200, "It's super-easy to swap the arguments to re_lines"
return [l for l in text.splitlines() if bool(re.search(pat, l)) == match]
def re_lines_text(pat: str, text: str, match: bool = True) -> str:
"""Return the multi-line text of lines selected by `pat`."""
return "".join(l + "\n" for l in re_lines(pat, text, match=match))
def re_line(pat: str, text: str) -> str:
"""Return the one line in `text` that matches regex `pat`.
Raises an AssertionError if more than one, or less than one, line matches.
"""
lines = re_lines(pat, text)
assert len(lines) == 1
return lines[0]
def remove_tree(dirname: str) -> None:
"""Remove a directory tree.
It's fine for the directory to not exist in the first place.
"""
if os.path.exists(dirname):
shutil.rmtree(dirname)
# Map chars to numbers for arcz_to_arcs
_arcz_map = {".": -1}
_arcz_map.update({c: ord(c) - ord("0") for c in "123456789"})
_arcz_map.update({c: 10 + ord(c) - ord("A") for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"})
def arcz_to_arcs(arcz: str) -> list[TArc]:
"""Convert a compact textual representation of arcs to a list of pairs.
The text has space-separated pairs of letters. Period is -1, 1-9 are
1-9, A-Z are 10 through 36. The resulting list is sorted regardless of
the order of the input pairs.
".1 12 2." --> [(-1,1), (1,2), (2,-1)]
Minus signs can be included in the pairs:
"-11, 12, 2-5" --> [(-1,1), (1,2), (2,-5)]
"""
# The `type: ignore[misc]` here are to suppress "Unpacking a string is
# disallowed".
a: str
b: str
arcs = []
for pair in arcz.split():
asgn = bsgn = 1
if len(pair) == 2:
a, b = pair # type: ignore[misc]
else:
assert len(pair) == 3
if pair[0] == "-":
_, a, b = pair # type: ignore[misc]
asgn = -1
else:
assert pair[1] == "-"
a, _, b = pair # type: ignore[misc]
bsgn = -1
arcs.append((asgn * _arcz_map[a], bsgn * _arcz_map[b]))
return sorted(arcs)
@contextlib.contextmanager
def change_dir(new_dir: str | Path) -> Iterator[None]:
"""Change directory, and then change back.
Use as a context manager, it will return to the original
directory at the end of the block.
"""
old_dir = os.getcwd()
os.chdir(str(new_dir))
try:
yield
finally:
os.chdir(old_dir)
T = TypeVar("T")
def assert_count_equal(
a: Iterable[T] | None,
b: Iterable[T] | None,
) -> None:
"""
A pytest-friendly implementation of assertCountEqual.
Assert that `a` and `b` have the same elements, but maybe in different order.
This only works for hashable elements.
"""
assert a is not None
assert b is not None
assert collections.Counter(list(a)) == collections.Counter(list(b))
def get_coverage_warnings(warns: Iterable[warnings.WarningMessage]) -> list[str]:
"""Extract the text of CoverageWarnings."""
warns = [w for w in warns if issubclass(w.category, CoverageWarning)]
texts = [cast(Warning, w.message).args[0] for w in warns]
return texts
def assert_coverage_warnings(
warns: Iterable[warnings.WarningMessage],
*msgs: str | re.Pattern[str],
) -> None:
"""
Assert that the CoverageWarning's in `warns` have `msgs` as messages.
Each msg can be a string compared for equality, or a compiled regex used to
search the text.
"""
actuals = get_coverage_warnings(warns)
assert msgs # don't call this without some messages.
assert len(msgs) == len(actuals)
for actual, expected in zip(actuals, msgs):
if hasattr(expected, "search"):
assert expected.search(actual), f"{actual!r} didn't match {expected!r}"
else:
actual = actual.partition("; see ")[0]
assert actual == expected
@contextlib.contextmanager
def swallow_warnings(
message: str = r".",
category: type[Warning] = CoverageWarning,
) -> Iterator[None]:
"""Swallow particular warnings.
It's OK if they happen, or if they don't happen. Just ignore them.
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=category, message=message)
yield
| CheckUniqueFilenames |
python | django__django | django/contrib/auth/models.py | {
"start": 3355,
"end": 4596
} | class ____(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group 'Site editors' has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_("name"), max_length=150, unique=True)
permissions = models.ManyToManyField(
Permission,
verbose_name=_("permissions"),
blank=True,
)
objects = GroupManager()
class Meta:
verbose_name = _("group")
verbose_name_plural = _("groups")
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
| Group |
python | django-guardian__django-guardian | example_project/articles/migrations/0003_auto_20230727_0659.py | {
"start": 93,
"end": 916
} | class ____(migrations.Migration):
dependencies = [
("articles", "0002_custom_generic_permissions"),
]
operations = [
migrations.AlterField(
model_name="article",
name="id",
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID"),
),
migrations.AlterField(
model_name="articlegroupobjectpermission",
name="id",
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID"),
),
migrations.AlterField(
model_name="articleuserobjectpermission",
name="id",
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID"),
),
]
| Migration |
python | simonw__datasette | datasette/utils/testing.py | {
"start": 282,
"end": 1047
} | class ____:
def __init__(self, httpx_response):
self.httpx_response = httpx_response
@property
def status(self):
return self.httpx_response.status_code
# Supports both for test-writing convenience
@property
def status_code(self):
return self.status
@property
def headers(self):
return self.httpx_response.headers
@property
def body(self):
return self.httpx_response.content
@property
def content(self):
return self.body
@property
def cookies(self):
return dict(self.httpx_response.cookies)
@property
def json(self):
return json.loads(self.text)
@property
def text(self):
return self.body.decode("utf8")
| TestResponse |
python | scipy__scipy | scipy/interpolate/tests/test_bary_rational.py | {
"start": 12231,
"end": 16910
} | class ____:
def runge(self, z):
return 1/(1 + z**2)
def scale(self, n, d):
return (-1)**(np.arange(n) + d) * factorial(d)
def test_iv(self):
with pytest.raises(ValueError, match="`x`"):
FloaterHormannInterpolator([[0]], [0], d=0)
with pytest.raises(ValueError, match="`y`"):
FloaterHormannInterpolator([0], 0, d=0)
with pytest.raises(ValueError, match="`x` be of size 2 but got size 1."):
FloaterHormannInterpolator([0], [[1, 1], [1, 1]], d=0)
with pytest.raises(ValueError, match="finite"):
FloaterHormannInterpolator([np.inf], [1], d=0)
with pytest.raises(ValueError, match="`d`"):
FloaterHormannInterpolator([0], [0], d=-1)
with pytest.raises(ValueError, match="`d`"):
FloaterHormannInterpolator([0], [0], d=10)
with pytest.raises(TypeError):
FloaterHormannInterpolator([0], [0], d=0.0)
# reference values from Floater and Hormann 2007 page 8.
@pytest.mark.parametrize("d,expected", [
(0, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
(1, [1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]),
(2, [1, 3, 4, 4, 4, 4, 4, 4, 4, 3, 1]),
(3, [1, 4, 7, 8, 8, 8, 8, 8, 7, 4, 1]),
(4, [1, 5, 11, 15, 16, 16, 16, 15, 11, 5, 1])
])
def test_uniform_grid(self, d, expected):
# Check against explicit results on an uniform grid
x = np.arange(11)
r = FloaterHormannInterpolator(x, 0.0*x, d=d)
assert_allclose(r.weights.ravel()*self.scale(x.size, d), expected,
rtol=1e-15, atol=1e-15)
@pytest.mark.parametrize("d", range(10))
def test_runge(self, d):
x = np.linspace(0, 1, 51)
rng = np.random.default_rng(802754237598370893)
xx = rng.uniform(0, 1, size=1000)
y = self.runge(x)
h = x[1] - x[0]
r = FloaterHormannInterpolator(x, y, d=d)
tol = 10*h**(d+1)
assert_allclose(r(xx), self.runge(xx), atol=1e-10, rtol=tol)
# check interpolation property
assert_equal(r(x), self.runge(x))
def test_complex(self):
x = np.linspace(-1, 1)
z = x + x*1j
r = FloaterHormannInterpolator(z, np.sin(z), d=12)
xx = np.linspace(-1, 1, num=1000)
zz = xx + xx*1j
assert_allclose(r(zz), np.sin(zz), rtol=1e-12)
def test_polyinterp(self):
# check that when d=n-1 FH gives a polynomial interpolant
x = np.linspace(0, 1, 11)
xx = np.linspace(0, 1, 1001)
y = np.sin(x)
r = FloaterHormannInterpolator(x, y, d=x.size-1)
p = BarycentricInterpolator(x, y)
assert_allclose(r(xx), p(xx), rtol=1e-12, atol=1e-12)
@pytest.mark.parametrize("y_shape", [(2,), (2, 3, 1), (1, 5, 6, 4)])
@pytest.mark.parametrize("xx_shape", [(100), (10, 10)])
def test_trailing_dim(self, y_shape, xx_shape):
x = np.linspace(0, 1)
y = np.broadcast_to(
np.expand_dims(np.sin(x), tuple(range(1, len(y_shape) + 1))),
x.shape + y_shape
)
r = FloaterHormannInterpolator(x, y)
rng = np.random.default_rng(897138947238097528091759187597)
xx = rng.random(xx_shape)
yy = np.broadcast_to(
np.expand_dims(np.sin(xx), tuple(range(xx.ndim, len(y_shape) + xx.ndim))),
xx.shape + y_shape
)
rr = r(xx)
assert rr.shape == xx.shape + y_shape
assert_allclose(rr, yy, rtol=1e-6)
def test_zeros(self):
x = np.linspace(0, 10, num=100)
r = FloaterHormannInterpolator(x, np.sin(np.pi*x))
err = np.abs(np.subtract.outer(r.roots(), np.arange(11))).min(axis=0)
assert_array_less(err, 1e-5)
def test_no_poles(self):
x = np.linspace(-1, 1)
r = FloaterHormannInterpolator(x, 1/x**2)
p = r.poles()
mask = (p.real >= -1) & (p.real <= 1) & (np.abs(p.imag) < 1.e-12)
assert np.sum(mask) == 0
@pytest.mark.parametrize('eval_shape', [(), (1,), (3,)])
@pytest.mark.parametrize('axis', [-1, 0, 1])
def test_batch(self, eval_shape, axis):
rng = np.random.default_rng(4329872134985134)
n = 10
shape = (2, 3, 4, n)
domain = (0, 10)
x = np.linspace(*domain, n)
y = np.moveaxis(rng.random(shape), -1, axis)
res = FloaterHormannInterpolator(x, y, axis=axis)
ref = BatchFloaterHormann(x, y, axis=axis)
x = rng.uniform(*domain, size=eval_shape)
assert_allclose(res(x), ref(x))
pytest.raises(NotImplementedError, res.roots)
pytest.raises(NotImplementedError, res.residues)
| TestFloaterHormann |
python | h5py__h5py | benchmarks/benchmark_slicing.py | {
"start": 1100,
"end": 7090
} | class ____:
"""
Benchmark for reading slices in the most pathlogical way in a chunked dataset
Allows the test
"""
def __init__(self, ndim=3, size=1024, chunk=64, dtype="float32", precision=16, compression_kwargs=None):
"""
Defines some parameters for the benchmark, can be tuned later on.
:param ndim: work in 3D datasets
:param size: Volume size 1024**3 elements
:param chunk: size of one chunk, with itemsize = 32bits this makes block size of 1MB by default
:param dtype: the type of data to be stored
:param precision: to gain a bit in compression, number of trailing bits to be zeroed.
:param compression_kwargs: a dict with all options for configuring the compression
"""
self.ndim = ndim
self.size = size
self.dtype = numpy.dtype(dtype)
self.chunk = chunk
self.precision = precision
self.tmpdir = None
self.filename = None
self.h5path = "data"
self.total_size = self.size ** self.ndim * self.dtype.itemsize
self.needed_memory = self.size ** (self.ndim-1) * self.dtype.itemsize * self.chunk
if compression_kwargs is None:
self.compression = {}
else:
self.compression = dict(compression_kwargs)
def setup(self):
self.tmpdir = TemporaryDirectory()
self.filename = os.path.join(self.tmpdir.name, "benchmark_slicing.h5")
logger.info("Saving data in %s", self.filename)
logger.info("Total size: %i^%i volume size: %.3fGB, Needed memory: %.3fGB",
self.size, self.ndim, self.total_size/1e9, self.needed_memory/1e9)
shape = [self.size] * self.ndim
chunks = (self.chunk,) * self.ndim
if self.precision and self.dtype.char in "df":
if self.dtype.itemsize == 4:
mask = numpy.uint32(((1<<32) - (1<<(self.precision))))
elif self.dtype.itemsize == 8:
mask = numpy.uint64(((1<<64) - (1<<(self.precision))))
else:
logger.warning("Precision reduction: only float32 and float64 are supported")
else:
self.precision = 0
t0 = time.time()
with h5py.File(self.filename, 'w') as h:
ds = h.create_dataset(self.h5path,
shape,
chunks=chunks,
**self.compression)
for i in range(0, self.size, self.chunk):
x, y, z = numpy.ogrid[i:i+self.chunk, :self.size, :self.size]
data = (numpy.sin(x/3)*numpy.sin(y/5)*numpy.sin(z/7)).astype(self.dtype)
if self.precision:
idata = data.view(mask.dtype)
idata &= mask # mask out the last XX bits
ds[i:i+self.chunk] = data
t1 = time.time()
dt = t1 - t0
filesize = os.stat(self.filename).st_size
logger.info("Compression: %.3f time %.3fs uncompressed data saving speed %.3f MB/s effective write speed %.3f MB/s ",
self.total_size/filesize, dt, self.total_size/dt/1e6, filesize/dt/1e6)
def teardown(self):
self.tmpdir.cleanup()
self.filename = None
@staticmethod
def read_slice(dataset, position):
"""This reads all hyperplans crossing at the given position:
enforces many reads of different chunks,
Probably one of the most pathlogical use-case"""
assert dataset.ndim == len(position)
l = len(position)
res = []
noneslice = slice(None)
for i, w in enumerate(position):
where = [noneslice]*i + [w] + [noneslice]*(l - 1 - i)
res.append(dataset[tuple(where)])
return res
def time_sequential_reads(self, nb_read=64):
"Perform the reading of many orthogonal hyperplanes"
where = [[(i*(self.chunk+1+j))%self.size for j in range(self.ndim)] for i in range(nb_read)]
with h5py.File(self.filename, "r") as h:
ds = h[self.h5path]
t0 = time.time()
for i in where:
self.read_slice(ds, i)
t1 = time.time()
dt = t1 - t0
logger.info("Time for reading %sx%s slices: %.3fs fps: %.3f "%(self.ndim, nb_read, dt, self.ndim*nb_read/dt) +
"Uncompressed data read speed %.3f MB/s"%(self.ndim*nb_read*self.needed_memory/dt/1e6))
return dt
def time_threaded_reads(self, nb_read=64, nthreads=multiprocessing.cpu_count()):
"Perform the reading of many orthogonal hyperplanes, threaded version"
where = [[(i*(self.chunk+1+j))%self.size for j in range(self.ndim)] for i in range(nb_read)]
tasks = Queue()
results = Queue()
quitevent = Event()
pool = [Reader(tasks, results, quitevent) for i in range(nthreads)]
res = []
with h5py.File(self.filename, "r") as h:
ds = h[self.h5path]
t0 = time.time()
for i in where:
tasks.put((self.read_slice, ds, i))
for i in where:
a = results.get()
res.append(a[0])
results.task_done()
tasks.join()
results.join()
t1 = time.time()
# destroy the threads in the pool
quitevent.set()
for i in range(nthreads):
tasks.put(None)
dt = t1 - t0
logger.info("Time for %s-threaded reading %sx%s slices: %.3fs fps: %.3f "%(nthreads, self.ndim, nb_read, dt, self.ndim*nb_read/dt) +
"Uncompressed data read speed %.3f MB/s"%(self.ndim*nb_read*self.needed_memory/dt/1e6))
return dt
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
benchmark = SlicingBenchmark()
benchmark.setup()
benchmark.time_sequential_reads()
benchmark.time_threaded_reads()
benchmark.teardown()
| SlicingBenchmark |
python | pennersr__django-allauth | allauth/idp/oidc/forms.py | {
"start": 2627,
"end": 2717
} | class ____(forms.Form):
action = forms.CharField(required=False)
| DeviceAuthorizationForm |
python | cherrypy__cherrypy | cherrypy/test/test_virtualhost.py | {
"start": 125,
"end": 4201
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self):
return 'Hello, world'
@cherrypy.expose
def dom4(self):
return 'Under construction'
@cherrypy.expose
def method(self, value):
return 'You sent %s' % value
class VHost:
def __init__(self, sitename):
self.sitename = sitename
@cherrypy.expose
def index(self):
return 'Welcome to %s' % self.sitename
@cherrypy.expose
def vmethod(self, value):
return 'You sent %s' % value
@cherrypy.expose
def url(self):
return cherrypy.url('nextpage')
# Test static as a handler (section must NOT include vhost prefix)
static = cherrypy.tools.staticdir.handler(
section='/static',
dir=curdir,
)
root = Root()
root.mydom2 = VHost('Domain 2')
root.mydom3 = VHost('Domain 3')
hostmap = {
'www.mydom2.com': '/mydom2',
'www.mydom3.com': '/mydom3',
'www.mydom4.com': '/dom4',
}
cherrypy.tree.mount(
root,
config={
'/': {
'request.dispatch': cherrypy.dispatch.VirtualHost(
**hostmap,
),
},
# Test static in config (section must include vhost prefix)
'/mydom2/static2': {
'tools.staticdir.on': True,
'tools.staticdir.root': curdir,
'tools.staticdir.dir': 'static',
'tools.staticdir.index': 'index.html',
},
},
)
def testVirtualHost(self):
self.getPage('/', [('Host', 'www.mydom1.com')])
self.assertBody('Hello, world')
self.getPage('/mydom2/', [('Host', 'www.mydom1.com')])
self.assertBody('Welcome to Domain 2')
self.getPage('/', [('Host', 'www.mydom2.com')])
self.assertBody('Welcome to Domain 2')
self.getPage('/', [('Host', 'www.mydom3.com')])
self.assertBody('Welcome to Domain 3')
self.getPage('/', [('Host', 'www.mydom4.com')])
self.assertBody('Under construction')
# Test GET, POST, and positional params
self.getPage('/method?value=root')
self.assertBody('You sent root')
self.getPage('/vmethod?value=dom2+GET', [('Host', 'www.mydom2.com')])
self.assertBody('You sent dom2 GET')
self.getPage(
'/vmethod',
[('Host', 'www.mydom3.com')],
method='POST',
body='value=dom3+POST',
)
self.assertBody('You sent dom3 POST')
self.getPage('/vmethod/pos', [('Host', 'www.mydom3.com')])
self.assertBody('You sent pos')
# Test that cherrypy.url uses the browser url, not the virtual url
self.getPage('/url', [('Host', 'www.mydom2.com')])
self.assertBody('%s://www.mydom2.com/nextpage' % self.scheme)
def test_VHost_plus_Static(self):
# Test static as a handler
self.getPage('/static/style.css', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/css;charset=utf-8')
# Test static in config
self.getPage('/static2/dirback.jpg', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertHeaderIn('Content-Type', ['image/jpeg', 'image/pjpeg'])
# Test static config with "index" arg
self.getPage('/static2/', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertBody('Hello, world\r\n')
# Since tools.trailing_slash is on by default, this should redirect
self.getPage('/static2', [('Host', 'www.mydom2.com')])
self.assertStatus(301)
| VirtualHostTest |
python | jazzband__pip-tools | piptools/_compat/pip_compat.py | {
"start": 1358,
"end": 2661
} | class ____:
key: str
version: str
requires: Iterable[Requirement]
direct_url: DirectUrl | None
@classmethod
def from_pip_distribution(cls, dist: BaseDistribution) -> Distribution:
# TODO: Use only the BaseDistribution protocol properties and methods
# instead of specializing by type.
if isinstance(dist, _PkgResourcesDist):
return cls._from_pkg_resources(dist)
else:
return cls._from_importlib(dist)
@classmethod
def _from_pkg_resources(cls, dist: _PkgResourcesDist) -> Distribution:
return cls(
dist._dist.key, dist._dist.version, dist._dist.requires(), dist.direct_url
)
@classmethod
def _from_importlib(cls, dist: _ImportLibDist) -> Distribution:
"""Mimic pkg_resources.Distribution.requires for the case of no
extras.
This doesn't fulfill that API's ``extras`` parameter but
satisfies the needs of pip-tools.
"""
reqs = (Requirement.parse(req) for req in (dist._dist.requires or ()))
requires = [
req
for req in reqs
if not req.marker or req.marker.evaluate({"extra": None})
]
return cls(dist._dist.name, dist._dist.version, requires, dist.direct_url)
| Distribution |
python | optuna__optuna | optuna/samplers/_brute_force.py | {
"start": 4063,
"end": 11665
} | class ____(BaseSampler):
"""Sampler using brute force.
This sampler performs exhaustive search on the defined search space.
Example:
.. testcode::
import optuna
def objective(trial):
c = trial.suggest_categorical("c", ["float", "int"])
if c == "float":
return trial.suggest_float("x", 1, 3, step=0.5)
elif c == "int":
a = trial.suggest_int("a", 1, 3)
b = trial.suggest_int("b", a, 3)
return a + b
study = optuna.create_study(sampler=optuna.samplers.BruteForceSampler())
study.optimize(objective)
Note:
The defined search space must be finite. Therefore, when using
:class:`~optuna.distributions.FloatDistribution` or
:func:`~optuna.trial.Trial.suggest_float`, ``step=None`` is not allowed.
Note:
The sampler may fail to try the entire search space in when the suggestion ranges or
parameters are changed in the same :class:`~optuna.study.Study`.
Args:
seed:
A seed to fix the order of trials as the search order randomly shuffled. Please note
that it is not recommended using this option in distributed optimization settings since
this option cannot ensure the order of trials and may increase the number of duplicate
suggestions during distributed optimization.
avoid_premature_stop:
If :obj:`True`, the sampler performs a strict exhaustive search. Please note
that enabling this option may increase the likelihood of duplicate sampling.
When this option is not enabled (default), the sampler applies a looser criterion for
determining when to stop the search, which may result in incomplete coverage of the
search space. For more information, see https://github.com/optuna/optuna/issues/5780.
"""
def __init__(self, seed: int | None = None, avoid_premature_stop: bool = False) -> None:
self._rng = LazyRandomState(seed)
self._avoid_premature_stop = avoid_premature_stop
def infer_relative_search_space(
self, study: Study, trial: FrozenTrial
) -> dict[str, BaseDistribution]:
return {}
def sample_relative(
self, study: Study, trial: FrozenTrial, search_space: dict[str, BaseDistribution]
) -> dict[str, Any]:
return {}
@staticmethod
def _populate_tree(
tree: _TreeNode, trials: Iterable[FrozenTrial], params: dict[str, Any]
) -> None:
# Populate tree under given params from the given trials.
for trial in trials:
if not all(p in trial.params and trial.params[p] == v for p, v in params.items()):
continue
leaf = tree.add_path(
(
(
param_name,
_enumerate_candidates(param_distribution),
param_distribution.to_internal_repr(trial.params[param_name]),
)
for param_name, param_distribution in trial.distributions.items()
if param_name not in params
)
)
if leaf is not None:
# The parameters are on the defined grid.
if trial.state.is_finished():
leaf.set_leaf()
else:
leaf.set_running()
def sample_independent(
self,
study: Study,
trial: FrozenTrial,
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
exclude_running = not self._avoid_premature_stop
# We directly query the storage to get trials here instead of `study.get_trials`,
# since some pruners such as `HyperbandPruner` use the study transformed
# to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.
trials = study._storage.get_all_trials(
study._study_id,
deepcopy=False,
states=(
TrialState.COMPLETE,
TrialState.PRUNED,
TrialState.RUNNING,
TrialState.FAIL,
),
)
tree = _TreeNode()
candidates = _enumerate_candidates(param_distribution)
tree.expand(param_name, candidates)
# Populating must happen after the initialization above to prevent `tree` from
# being initialized as an empty graph, which is created with n_jobs > 1
# where we get trials[i].params = {} for some i.
self._populate_tree(tree, (t for t in trials if t.number != trial.number), trial.params)
if tree.count_unexpanded(exclude_running) == 0:
return param_distribution.to_external_repr(self._rng.rng.choice(candidates))
else:
return param_distribution.to_external_repr(
tree.sample_child(self._rng.rng, exclude_running)
)
def after_trial(
self,
study: Study,
trial: FrozenTrial,
state: TrialState,
values: Sequence[float] | None,
) -> None:
exclude_running = not self._avoid_premature_stop
# We directly query the storage to get trials here instead of `study.get_trials`,
# since some pruners such as `HyperbandPruner` use the study transformed
# to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.
trials = study._storage.get_all_trials(
study._study_id,
deepcopy=False,
states=(
TrialState.COMPLETE,
TrialState.PRUNED,
TrialState.RUNNING,
TrialState.FAIL,
),
)
tree = _TreeNode()
self._populate_tree(
tree,
(
(
t
if t.number != trial.number
else create_trial(
state=state, # Set current trial as complete.
values=values,
params=trial.params,
distributions=trial.distributions,
)
)
for t in trials
),
{},
)
if tree.count_unexpanded(exclude_running) == 0:
study.stop()
def _enumerate_candidates(param_distribution: BaseDistribution) -> Sequence[float]:
if isinstance(param_distribution, FloatDistribution):
if param_distribution.step is None:
raise ValueError(
"FloatDistribution.step must be given for BruteForceSampler"
" (otherwise, the search space will be infinite)."
)
low = decimal.Decimal(str(param_distribution.low))
high = decimal.Decimal(str(param_distribution.high))
step = decimal.Decimal(str(param_distribution.step))
ret = []
value = low
while value <= high:
ret.append(float(value))
value += step
return ret
elif isinstance(param_distribution, IntDistribution):
return list(
range(param_distribution.low, param_distribution.high + 1, param_distribution.step)
)
elif isinstance(param_distribution, CategoricalDistribution):
return list(range(len(param_distribution.choices))) # Internal representations.
else:
raise ValueError(f"Unknown distribution {param_distribution}.")
| BruteForceSampler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.