language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
src/transformers/models/blip/image_processing_blip.py
{ "start": 1367, "end": 15069 }
class ____(BaseImageProcessor): r""" Constructs a BLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. 
Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 384, "width": 384} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. 
`PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: Optional[bool] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Controls the size of the image after `resize`. The shortest edge of the image is resized to `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest edge equal to `int(size["shortest_edge"] * (1333 / 800))`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. 
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to normalize the image by if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to normalize the image by if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = self.fetch_images(images) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs __all__ = ["BlipImageProcessor"]
BlipImageProcessor
python
astropy__astropy
astropy/io/votable/exceptions.py
{ "start": 28566, "end": 29048 }
class ____(VOTableSpecWarning): """ If the field specifies a ``null`` value, that value must conform to the given ``datatype``. **References:** `1.1 <http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:values>`__, `1.2 <http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:values>`__ """ message_template = "null value '{}' does not match field datatype, setting to 0" default_args = ("x",)
W36
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/ec2.py
{ "start": 1329, "end": 3495 }
class ____(AwsBaseOperator[EC2Hook]): """ Start AWS EC2 instance using boto3. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:EC2StartInstanceOperator` :param instance_id: id of the AWS EC2 instance :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param check_interval: time in seconds that the job should wait in between each instance state checks until operation is completed """ aws_hook_class = EC2Hook operator_extra_links = (EC2InstanceLink(),) template_fields: Sequence[str] = aws_template_fields("instance_id", "region_name") ui_color = "#eeaa11" ui_fgcolor = "#ffffff" def __init__( self, *, instance_id: str, check_interval: float = 15, **kwargs, ): super().__init__(**kwargs) self.instance_id = instance_id self.check_interval = check_interval def execute(self, context: Context): self.log.info("Starting EC2 instance %s", self.instance_id) instance = self.hook.get_instance(instance_id=self.instance_id) instance.start() EC2InstanceLink.persist( context=context, operator=self, aws_partition=self.hook.conn_partition, instance_id=self.instance_id, region_name=self.hook.conn_region_name, ) self.hook.wait_for_state( instance_id=self.instance_id, target_state="running", check_interval=self.check_interval, )
EC2StartInstanceOperator
python
encode__django-rest-framework
tests/authentication/models.py
{ "start": 64, "end": 241 }
class ____(models.Model): key = models.CharField(max_length=40, primary_key=True) user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
CustomToken
python
matplotlib__matplotlib
lib/matplotlib/image.py
{ "start": 32193, "end": 39793 }
class ____(_ImageBase): """ An image with pixels on a regular grid, attached to an Axes. Parameters ---------- ax : `~matplotlib.axes.Axes` The Axes the image will belong to. cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap` The Colormap instance or registered colormap name used to map scalar data to colors. norm : str or `~matplotlib.colors.Normalize` Maps luminance to 0-1. interpolation : str, default: :rc:`image.interpolation` Supported values are 'none', 'auto', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'blackman'. interpolation_stage : {'data', 'rgba'}, default: 'data' If 'data', interpolation is carried out on the data provided by the user. If 'rgba', the interpolation is carried out after the colormapping has been applied (visual interpolation). origin : {'upper', 'lower'}, default: :rc:`image.origin` Place the [0, 0] index of the array in the upper left or lower left corner of the Axes. The convention 'upper' is typically used for matrices and images. extent : tuple, optional The data axes (left, right, bottom, top) for making image plots registered with data plots. Default is to label the pixel centers with the zero-based row and column indices. filternorm : bool, default: True A parameter for the antigrain image resize filter (see the antigrain documentation). If filternorm is set, the filter normalizes integer values and corrects the rounding errors. It doesn't do anything with the source floating point values, it corrects only integers according to the rule of 1.0 which means that any sum of pixel weights must be equal to 1.0. So, the filter function must produce a graph of the proper shape. filterrad : float > 0, default: 4 The filter radius for filters that have a radius parameter, i.e. when interpolation is one of: 'sinc', 'lanczos' or 'blackman'. 
resample : bool, default: False When True, use a full resampling method. When False, only resample when the output image is larger than the input image. **kwargs : `~matplotlib.artist.Artist` properties """ def __init__(self, ax, *, cmap=None, norm=None, colorizer=None, interpolation=None, origin=None, extent=None, filternorm=True, filterrad=4.0, resample=False, interpolation_stage=None, **kwargs ): self._extent = extent super().__init__( ax, cmap=cmap, norm=norm, colorizer=colorizer, interpolation=interpolation, origin=origin, filternorm=filternorm, filterrad=filterrad, resample=resample, interpolation_stage=interpolation_stage, **kwargs ) def get_window_extent(self, renderer=None): x0, x1, y0, y1 = self._extent bbox = Bbox.from_extents([x0, y0, x1, y1]) return bbox.transformed(self.get_transform()) def make_image(self, renderer, magnification=1.0, unsampled=False): # docstring inherited trans = self.get_transform() # image is created in the canvas coordinate. x1, x2, y1, y2 = self.get_extent() bbox = Bbox(np.array([[x1, y1], [x2, y2]])) transformed_bbox = TransformedBbox(bbox, trans) clip = ((self.get_clip_box() or self.axes.bbox) if self.get_clip_on() else self.get_figure(root=True).bbox) return self._make_image(self._A, bbox, transformed_bbox, clip, magnification, unsampled=unsampled) def _check_unsampled_image(self): """Return whether the image would be better drawn unsampled.""" return self.get_interpolation() == "none" def set_extent(self, extent, **kwargs): """ Set the image extent. Parameters ---------- extent : 4-tuple of float The position and size of the image as tuple ``(left, right, bottom, top)`` in data coordinates. **kwargs Other parameters from which unit info (i.e., the *xunits*, *yunits*, *zunits* (for 3D Axes), *runits* and *thetaunits* (for polar Axes) entries are applied, if present. Notes ----- This updates `.Axes.dataLim`, and, if autoscaling, sets `.Axes.viewLim` to tightly fit the image, regardless of `~.Axes.dataLim`. 
Autoscaling state is not changed, so a subsequent call to `.Axes.autoscale_view` will redo the autoscaling in accord with `~.Axes.dataLim`. """ (xmin, xmax), (ymin, ymax) = self.axes._process_unit_info( [("x", [extent[0], extent[1]]), ("y", [extent[2], extent[3]])], kwargs) if kwargs: raise _api.kwarg_error("set_extent", kwargs) xmin = self.axes._validate_converted_limits( xmin, self.convert_xunits) xmax = self.axes._validate_converted_limits( xmax, self.convert_xunits) ymin = self.axes._validate_converted_limits( ymin, self.convert_yunits) ymax = self.axes._validate_converted_limits( ymax, self.convert_yunits) extent = [xmin, xmax, ymin, ymax] self._extent = extent corners = (xmin, ymin), (xmax, ymax) self.axes.update_datalim(corners) self.sticky_edges.x[:] = [xmin, xmax] self.sticky_edges.y[:] = [ymin, ymax] if self.axes.get_autoscalex_on(): self.axes.set_xlim(xmin, xmax, auto=None) if self.axes.get_autoscaley_on(): self.axes.set_ylim(ymin, ymax, auto=None) self.stale = True def get_extent(self): """Return the image extent as tuple (left, right, bottom, top).""" if self._extent is not None: return self._extent else: sz = self.get_size() numrows, numcols = sz if self.origin == 'upper': return (-0.5, numcols-0.5, numrows-0.5, -0.5) else: return (-0.5, numcols-0.5, -0.5, numrows-0.5) def get_cursor_data(self, event): """ Return the image value at the event position or *None* if the event is outside the image. 
See Also -------- matplotlib.artist.Artist.get_cursor_data """ xmin, xmax, ymin, ymax = self.get_extent() if self.origin == 'upper': ymin, ymax = ymax, ymin arr = self.get_array() data_extent = Bbox([[xmin, ymin], [xmax, ymax]]) array_extent = Bbox([[0, 0], [arr.shape[1], arr.shape[0]]]) trans = self.get_transform().inverted() trans += BboxTransform(boxin=data_extent, boxout=array_extent) point = trans.transform([event.x, event.y]) if any(np.isnan(point)): return None j, i = point.astype(int) # Clip the coordinates at array bounds if not (0 <= i < arr.shape[0]) or not (0 <= j < arr.shape[1]): return None else: return arr[i, j]
AxesImage
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/waiters/test_glue.py
{ "start": 1701, "end": 3111 }
class ____(TestGlueDataQualityCustomWaitersBase): WAITER_NAME = "data_quality_ruleset_evaluation_run_complete" @pytest.fixture def mock_get_job(self): with mock.patch.object(self.client, "get_data_quality_ruleset_evaluation_run") as mock_getter: yield mock_getter @pytest.mark.parametrize("state", GlueDataQualityRuleSetEvaluationRunSensor.SUCCESS_STATES) def test_data_quality_ruleset_evaluation_run_complete(self, state, mock_get_job): mock_get_job.return_value = {"Status": state} GlueDataQualityHook().get_waiter(self.WAITER_NAME).wait(RunId="run_id") @pytest.mark.parametrize("state", GlueDataQualityRuleSetEvaluationRunSensor.FAILURE_STATES) def test_data_quality_ruleset_evaluation_run_failed(self, state, mock_get_job): mock_get_job.return_value = {"Status": state} with pytest.raises(botocore.exceptions.WaiterError): GlueDataQualityHook().get_waiter(self.WAITER_NAME).wait(RunId="run_id") def test_data_quality_ruleset_evaluation_run_wait(self, mock_get_job): wait = {"Status": "RUNNING"} success = {"Status": "SUCCEEDED"} mock_get_job.side_effect = [wait, wait, success] GlueDataQualityHook().get_waiter(self.WAITER_NAME).wait( RunIc="run_id", WaiterConfig={"Delay": 0.01, "MaxAttempts": 3} )
TestGlueDataQualityRuleSetEvaluationRunCompleteWaiter
python
django__django
django/utils/archive.py
{ "start": 1756, "end": 2987 }
class ____: """ The external API class that encapsulates an archive implementation. """ def __init__(self, file): self._archive = self._archive_cls(file)(file) @staticmethod def _archive_cls(file): cls = None if isinstance(file, str): filename = file else: try: filename = file.name except AttributeError: raise UnrecognizedArchiveFormat( "File object not a recognized archive format." ) base, tail_ext = os.path.splitext(filename.lower()) cls = extension_map.get(tail_ext) if not cls: base, ext = os.path.splitext(base) cls = extension_map.get(ext) if not cls: raise UnrecognizedArchiveFormat( "Path not a recognized archive format: %s" % filename ) return cls def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def extract(self, to_path): self._archive.extract(to_path) def list(self): self._archive.list() def close(self): self._archive.close()
Archive
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 70095, "end": 70377 }
class ____(_PrintableStructure): _fields_ = [ ('isGridLicenseSupported', c_int), ('licensableFeaturesCount', c_uint), ('gridLicensableFeatures', c_nvmlGridLicensableFeature_v3_t * NVML_GRID_LICENSE_FEATURE_MAX_COUNT), ]
c_nvmlGridLicensableFeatures_v3_t
python
sanic-org__sanic
guide/webapp/display/markdown.py
{ "start": 741, "end": 4494 }
class ____(HTMLRenderer): def block_code(self, code: str, info: str | None = None): builder = Builder("Block") with builder.div(class_="code-block"): if info: lexer = get_lexer_by_name(info, stripall=False) formatter = html.HtmlFormatter( style=SanicCodeStyle, wrapcode=True, cssclass=f"highlight language-{info}", ) builder(HTML(highlight(code, lexer, formatter))) with builder.div( class_="code-block__copy", onclick="copyCode(this)", ): builder.div( class_="code-block__rectangle code-block__filled" ).div(class_="code-block__rectangle code-block__outlined") else: builder.pre(E.code(escape(code))) return str(builder) def heading(self, text: str, level: int, **attrs) -> str: ident = slugify(text) if level > 1: text += self._make_tag( "a", {"href": f"#{ident}", "class": "anchor"}, "#" ) return self._make_tag( f"h{level}", { "id": ident, "class": ( f"is-size-{level}-desktop is-size-{level + 2}-touch" ), }, text, ) def link(self, text: str, url: str, title: str | None = None) -> str: url = self.safe_url(url).replace(".md", ".html") url, anchor = url.split("#", 1) if "#" in url else (url, None) if ( not url.endswith("/") and not url.endswith(".html") and not url.startswith("http") ): url += ".html" if anchor: url += f"#{anchor}" attributes: dict[str, str] = {"href": url} if title: attributes["title"] = safe_entity(title) if url.startswith("http"): attributes["target"] = "_blank" attributes["rel"] = "nofollow noreferrer" else: attributes["hx-get"] = url attributes["hx-target"] = "#content" attributes["hx-swap"] = "innerHTML" attributes["hx-push-url"] = "true" return self._make_tag("a", attributes, text) def span(self, text, classes, **attrs) -> str: if classes: attrs["class"] = classes return self._make_tag("span", attrs, text) def list(self, text: str, ordered: bool, **attrs) -> str: tag = "ol" if ordered else "ul" attrs["class"] = tag return self._make_tag(tag, attrs, text) def list_item(self, text: str, **attrs) -> str: attrs["class"] = "li" return self._make_tag("li", 
attrs, text) def table(self, text: str, **attrs) -> str: attrs["class"] = "table is-fullwidth is-bordered" return self._make_tag("table", attrs, text) def inline_directive(self, text: str, **attrs) -> str: num_dots = text.count(".") display = self.codespan(text) if num_dots <= 1: return display module, *_ = text.rsplit(".", num_dots - 1) href = f"/api/{module}.html" return self._make_tag( "a", {"href": href, "class": "inline-directive"}, display, ) def _make_tag( self, tag: str, attributes: dict[str, str], text: str | None = None ) -> str: attrs = " ".join( f'{key}="{value}"' for key, value in attributes.items() ) if text is None: return f"<{tag} {attrs} />" return f"<{tag} {attrs}>{text}</{tag}>"
DocsRenderer
python
patrick-kidger__equinox
equinox/_module/_module.py
{ "start": 1721, "end": 2629 }
class ____(eqx.Module): @property def foo(self): return self.bar def bar(self): ... ``` so that you can still use `self.foo`, but it is not stored in the PyTree structure. This is a check that was introduced in Equinox v0.11.0. Before this, the above error went uncaught, possibly leading to silently wrong behaviour. """ def _error_method_assignment(self, value: object, /) -> None: if isinstance(value, BoundMethod) and value.__self__ is self: raise ValueError(MSG_METHOD_IN_INIT) _transform_types: set[type] = { type(transform(lambda x: x)) for transform in ( jax.jit, jax.grad, jax.vmap, jax.value_and_grad, jax.jacfwd, jax.jacrev, jax.hessian, jax.custom_jvp, jax.custom_vjp, jax.checkpoint, # pyright: ignore[reportPrivateImportUsage] jax.pmap, ) }
MyModule
python
numba__numba
numba/cuda/cg.py
{ "start": 275, "end": 1490 }
class ____: """A cooperative group representing the entire grid""" def sync() -> None: """Synchronize this grid group""" def this_grid() -> GridGroup: """Get the current grid group.""" return GridGroup() @intrinsic def _this_grid(typingctx): sig = signature(grid_group) def codegen(context, builder, sig, args): one = context.get_constant(types.int32, 1) mod = builder.module return builder.call( nvvmutils.declare_cudaCGGetIntrinsicHandle(mod), (one,)) return sig, codegen @overload(this_grid, target='cuda') def _ol_this_grid(): def impl(): return _this_grid() return impl @intrinsic def _grid_group_sync(typingctx, group): sig = signature(types.int32, group) def codegen(context, builder, sig, args): flags = context.get_constant(types.int32, 0) mod = builder.module return builder.call( nvvmutils.declare_cudaCGSynchronize(mod), (*args, flags)) return sig, codegen @overload_method(GridGroupClass, 'sync', target='cuda') def _ol_grid_group_sync(group): def impl(group): return _grid_group_sync(group) return impl
GridGroup
python
getsentry__sentry
src/sentry/replays/lib/http.py
{ "start": 481, "end": 1217 }
class ____: """Bounded range header. A bounded range header is a pair of integers representing the inclusive range of a unit in the resource. """ def __init__(self, start: int, end: int) -> None: self.start = start self.end = end def make_range(self, last_index: int) -> tuple[int, int]: if self.start > last_index or self.end < self.start or self.start < 0: raise UnsatisfiableRange() return (self.start, min(last_index, self.end)) def read_range(self, bytes: io.BytesIO) -> bytes: start_index, end_index = self.make_range(bytes.getbuffer().nbytes - 1) bytes.seek(start_index) return bytes.read(end_index - start_index + 1)
BoundedRange
python
pandas-dev__pandas
pandas/tests/frame/indexing/test_get.py
{ "start": 75, "end": 690 }
class ____: def test_get(self, float_frame): b = float_frame.get("B") tm.assert_series_equal(b, float_frame["B"]) assert float_frame.get("foo") is None tm.assert_series_equal( float_frame.get("foo", float_frame["B"]), float_frame["B"] ) @pytest.mark.parametrize( "columns, index", [ [None, None], [list("AB"), None], [list("AB"), range(3)], ], ) def test_get_none(self, columns, index): # see gh-5652 assert DataFrame(columns=columns, index=index).get(None) is None
TestGet
python
numba__numba
numba/core/typing/builtins.py
{ "start": 31128, "end": 31724 }
class ____(AbstractTemplate): def generic(self, args, kws): assert not kws it = args[0] if len(args) > 1 and not isinstance(args[1], types.Integer): raise errors.NumbaTypeError("Only integers supported as start " "value in enumerate") elif len(args) > 2: #let python raise its own error enumerate(*args) if isinstance(it, types.IterableType): enumerate_type = types.EnumerateType(it) return signature(enumerate_type, *args) @infer_global(zip)
Enumerate
python
django__django
tests/m2m_regress/models.py
{ "start": 1968, "end": 2171 }
class ____(models.Model): name = models.CharField(max_length=1) class Meta: abstract = True def split(self): raise RuntimeError("split should not be called")
BadModelWithSplit
python
pyca__cryptography
src/cryptography/hazmat/primitives/serialization/ssh.py
{ "start": 2404, "end": 6758 }
class ____: alg: type[algorithms.AES] key_len: int mode: type[modes.CTR] | type[modes.CBC] | type[modes.GCM] block_len: int iv_len: int tag_len: int | None is_aead: bool # ciphers that are actually used in key wrapping _SSH_CIPHERS: dict[bytes, _SSHCipher] = { b"aes256-ctr": _SSHCipher( alg=algorithms.AES, key_len=32, mode=modes.CTR, block_len=16, iv_len=16, tag_len=None, is_aead=False, ), b"aes256-cbc": _SSHCipher( alg=algorithms.AES, key_len=32, mode=modes.CBC, block_len=16, iv_len=16, tag_len=None, is_aead=False, ), b"aes256-gcm@openssh.com": _SSHCipher( alg=algorithms.AES, key_len=32, mode=modes.GCM, block_len=16, iv_len=12, tag_len=16, is_aead=True, ), } # map local curve name to key type _ECDSA_KEY_TYPE = { "secp256r1": _ECDSA_NISTP256, "secp384r1": _ECDSA_NISTP384, "secp521r1": _ECDSA_NISTP521, } def _get_ssh_key_type(key: SSHPrivateKeyTypes | SSHPublicKeyTypes) -> bytes: if isinstance(key, ec.EllipticCurvePrivateKey): key_type = _ecdsa_key_type(key.public_key()) elif isinstance(key, ec.EllipticCurvePublicKey): key_type = _ecdsa_key_type(key) elif isinstance(key, (rsa.RSAPrivateKey, rsa.RSAPublicKey)): key_type = _SSH_RSA elif isinstance(key, (dsa.DSAPrivateKey, dsa.DSAPublicKey)): key_type = _SSH_DSA elif isinstance( key, (ed25519.Ed25519PrivateKey, ed25519.Ed25519PublicKey) ): key_type = _SSH_ED25519 else: raise ValueError("Unsupported key type") return key_type def _ecdsa_key_type(public_key: ec.EllipticCurvePublicKey) -> bytes: """Return SSH key_type and curve_name for private key.""" curve = public_key.curve if curve.name not in _ECDSA_KEY_TYPE: raise ValueError( f"Unsupported curve for ssh private key: {curve.name!r}" ) return _ECDSA_KEY_TYPE[curve.name] def _ssh_pem_encode( data: utils.Buffer, prefix: bytes = _SK_START + b"\n", suffix: bytes = _SK_END + b"\n", ) -> bytes: return b"".join([prefix, _base64_encode(data), suffix]) def _check_block_size(data: utils.Buffer, block_len: int) -> None: """Require data to be full blocks""" if not data or 
len(data) % block_len != 0: raise ValueError("Corrupt data: missing padding") def _check_empty(data: utils.Buffer) -> None: """All data should have been parsed.""" if data: raise ValueError("Corrupt data: unparsed data") def _init_cipher( ciphername: bytes, password: bytes | None, salt: bytes, rounds: int, ) -> Cipher[modes.CBC | modes.CTR | modes.GCM]: """Generate key + iv and return cipher.""" if not password: raise TypeError( "Key is password-protected, but password was not provided." ) ciph = _SSH_CIPHERS[ciphername] seed = _bcrypt_kdf( password, salt, ciph.key_len + ciph.iv_len, rounds, True ) return Cipher( ciph.alg(seed[: ciph.key_len]), ciph.mode(seed[ciph.key_len :]), ) def _get_u32(data: memoryview) -> tuple[int, memoryview]: """Uint32""" if len(data) < 4: raise ValueError("Invalid data") return int.from_bytes(data[:4], byteorder="big"), data[4:] def _get_u64(data: memoryview) -> tuple[int, memoryview]: """Uint64""" if len(data) < 8: raise ValueError("Invalid data") return int.from_bytes(data[:8], byteorder="big"), data[8:] def _get_sshstr(data: memoryview) -> tuple[memoryview, memoryview]: """Bytes with u32 length prefix""" n, data = _get_u32(data) if n > len(data): raise ValueError("Invalid data") return data[:n], data[n:] def _get_mpint(data: memoryview) -> tuple[int, memoryview]: """Big integer.""" val, data = _get_sshstr(data) if val and val[0] > 0x7F: raise ValueError("Invalid data") return int.from_bytes(val, "big"), data def _to_mpint(val: int) -> bytes: """Storage format for signed bigint.""" if val < 0: raise ValueError("negative mpint not allowed") if not val: return b"" nbytes = (val.bit_length() + 8) // 8 return utils.int_to_bytes(val, nbytes)
_SSHCipher
python
pytorch__pytorch
tools/experimental/torchfuzz/operators/nn_functional.py
{ "start": 31783, "end": 33275 }
class ____(Operator): """Operator for torch.nn.functional.leaky_relu.""" def __init__(self): super().__init__("torch.nn.functional.leaky_relu") @property def torch_op_name(self) -> str | None: """Return the torch operation name.""" return "torch.nn.functional.leaky_relu" def can_produce(self, output_spec: Spec) -> bool: """LeakyReLU can produce tensor outputs with floating point dtypes.""" if not isinstance(output_spec, TensorSpec): return False return is_float_dtype(output_spec.dtype) def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]: """Generate input specs for LeakyReLU operation. LeakyReLU is element-wise, so input shape matches output shape. """ if not isinstance(output_spec, TensorSpec): raise ValueError("LeakyReLUOperator can only produce TensorSpec outputs") input_spec = TensorSpec( size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype ) return [input_spec] def codegen( self, output_name: str, input_names: list[str], output_spec: Spec ) -> str: """Generate code for LeakyReLU operation.""" if len(input_names) != 1: raise ValueError("LeakyReLU requires exactly 1 input") input_name = input_names[0] return f"{output_name} = torch.nn.functional.leaky_relu({input_name}, negative_slope=0.01)"
LeakyReLUOperator
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/i18n/i18n_utils.py
{ "start": 5517, "end": 7496 }
class ____(object): """A WSGI middleware for i18n. This middleware determines users' preferred language, loads the translations files, and install it to the builtin namespace of the Python runtime. """ def __init__(self, app, default_language="en", locale_path=None): """A constructor for this middleware. Args: app: A WSGI app that you want to wrap with this middleware. default_language: fallback language; ex: 'en', 'ja', etc. locale_path: A directory containing the translations file. (defaults to 'locales' directory) """ self.app = app if locale_path is None: locale_path = os.path.join( os.path.abspath(os.path.dirname(__file__)), "locales" ) self.locale_path = locale_path self.default_language = default_language def __call__(self, environ, start_response): """Called by WSGI when a request comes in. Args: environ: A dict holding environment variables. start_response: A WSGI callable (PEP333). Returns: Application response data as an iterable. It just returns the return value of the inner WSGI app. """ req = Request(environ) preferred_languages = list(req.accept_language) if self.default_language not in preferred_languages: preferred_languages.append(self.default_language) translation = gettext.translation( "messages", self.locale_path, fallback=True, languages=preferred_languages, codeset="utf-8", ) translation.install(unicode=True, names=["gettext", "ngettext"]) environ["i18n_utils.active_translation"] = translation environ["i18n_utils.preferred_languages"] = preferred_languages return self.app(environ, start_response)
I18nMiddleware
python
yaml__pyyaml
lib/yaml/events.py
{ "start": 667, "end": 1007 }
class ____(NodeEvent): def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, flow_style=None): self.anchor = anchor self.tag = tag self.implicit = implicit self.start_mark = start_mark self.end_mark = end_mark self.flow_style = flow_style
CollectionStartEvent
python
allegroai__clearml
clearml/backend_api/services/v2_20/projects.py
{ "start": 119426, "end": 122927 }
class ____(Request): """ Get unique parent tasks for the tasks in the specified projects :param projects: The list of projects which task parents are retrieved. If not passed or empty then all the projects are searched :type projects: Sequence[str] :param tasks_state: Return parents for tasks in the specified state. If Null is provided, parents for all task states will be returned. :type tasks_state: str :param include_subprojects: If set to 'true' and the projects field is not empty then the result includes tasks parents from the subproject tasks :type include_subprojects: bool """ _service = "projects" _action = "get_task_parents" _version = "2.20" _schema = { "definitions": {}, "properties": { "include_subprojects": { "default": True, "description": "If set to 'true' and the projects field is not empty then the result includes tasks parents from the subproject tasks", "type": ["boolean", "null"], }, "projects": { "description": "The list of projects which task parents are retieved. If not passed or empty then all the projects are searched", "items": {"type": "string"}, "type": ["array", "null"], }, "tasks_state": { "default": "active", "description": "Return parents for tasks in the specified state. 
If Null is provided, parents for all task states will be returned.", "enum": ["active", "archived"], "type": ["string", "null"], }, }, "type": "object", } def __init__( self, projects: Optional[List[str]] = None, tasks_state: Optional[str] = "active", include_subprojects: Optional[bool] = True, **kwargs: Any ) -> None: super(GetTaskParentsRequest, self).__init__(**kwargs) self.projects = projects self.tasks_state = tasks_state self.include_subprojects = include_subprojects @schema_property("projects") def projects(self) -> Optional[List[str]]: return self._property_projects @projects.setter def projects(self, value: Optional[List[str]]) -> None: if value is None: self._property_projects = None return self.assert_isinstance(value, "projects", (list, tuple)) self.assert_isinstance(value, "projects", six.string_types, is_array=True) self._property_projects = value @schema_property("tasks_state") def tasks_state(self) -> Optional[str]: return self._property_tasks_state @tasks_state.setter def tasks_state(self, value: Optional[str]) -> None: if value is None: self._property_tasks_state = None return self.assert_isinstance(value, "tasks_state", six.string_types) self._property_tasks_state = value @schema_property("include_subprojects") def include_subprojects(self) -> Optional[bool]: return self._property_include_subprojects @include_subprojects.setter def include_subprojects(self, value: Optional[bool]) -> None: if value is None: self._property_include_subprojects = None return self.assert_isinstance(value, "include_subprojects", (bool,)) self._property_include_subprojects = value
GetTaskParentsRequest
python
FactoryBoy__factory_boy
tests/test_alchemy.py
{ "start": 3444, "end": 4539 }
class ____(TransactionTestCase): def test_simple_call(self): obj1 = WithGetOrCreateFieldFactory(foo='foo1') obj2 = WithGetOrCreateFieldFactory(foo='foo1') self.assertEqual(obj1, obj2) def test_missing_arg(self): with self.assertRaises(factory.FactoryError): MultifieldModelFactory() def test_raises_exception_when_existing_objs(self): StandardFactory.create_batch(2, foo='foo') with self.assertRaises(sqlalchemy.orm.exc.MultipleResultsFound): WithGetOrCreateFieldFactory(foo='foo') def test_multicall(self): objs = MultifieldModelFactory.create_batch( 6, slug=factory.Iterator(['main', 'alt']), ) self.assertEqual(6, len(objs)) self.assertEqual(2, len(set(objs))) self.assertEqual( list( obj.slug for obj in models.session.query( models.MultiFieldModel.slug ).order_by(models.MultiFieldModel.slug) ), ["alt", "main"], )
SQLAlchemyGetOrCreateTests
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
{ "start": 4460, "end": 4651 }
class ____(BaseModel): """ Response for inactive assets. """ inactive_assets: Annotated[list[AssetProfile] | None, Field(title="Inactive Assets")] = None
InactiveAssetsResponse
python
kamyu104__LeetCode-Solutions
Python/find-pattern-in-infinite-stream-i.py
{ "start": 96, "end": 982 }
class ____(object): def findPattern(self, stream, pattern): """ :type stream: InfiniteStream :type pattern: List[int] :rtype: int """ def getPrefix(pattern): prefix = [-1]*len(pattern) j = -1 for i in xrange(1, len(pattern)): while j+1 > 0 and pattern[j+1] != pattern[i]: j = prefix[j] if pattern[j+1] == pattern[i]: j += 1 prefix[i] = j return prefix prefix = getPrefix(pattern) i = j = -1 while True: d = stream.next() i += 1 while j+1 > 0 and pattern[j+1] != d: j = prefix[j] if pattern[j+1] == d: j += 1 if j+1 == len(pattern): return i-j return -1
Solution
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/conditionally_extends_transitive_dep/package.py
{ "start": 217, "end": 587 }
class ____(Package): """Package that tests if the extends directive supports a spec.""" homepage = "http://www.example.com" url = "http://www.example.com/example-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") extends("extendee", when="@2:") # will not satisfy version depends_on("extension1")
ConditionallyExtendsTransitiveDep
python
huggingface__transformers
tests/models/mamba/test_modeling_mamba.py
{ "start": 1293, "end": 8491 }
class ____: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, intermediate_size=32, hidden_act="silu", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, num_labels=3, num_choices=4, scope=None, tie_word_embeddings=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 self.tie_word_embeddings = tie_word_embeddings def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = ids_tensor([self.batch_size, self.seq_length], 1) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) return ( config, input_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) def get_config( 
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return MambaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, intermediate_size=self.intermediate_size, activation_function=self.hidden_act, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, tie_word_embeddings=self.tie_word_embeddings, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_mamba_model(self, config, input_ids, *args): config.output_hidden_states = True model = MambaModel(config=config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.hidden_states), config.num_hidden_layers + 1) def create_and_check_causal_lm(self, config, input_ids, *args): model = MambaForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_state_equivalency(self, config, input_ids, *args): model = MambaModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids) output_whole = outputs.last_hidden_state outputs = model( input_ids[:, :-1], use_cache=True, cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device), ) output_one = outputs.last_hidden_state # Using the state computed on the first inputs, we will get the same output outputs = model( input_ids[:, -1:], use_cache=True, cache_params=outputs.cache_params, cache_position=torch.arange(config.conv_kernel, 
config.conv_kernel + 1, device=input_ids.device), ) output_two = outputs.last_hidden_state self.parent.assertTrue(torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5)) # TODO the original mamba does not support decoding more than 1 token neither do we def create_and_check_mamba_cached_slow_forward_and_backwards( self, config, input_ids, *args, gradient_checkpointing=False ): model = MambaModel(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() # create cache cache = model(input_ids, use_cache=True).cache_params cache.reset() # use cache token_emb = model.embeddings(input_ids) outputs = model.layers[0].mixer.slow_forward( token_emb, cache, cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device) ) loss = torch.log1p(torch.abs(outputs.sum())) self.parent.assertEqual(loss.shape, ()) self.parent.assertEqual(outputs.shape, (self.batch_size, self.seq_length, self.hidden_size)) loss.backward() def create_and_check_mamba_lm_head_forward_and_backwards( self, config, input_ids, *args, gradient_checkpointing=False ): model = MambaForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): ( config, input_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch
MambaModelTester
python
nedbat__coveragepy
tests/helpers.py
{ "start": 12047, "end": 13098 }
class ____(DebugControl): """A `DebugControl` that writes to a StringIO, for testing.""" def __init__(self, options: Iterable[str]) -> None: self.io = io.StringIO() super().__init__(options, self.io) def get_output(self) -> str: """Get the output text from the `DebugControl`.""" return self.io.getvalue() def all_our_source_files() -> Iterable[tuple[Path, str]]: """Iterate over all of our own source files. This is used in tests that need a bunch of Python code to analyze, so we might as well use our own source code as the subject. Produces a stream of (filename, file contents) tuples. """ cov_dir = Path(__file__).parent.parent # To run against all the files in the tox venvs: # for source_file in cov_dir.rglob("*.py"): for sub in [".", "ci", "coverage", "lab", "tests"]: assert (cov_dir / sub).is_dir() for source_file in (cov_dir / sub).glob("*.py"): yield (source_file, source_file.read_text(encoding="utf-8"))
DebugControlString
python
pennersr__django-allauth
allauth/socialaccount/providers/oauth/views.py
{ "start": 602, "end": 1576 }
class ____: client_class = OAuthClient def __init__(self, request): self.request = request def complete_login(self, request, app): """ Returns a SocialLogin instance """ raise NotImplementedError def get_provider(self): adapter = get_adapter(self.request) app = adapter.get_app(self.request, provider=self.provider_id) return app.get_provider(self.request) def _get_client(self, request, callback_url, scope=None): provider = self.get_provider() app = provider.app parameters = {} if scope: parameters["scope"] = " ".join(scope) client = self.client_class( request, app.client_id, app.secret, self.request_token_url, self.access_token_url, callback_url, parameters=parameters, provider=provider, ) return client
OAuthAdapter
python
mlflow__mlflow
tests/pyfunc/test_model_export_with_class_and_artifacts.py
{ "start": 96333, "end": 100984 }
class ____(mlflow.pyfunc.PythonModel): def predict(self, model_input: list[str]) -> list[str]: return model_input def test_lock_model_requirements(monkeypatch: pytest.MonkeyPatch, tmp_path: Path): monkeypatch.setenv("MLFLOW_LOCK_MODEL_DEPENDENCIES", "true") model_info = mlflow.pyfunc.log_model(name="model", python_model=ExampleModel()) pyfunc_model_path = _download_artifact_from_uri(model_info.model_uri, output_path=tmp_path) requirements_txt = next(Path(pyfunc_model_path).rglob("requirements.txt")) requirements_txt_contents = requirements_txt.read_text() assert "# Locked requirements" in requirements_txt_contents assert "mlflow==" in requirements_txt_contents assert "packaging==" in requirements_txt_contents # Check that pip can install the locked requirements subprocess.check_call( [ sys.executable, "-m", "pip", "install", "--ignore-installed", "--dry-run", "--requirement", requirements_txt, ], ) # Check that conda environment can be created with the locked requirements conda_yaml = next(Path(pyfunc_model_path).rglob("conda.yaml")) conda_yaml_contents = conda_yaml.read_text() assert "# Locked requirements" in conda_yaml_contents assert "mlflow==" in requirements_txt_contents assert "packaging==" in conda_yaml_contents subprocess.check_call( [ "conda", "env", "create", "--file", conda_yaml, "--dry-run", "--yes", ], ) def test_lock_model_requirements_pip_requirements(monkeypatch: pytest.MonkeyPatch, tmp_path: Path): monkeypatch.setenv("MLFLOW_LOCK_MODEL_DEPENDENCIES", "true") model_info = mlflow.pyfunc.log_model( name="model", python_model=ExampleModel(), pip_requirements=["openai"], ) pyfunc_model_path = _download_artifact_from_uri(model_info.model_uri, output_path=tmp_path) requirements_txt = next(Path(pyfunc_model_path).rglob("requirements.txt")) contents = requirements_txt.read_text() assert "# Locked requirements" in contents assert "mlflow==" in contents assert "openai==" in contents assert "httpx==" in contents def 
test_lock_model_requirements_extra_pip_requirements( monkeypatch: pytest.MonkeyPatch, tmp_path: Path ): monkeypatch.setenv("MLFLOW_LOCK_MODEL_DEPENDENCIES", "true") model_info = mlflow.pyfunc.log_model( name="model", python_model=ExampleModel(), extra_pip_requirements=["openai"], ) pyfunc_model_path = _download_artifact_from_uri(model_info.model_uri, output_path=tmp_path) requirements_txt = next(Path(pyfunc_model_path).rglob("requirements.txt")) contents = requirements_txt.read_text() assert "# Locked requirements" in contents assert "mlflow==" in contents assert "openai==" in contents assert "httpx==" in contents def test_lock_model_requirements_constraints(monkeypatch: pytest.MonkeyPatch, tmp_path: Path): constraints_file = tmp_path / "constraints.txt" constraints_file.write_text("openai==1.82.0") monkeypatch.setenv("MLFLOW_LOCK_MODEL_DEPENDENCIES", "true") model_info = mlflow.pyfunc.log_model( name="model", python_model=ExampleModel(), pip_requirements=["openai", f"-c {constraints_file}"], ) pyfunc_model_path = _download_artifact_from_uri(model_info.model_uri, output_path=tmp_path) requirements_txt = next(Path(pyfunc_model_path).rglob("requirements.txt")) contents = requirements_txt.read_text() assert "# Locked requirements" in contents assert "mlflow==" in contents assert "openai==1.82.0" in contents assert "httpx==" in contents @pytest.mark.parametrize( ("input_example", "expected_result"), [(["Hello", "World"], True), (None, False)] ) def test_load_context_with_input_example(input_example, expected_result): class MyModel(mlflow.pyfunc.PythonModel): def load_context(self, context): raise Exception("load_context was called") def predict(self, model_input: list[str], params=None): return model_input msg = "Failed to run the predict function on input example" with mock.patch("mlflow.models.signature._logger.warning") as mock_warning: mlflow.pyfunc.log_model( name="model", python_model=MyModel(), input_example=input_example, ) assert any(msg in call.args[0] for 
call in mock_warning.call_args_list) == expected_result
ExampleModel
python
django-crispy-forms__django-crispy-forms
crispy_forms/bootstrap.py
{ "start": 24531, "end": 26404 }
class ____(ContainerHolder): """ TabHolder object. It wraps Tab objects in a container. Attributes ---------- template : str The default template which this Layout Object will be rendered with. css_class : str, optional CSS classes to be applied to the ``<div>``. By default None. Parameters ---------- *fields : str, LayoutObject Any number of fields or layout objects as positional arguments to be rendered within the ``<div>``. css_id : str, optional A DOM id for the layout object which will be added to the ``<div>`` if provided. By default None. css_class : str, optional Additional CSS classes to be applied in addition to those declared by the class itself. By default None. template : str, optional Overrides the default template, if provided. By default None. **kwargs : dict, optional Additional attributes are passed to ``flatatt`` and converted into key="value", pairs. These attributes are added to the ``<div>``. Examples -------- Example:: TabHolder( Tab('form_field_1', 'form_field_2'), Tab('form_field_3') ) """ template = "%s/layout/tab.html" def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs): for tab in self.fields: tab.active = False # Open the group that should be open. self.open_target_group_for_form(form) content = self.get_rendered_fields(form, context, template_pack) links = SafeString("".join(tab.render_link(template_pack) for tab in self.fields)) context.update({"tabs": self, "links": links, "content": content}) template = self.get_template_name(template_pack) return render_to_string(template, context.flatten())
TabHolder
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataproc.py
{ "start": 86893, "end": 88306 }
class ____(DataprocClusterTestBase): @mock.patch(DATAPROC_PATH.format("Cluster.to_dict")) @mock.patch(DATAPROC_PATH.format("DataprocHook")) def test_execute(self, mock_hook, mock_to_dict): cluster = MagicMock() cluster.status.State.RUNNING = 3 cluster.status.state = 0 mock_hook.return_value.get_cluster.return_value = cluster op = DataprocStartClusterOperator( task_id=TASK_ID, cluster_name=CLUSTER_NAME, region=GCP_REGION, project_id=GCP_PROJECT, request_id=REQUEST_ID, retry=RETRY, timeout=TIMEOUT, metadata=METADATA, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) op.execute(context=self.mock_context) mock_hook.return_value.get_cluster.assert_called_with( region=GCP_REGION, project_id=GCP_PROJECT, cluster_name=CLUSTER_NAME, retry=RETRY, timeout=TIMEOUT, metadata=METADATA, ) mock_hook.return_value.start_cluster.assert_called_once_with( cluster_name=CLUSTER_NAME, region=GCP_REGION, project_id=GCP_PROJECT, cluster_uuid=None, retry=RETRY, timeout=TIMEOUT, metadata=METADATA, )
TestDataprocStartClusterOperator
python
allegroai__clearml
clearml/backend_api/services/v2_9/tasks.py
{ "start": 166778, "end": 167982 }
class ____(Response): """ Response of tasks.edit_configuration endpoint. :param updated: Indicates if the task was updated successfully :type updated: int """ _service = "tasks" _action = "edit_configuration" _version = "2.9" _schema = { "definitions": {}, "properties": { "updated": { "description": "Indicates if the task was updated successfully", "type": ["integer", "null"], } }, "type": "object", } def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None: super(EditConfigurationResponse, self).__init__(**kwargs) self.updated = updated @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value
EditConfigurationResponse
python
python-jsonschema__jsonschema
jsonschema/tests/test_types.py
{ "start": 3435, "end": 6977 }
class ____(TestCase): def test_simple_type_can_be_extended(self): def int_or_str_int(checker, instance): if not isinstance(instance, (int, str)): return False try: int(instance) except ValueError: return False return True CustomValidator = extend( Draft202012Validator, type_checker=Draft202012Validator.TYPE_CHECKER.redefine( "integer", int_or_str_int, ), ) validator = CustomValidator({"type": "integer"}) validator.validate(4) validator.validate("4") with self.assertRaises(ValidationError): validator.validate(4.4) with self.assertRaises(ValidationError): validator.validate("foo") def test_object_can_be_extended(self): schema = {"type": "object"} Point = namedtuple("Point", ["x", "y"]) type_checker = Draft202012Validator.TYPE_CHECKER.redefine( "object", is_object_or_named_tuple, ) CustomValidator = extend( Draft202012Validator, type_checker=type_checker, ) validator = CustomValidator(schema) validator.validate(Point(x=4, y=5)) def test_object_extensions_require_custom_validators(self): schema = {"type": "object", "required": ["x"]} type_checker = Draft202012Validator.TYPE_CHECKER.redefine( "object", is_object_or_named_tuple, ) CustomValidator = extend( Draft202012Validator, type_checker=type_checker, ) validator = CustomValidator(schema) Point = namedtuple("Point", ["x", "y"]) # Cannot handle required with self.assertRaises(ValidationError): validator.validate(Point(x=4, y=5)) def test_object_extensions_can_handle_custom_validators(self): schema = { "type": "object", "required": ["x"], "properties": {"x": {"type": "integer"}}, } type_checker = Draft202012Validator.TYPE_CHECKER.redefine( "object", is_object_or_named_tuple, ) def coerce_named_tuple(fn): def coerced(validator, value, instance, schema): if is_namedtuple(instance): instance = instance._asdict() return fn(validator, value, instance, schema) return coerced required = coerce_named_tuple(_keywords.required) properties = coerce_named_tuple(_keywords.properties) CustomValidator = extend( Draft202012Validator, 
type_checker=type_checker, validators={"required": required, "properties": properties}, ) validator = CustomValidator(schema) Point = namedtuple("Point", ["x", "y"]) # Can now process required and properties validator.validate(Point(x=4, y=5)) with self.assertRaises(ValidationError): validator.validate(Point(x="not an integer", y=5)) # As well as still handle objects. validator.validate({"x": 4, "y": 5}) with self.assertRaises(ValidationError): validator.validate({"x": "not an integer", "y": 5}) def test_unknown_type(self): with self.assertRaises(UnknownType) as e: Draft202012Validator({}).is_type(12, "some unknown type") self.assertIn("'some unknown type'", str(e.exception))
TestCustomTypes
python
huggingface__transformers
src/transformers/models/edgetam_video/modeling_edgetam_video.py
{ "start": 73306, "end": 75431 }
class ____(nn.Module): def __init__(self, config: EdgeTamVideoMaskDecoderConfig): super().__init__() self.config = config self.num_hidden_layers = config.num_hidden_layers self.layers = nn.ModuleList() for i in range(self.num_hidden_layers): self.layers.append(EdgeTamVideoTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0))) self.final_attn_token_to_image = EdgeTamVideoAttention(config) self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size) def forward( self, point_embeddings: Tensor, image_embeddings: Tensor, image_positional_embeddings: Tensor, attention_similarity: Tensor, target_embedding=None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: if image_embeddings is None: raise ValueError("You have to specify an image_embedding") image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) # Prepare queries queries = point_embeddings keys = image_embeddings # Apply transformer blocks and final layernorm for layer in self.layers: if target_embedding is not None: queries += target_embedding queries, keys, _ = layer( queries=queries, keys=keys, query_point_embedding=point_embeddings, key_point_embedding=image_positional_embeddings, attention_similarity=attention_similarity, **kwargs, ) # Apply the final attention layer from the points to the image query = queries + point_embeddings key = keys + image_positional_embeddings attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out queries = self.layer_norm_final_attn(queries) return queries, keys
EdgeTamVideoTwoWayTransformer
python
davidhalter__jedi
test/completion/classes.py
{ "start": 4007, "end": 4054 }
class ____(object): FACTOR_1 = 0.1
FactorMixin
python
django__django
tests/template_tests/filter_tests/test_linebreaksbr.py
{ "start": 1037, "end": 1842 }
class ____(SimpleTestCase): def test_newline(self): self.assertEqual(linebreaksbr("line 1\nline 2"), "line 1<br>line 2") def test_carriage(self): self.assertEqual(linebreaksbr("line 1\rline 2"), "line 1<br>line 2") def test_carriage_newline(self): self.assertEqual(linebreaksbr("line 1\r\nline 2"), "line 1<br>line 2") def test_non_string_input(self): self.assertEqual(linebreaksbr(123), "123") def test_autoescape(self): self.assertEqual( linebreaksbr("foo\n<a>bar</a>\nbuz"), "foo<br>&lt;a&gt;bar&lt;/a&gt;<br>buz", ) def test_autoescape_off(self): self.assertEqual( linebreaksbr("foo\n<a>bar</a>\nbuz", autoescape=False), "foo<br><a>bar</a><br>buz", )
FunctionTests
python
kamyu104__LeetCode-Solutions
Python/path-with-minimum-effort.py
{ "start": 6045, "end": 7268 }
class ____(object): def minimumEffortPath(self, heights): """ :type heights: List[List[int]] :rtype: int """ directions = [(0, 1), (1, 0), (0, -1), (-1, 0)] def check(heights, x): lookup = [[False]*len(heights[0]) for _ in xrange(len(heights))] stk = [(0, 0)] while stk: r, c = stk.pop() if (r, c) == (len(heights)-1, len(heights[0])-1): return True for dr, dc in directions: nr, nc = r+dr, c+dc if not (0 <= nr < len(heights) and 0 <= nc < len(heights[0]) and abs(heights[nr][nc]-heights[r][c]) <= x and not lookup[nr][nc]): continue lookup[nr][nc] = True stk.append((nr, nc)) return False left, right = 0, 10**6 while left <= right: mid = left + (right-left)//2 if check(heights, mid): right = mid-1 else: left = mid+1 return left
Solution5
python
getsentry__sentry
src/sentry/integrations/github_enterprise/webhook.py
{ "start": 11599, "end": 12360 }
class ____(GitHubEnterpriseWebhookBase): owner = ApiOwner.ECOSYSTEM publish_status = { "POST": ApiPublishStatus.PRIVATE, } _handlers = { "push": GitHubEnterprisePushEventWebhook, "pull_request": GitHubEnterprisePullRequestEventWebhook, "installation": GitHubEnterpriseInstallationEventWebhook, } @method_decorator(csrf_exempt) def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: if request.method != "POST": return HttpResponse(status=405) return super().dispatch(request, *args, **kwargs) @method_decorator(csrf_exempt) def post(self, request: HttpRequest) -> HttpResponse: return self._handle(request)
GitHubEnterpriseWebhookEndpoint
python
davidhalter__jedi
test/examples/pytest_plugin_package/pytest_plugin/plugin.py
{ "start": 118, "end": 215 }
class ____: def login(self, **credentials): ... def logout(self): ...
Client
python
PyCQA__pyflakes
pyflakes/checker.py
{ "start": 15508, "end": 15648 }
class ____(Scope): def __init__(self): super().__init__() # {name: node} self.indirect_assignments = {}
ClassScope
python
facebook__pyre-check
tools/pysa_integration_tests/runner_lib.py
{ "start": 742, "end": 865 }
class ____(Exception): """ Custom Exception to raise when Pyre errors out """ pass @final
PyreErrorException
python
numba__numba
numba/core/types/npytypes.py
{ "start": 1717, "end": 7905 }
class ____(Type): """ A Record datatype can be mapped to a NumPy structured dtype. A record is very flexible since it is laid out as a list of bytes. Fields can be mapped to arbitrary points inside it, even if they overlap. *fields* is a list of `(name:str, data:dict)`. Where `data` is `{ type: Type, offset: int }` *size* is an int; the record size *aligned* is a boolean; whether the record is ABI aligned. """ mutable = True @classmethod def make_c_struct(cls, name_types): """Construct a Record type from a list of (name:str, type:Types). The layout of the structure will follow C. Note: only scalar types are supported currently. """ from numba.core.registry import cpu_target ctx = cpu_target.target_context offset = 0 fields = [] lltypes = [] for k, ty in name_types: if not isinstance(ty, (Number, NestedArray)): msg = "Only Number and NestedArray types are supported, found: {}. " raise TypeError(msg.format(ty)) if isinstance(ty, NestedArray): datatype = ctx.data_model_manager[ty].as_storage_type() else: datatype = ctx.get_data_type(ty) lltypes.append(datatype) size = ctx.get_abi_sizeof(datatype) align = ctx.get_abi_alignment(datatype) # align misaligned = offset % align if misaligned: offset += align - misaligned fields.append((k, { 'type': ty, 'offset': offset, 'alignment': align, })) offset += size # Adjust sizeof structure abi_size = ctx.get_abi_sizeof(ir.LiteralStructType(lltypes)) return Record(fields, size=abi_size, aligned=True) def __init__(self, fields, size, aligned): fields = self._normalize_fields(fields) self.fields = dict(fields) self.size = size self.aligned = aligned # Create description descbuf = [] fmt = "{}[type={};offset={}{}]" for k, infos in fields: extra = "" if infos.alignment is not None: extra += ';alignment={}'.format(infos.alignment) elif infos.title is not None: extra += ';title={}'.format(infos.title) descbuf.append(fmt.format(k, infos.type, infos.offset, extra)) desc = ','.join(descbuf) name = 'Record({};{};{})'.format(desc, self.size, 
self.aligned) super(Record, self).__init__(name) self.bitwidth = self.dtype.itemsize * 8 @classmethod def _normalize_fields(cls, fields): """ fields: [name: str, value: { type: Type, offset: int, [ alignment: int ], [ title : str], }] """ res = [] for name, infos in sorted(fields, key=lambda x: (x[1]['offset'], x[0])): fd = _RecordField( type=infos['type'], offset=infos['offset'], alignment=infos.get('alignment'), title=infos.get('title'), ) res.append((name, fd)) return res @property def key(self): # Numpy dtype equality doesn't always succeed, use the name instead # (https://github.com/numpy/numpy/issues/5715) return self.name @property def mangling_args(self): return self.__class__.__name__, (self._code,) def __len__(self): """Returns the number of fields """ return len(self.fields) def offset(self, key): """Get the byte offset of a field from the start of the structure. """ return self.fields[key].offset def typeof(self, key): """Get the type of a field. """ return self.fields[key].type def alignof(self, key): """Get the specified alignment of the field. Since field alignment is optional, this may return None. """ return self.fields[key].alignment def has_titles(self): """Returns True the record uses titles. """ return any(fd.title is not None for fd in self.fields.values()) def is_title(self, key): """Returns True if the field named *key* is a title. """ return self.fields[key].title == key @property def members(self): """An ordered list of (name, type) for the fields. """ ordered = sorted(self.fields.items(), key=lambda x: x[1].offset) return [(k, v.type) for k, v in ordered] @property def dtype(self): from numba.np.numpy_support import as_struct_dtype return as_struct_dtype(self) def can_convert_to(self, typingctx, other): """ Convert this Record to the *other*. This method only implements width subtyping for records. 
""" from numba.core.errors import NumbaExperimentalFeatureWarning if isinstance(other, Record): if len(other.fields) > len(self.fields): return for other_fd, self_fd in zip(other.fields.items(), self.fields.items()): if not other_fd == self_fd: return warnings.warn(f"{self} has been considered a subtype of {other} " f" This is an experimental feature.", category=NumbaExperimentalFeatureWarning) return Conversion.safe def __repr__(self): fields = [f"('{f_name}', " + f"{{'type': {repr(f_info.type)}, " + f"'offset': {f_info.offset}, " + f"'alignment': {f_info.alignment}, " + f"'title': {f_info.title}, " + f"}}" + ")" for f_name, f_info in self.fields.items() ] fields = "[" + ", ".join(fields) + "]" return f"Record({fields}, {self.size}, {self.aligned})"
Record
python
celery__celery
t/unit/concurrency/test_gevent.py
{ "start": 592, "end": 1363 }
class ____: def setup_method(self): self.patching.modules(*gevent_modules) self.greenlet = self.patching('gevent.greenlet') self.GreenletExit = self.patching('gevent.greenlet.GreenletExit') def test_sched(self): self.greenlet.Greenlet = object x = Timer() self.greenlet.Greenlet = Mock() x._Greenlet.spawn_later = Mock() x._GreenletExit = KeyError entry = Mock() g = x._enter(1, 0, entry) assert x.queue x._entry_exit(g) g.kill.assert_called_with() assert not x._queue x._queue.add(g) x.clear() x._queue.add(g) g.kill.side_effect = KeyError() x.clear() g = x._Greenlet() g.cancel()
test_Timer
python
pytorch__pytorch
torch/_guards.py
{ "start": 18721, "end": 19547 }
class ____: global_state: dict[str, tuple[Callable, Any]] = {} def __init__(self, global_states: dict[str, tuple[Callable, Any]]) -> None: self.global_state = global_states def diff(self, other: GlobalContextCheckpointState) -> Optional[set[str]]: """ Produces a delta against another GlobalContextCheckpointState. Returns None if no delta is found, otherwise, return a set() of mismatched global key names. """ r = set(self.global_state.keys()).difference(set(other.global_state.keys())) if len(r) == 0: return None return r def __eq__(self, other: object) -> bool: if not isinstance(other, GlobalContextCheckpointState): return False return self.diff(other) is None
GlobalContextCheckpointState
python
getsentry__sentry
tests/sentry/issues/endpoints/test_project_grouping_configs.py
{ "start": 220, "end": 731 }
class ____(APITestCase): endpoint = "sentry-api-0-project-grouping-configs" def test_permissions(self) -> None: with assume_test_silo_mode(SiloMode.CONTROL): token = ApiToken.objects.create(user=self.user, scope_list=[]) url = reverse(self.endpoint, args=(self.project.organization.slug, self.project.slug)) response = self.client.get(url, HTTP_AUTHORIZATION=f"Bearer {token.token}", format="json") assert response.status_code == 403
ProjectGroupingConfigsTest
python
kubernetes-client__python
kubernetes/base/config/kube_config_test.py
{ "start": 5441, "end": 10567 }
class ____(BaseTestCase): @staticmethod def get_file_content(filename): with open(filename) as f: return f.read() def test_file_given_file(self): temp_filename = _create_temp_file_with_content(TEST_DATA) obj = {TEST_FILE_KEY: temp_filename} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) def test_file_given_non_existing_file(self): temp_filename = NON_EXISTING_FILE obj = {TEST_FILE_KEY: temp_filename} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY) self.expect_exception(t.as_file, "does not exist") def test_file_given_data(self): obj = {TEST_DATA_KEY: TEST_DATA_BASE64} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) def test_file_given_data_no_base64(self): obj = {TEST_DATA_KEY: TEST_DATA} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY, base64_file_content=False) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) def test_data_given_data(self): obj = {TEST_DATA_KEY: TEST_DATA_BASE64} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY) self.assertEqual(TEST_DATA_BASE64, t.as_data()) def test_data_given_file(self): obj = { TEST_FILE_KEY: self._create_temp_file(content=TEST_DATA)} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY) self.assertEqual(TEST_DATA_BASE64, t.as_data()) def test_data_given_file_no_base64(self): obj = { TEST_FILE_KEY: self._create_temp_file(content=TEST_DATA)} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, base64_file_content=False) self.assertEqual(TEST_DATA, t.as_data()) def test_data_given_file_and_data(self): obj = { TEST_DATA_KEY: TEST_DATA_BASE64, TEST_FILE_KEY: self._create_temp_file( content=TEST_ANOTHER_DATA)} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY) self.assertEqual(TEST_DATA_BASE64, t.as_data()) def test_file_given_file_and_data(self): obj = 
{ TEST_DATA_KEY: TEST_DATA_BASE64, TEST_FILE_KEY: self._create_temp_file( content=TEST_ANOTHER_DATA)} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) def test_file_with_custom_dirname(self): tempfile = self._create_temp_file(content=TEST_DATA) tempfile_dir = os.path.dirname(tempfile) tempfile_basename = os.path.basename(tempfile) obj = {TEST_FILE_KEY: tempfile_basename} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, file_base_path=tempfile_dir) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) def test_create_temp_file_with_content(self): self.assertEqual(TEST_DATA, self.get_file_content( _create_temp_file_with_content(TEST_DATA))) _cleanup_temp_files() def test_file_given_data_bytes(self): obj = {TEST_DATA_KEY: TEST_DATA_BASE64.encode()} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) def test_file_given_data_bytes_no_base64(self): obj = {TEST_DATA_KEY: TEST_DATA.encode()} t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY, base64_file_content=False) self.assertEqual(TEST_DATA, self.get_file_content(t.as_file())) def test_file_given_no_object(self): t = FileOrData(obj=None, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY) self.assertEqual(t.as_file(), None) def test_file_given_no_object_data(self): t = FileOrData(obj=None, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY) self.assertEqual(t.as_data(), None) def test_file_recreation(self): obj = {TEST_DATA_KEY: TEST_DATA_BASE64} t1 = FileOrData( obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY, ) first_file_path = t1.as_file() # We manually remove the file from the disk leaving it in the cache os.remove(first_file_path) t2 = FileOrData( obj=obj, file_key_name=TEST_FILE_KEY, data_key_name=TEST_DATA_KEY, ) second_file_path = t2.as_file() 
self.assertEqual(TEST_DATA, self.get_file_content(second_file_path))
TestFileOrData
python
readthedocs__readthedocs.org
readthedocs/builds/migrations/0053_alter_version_build_data.py
{ "start": 148, "end": 692 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("builds", "0052_alter_versionautomationrule_polymorphic_ctype"), ] operations = [ migrations.AlterField( model_name="version", name="build_data", field=models.JSONField( blank=True, default=None, null=True, verbose_name="Data generated at build time by the doctool (`readthedocs-build.yaml`).", ), ), ]
Migration
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 341825, "end": 342867 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "attributes", "emails", "family_name", "given_name", "groups", "name_id", "username", ) attributes = sgqlc.types.Field( sgqlc.types.non_null( sgqlc.types.list_of(sgqlc.types.non_null(ExternalIdentityAttribute)) ), graphql_name="attributes", ) emails = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null("UserEmailMetadata")), graphql_name="emails", ) family_name = sgqlc.types.Field(String, graphql_name="familyName") given_name = sgqlc.types.Field(String, graphql_name="givenName") groups = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="groups" ) name_id = sgqlc.types.Field(String, graphql_name="nameId") username = sgqlc.types.Field(String, graphql_name="username")
ExternalIdentitySamlAttributes
python
python-markdown__markdown
markdown/inlinepatterns.py
{ "start": 36024, "end": 36481 }
class ____(ReferenceInlineProcessor): """ Match to a stored reference and return `img` element. """ def makeTag(self, href: str, title: str, text: str) -> etree.Element: """ Return an `img` [`Element`][xml.etree.ElementTree.Element]. """ el = etree.Element("img") el.set("src", href) if title: el.set("title", title) el.set("alt", self.unescape(text)) return el
ImageReferenceInlineProcessor
python
astropy__astropy
astropy/visualization/wcsaxes/tests/test_transform_coord_meta.py
{ "start": 454, "end": 915 }
class ____(CurvedTransform): has_inverse = True def __init__(self, R=6e3): super().__init__() self.R = R def transform(self, xy): x, y = xy[:, 0], xy[:, 1] lam = np.degrees(np.arctan2(y, x)) phi = 90.0 - np.degrees(np.hypot(x, y) / self.R) return np.array((lam, phi)).transpose() transform_non_affine = transform def inverted(self): return LonLatToDistance(R=self.R)
DistanceToLonLat
python
realpython__materials
python-class/animals.py
{ "start": 703, "end": 781 }
class ____(Fish): def swim(self): print("The shark is swimming")
Shark
python
kamyu104__LeetCode-Solutions
Python/right-triangles.py
{ "start": 1170, "end": 1905 }
class ____(object): def numberOfRightTriangles(self, grid): """ :type grid: List[List[int]] :rtype: int """ def get(i, j): return grid[i][j] if n < m else grid[j][i] def count(direction): result = 0 cnt = [0]*min(n, m) for j in direction(xrange(max(n, m))): c = sum(get(i, j) for i in xrange(len(cnt))) for i in xrange(len(cnt)): if get(i, j) == 0: continue result += cnt[i] cnt[i] += c-1 return result n, m = len(grid), len(grid[0]) return count(lambda x: x)+count(reversed)
Solution3
python
django__django
tests/migrations/migrations_test_apps/mutate_state_b/migrations/0001_initial.py
{ "start": 43, "end": 736 }
class ____(migrations.Migration): dependencies = [] operations = [ migrations.SeparateDatabaseAndState( [], [ migrations.CreateModel( name="B", fields=[ ( "id", models.AutoField( serialize=False, verbose_name="ID", auto_created=True, primary_key=True, ), ), ], ), ], ) ]
Migration
python
pallets__click
tests/test_options.py
{ "start": 56786, "end": 56872 }
class ____(enum.Enum): MD5 = "MD5" SHA1 = "SHA1" SHA256 = "SHA-256"
HashType
python
mlflow__mlflow
mlflow/models/dependencies_schemas.py
{ "start": 8110, "end": 9703 }
class ____(Schema): """ Define vector search index resource to serve a model. Args: name (str): The name of the vector search index schema. primary_key (str): The primary key for the index. text_column (str): The main text column for the index. doc_uri (Optional[str]): The document URI for the index. other_columns (Optional[List[str]]): Additional columns in the index. """ def __init__( self, name: str, primary_key: str, text_column: str, doc_uri: str | None = None, other_columns: list[str] | None = None, ): super().__init__(type=DependenciesSchemasType.RETRIEVERS) self.name = name self.primary_key = primary_key self.text_column = text_column self.doc_uri = doc_uri self.other_columns = other_columns or [] def to_dict(self): return { self.type.value: [ { "name": self.name, "primary_key": self.primary_key, "text_column": self.text_column, "doc_uri": self.doc_uri, "other_columns": self.other_columns, } ] } @classmethod def from_dict(cls, data: dict[str, str]): return cls( name=data["name"], primary_key=data["primary_key"], text_column=data["text_column"], doc_uri=data.get("doc_uri"), other_columns=data.get("other_columns", []), ) @dataclass
RetrieverSchema
python
run-llama__llama_index
llama-index-core/llama_index/core/instrumentation/events/llm.py
{ "start": 2759, "end": 3373 }
class ____(BaseEvent): """ LLMCompletionInProgressEvent. Args: prompt (str): The prompt to be completed. response (CompletionResponse): Completion response. """ prompt: str response: CompletionResponse @classmethod def class_name(cls) -> str: """Class name.""" return "LLMCompletionInProgressEvent" def model_dump(self, **kwargs: Any) -> Dict[str, Any]: if isinstance(self.response.raw, BaseModel): self.response.raw = self.response.raw.model_dump() return super().model_dump(**kwargs)
LLMCompletionInProgressEvent
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/duplicate_bases.py
{ "start": 250, "end": 279 }
class ____(A, A, B): ...
G1
python
ray-project__ray
python/ray/cloudpickle/py_pickle.py
{ "start": 346, "end": 676 }
class ____(_Pickler): def __init__(self, file, protocol=None, *, fix_imports=True, buffer_callback=None): super().__init__( file, protocol, fix_imports=fix_imports, buffer_callback=buffer_callback ) # avoid being overrided by cloudpickle self.dispatch = _Pickler.dispatch.copy()
Pickler
python
django__django
django/db/models/functions/window.py
{ "start": 404, "end": 520 }
class ____(Func): function = "DENSE_RANK" output_field = IntegerField() window_compatible = True
DenseRank
python
mlflow__mlflow
mlflow/gateway/app.py
{ "start": 8762, "end": 18621 }
class ____(BaseModel): routes: list[_LegacyRoute] next_page_token: str | None = None model_config = ConfigDict( json_schema_extra={ "example": { "endpoints": [ { "name": "openai-chat", "route_type": "llm/v1/chat", "model": { "name": "gpt-4o-mini", "provider": "openai", }, }, { "name": "anthropic-completions", "route_type": "llm/v1/completions", "model": { "name": "claude-instant-100k", "provider": "anthropic", }, }, { "name": "cohere-embeddings", "route_type": "llm/v1/embeddings", "model": { "name": "embed-english-v2.0", "provider": "cohere", }, }, ], "next_page_token": "eyJpbmRleCI6IDExfQ==", } } ) def create_app_from_config(config: GatewayConfig) -> GatewayAPI: """ Create the GatewayAPI app from the gateway configuration. """ limiter = Limiter( key_func=get_remote_address, storage_uri=MLFLOW_GATEWAY_RATE_LIMITS_STORAGE_URI.get() ) app = GatewayAPI( config=config, limiter=limiter, title="MLflow AI Gateway", description="The core deployments API for reverse proxy interface using remote inference " "endpoints within MLflow", version=VERSION, docs_url=None, ) @app.get("/", include_in_schema=False) async def index(): return RedirectResponse(url="/docs") @app.get("/favicon.ico", include_in_schema=False) async def favicon(): for directory in ["build", "public"]: favicon_file = Path(__file__).parent.parent.joinpath( "server", "js", directory, "favicon.ico" ) if favicon_file.exists(): return FileResponse(favicon_file) raise HTTPException(status_code=404, detail="favicon.ico not found") @app.get("/docs", include_in_schema=False) async def docs(): return get_swagger_ui_html( openapi_url="/openapi.json", title="MLflow AI Gateway", swagger_favicon_url="/favicon.ico", ) # TODO: Remove deployments server URLs after deprecation window elapses @app.get(MLFLOW_DEPLOYMENTS_HEALTH_ENDPOINT) @app.get(MLFLOW_GATEWAY_HEALTH_ENDPOINT, include_in_schema=False) async def health() -> HealthResponse: return {"status": "OK"} # TODO: Remove deployments server URLs after deprecation window 
elapses @app.get(MLFLOW_DEPLOYMENTS_CRUD_ENDPOINT_BASE + "{endpoint_name}") async def get_endpoint(endpoint_name: str) -> Endpoint: if matched := app.get_dynamic_endpoint(endpoint_name): return matched raise HTTPException( status_code=404, detail=f"The endpoint '{endpoint_name}' is not present or active on the server. Please " "verify the endpoint name.", ) # TODO: Remove the deprecated endpoint @app.get( MLFLOW_GATEWAY_CRUD_ROUTE_BASE + "{route_name}", include_in_schema=False, deprecated=True ) async def _legacy_get_route(route_name: str) -> _LegacyRoute: if matched := app._get_legacy_dynamic_route(route_name): return matched raise HTTPException( status_code=404, detail=f"The route '{route_name}' is not present or active on the server. Please " "verify the route name.", ) @app.get(MLFLOW_GATEWAY_CRUD_ENDPOINT_V3_BASE + "{endpoint_name}", include_in_schema=False) async def get_endpoint_v3(endpoint_name: str) -> Endpoint: if matched := app.dynamic_endpoints.get(endpoint_name): return matched.to_endpoint() raise HTTPException( status_code=404, detail=f"The endpoint '{endpoint_name}' is not present or active on the server. " f"Please verify the endpoint name.", ) @app.get(MLFLOW_GATEWAY_CRUD_ROUTE_V3_BASE + "{route_name}", include_in_schema=False) async def get_route_v3(route_name: str) -> TrafficRouteConfig: if matched := app.traffic_routes.get(route_name): return matched raise HTTPException( status_code=404, detail=f"The route '{route_name}' is not present or active on the server. 
" f"Please verify the route name.", ) # TODO: Remove deployments server URLs after deprecation window elapses @app.get(MLFLOW_DEPLOYMENTS_CRUD_ENDPOINT_BASE) async def list_endpoints(page_token: str | None = None) -> ListEndpointsResponse: start_idx = SearchRoutesToken.decode(page_token).index if page_token is not None else 0 end_idx = start_idx + MLFLOW_DEPLOYMENTS_LIST_ENDPOINTS_PAGE_SIZE endpoints = list(app.dynamic_endpoints.values()) result = { "endpoints": [endpoint.to_endpoint() for endpoint in endpoints[start_idx:end_idx]] } if len(endpoints[end_idx:]) > 0: next_page_token = SearchRoutesToken(index=end_idx) result["next_page_token"] = next_page_token.encode() return result # TODO: Remove the deprecated endpoint @app.get(MLFLOW_GATEWAY_CRUD_ROUTE_BASE, include_in_schema=False, deprecated=True) async def _legacy_search_routes(page_token: str | None = None) -> _LegacySearchRoutesResponse: start_idx = SearchRoutesToken.decode(page_token).index if page_token is not None else 0 end_idx = start_idx + MLFLOW_GATEWAY_SEARCH_ROUTES_PAGE_SIZE routes = list(app.dynamic_endpoints.values()) result = {"routes": [r._to_legacy_route() for r in routes[start_idx:end_idx]]} if len(routes[end_idx:]) > 0: next_page_token = SearchRoutesToken(index=end_idx) result["next_page_token"] = next_page_token.encode() return result # TODO: Remove deployments server URLs after deprecation window elapses @app.get(MLFLOW_DEPLOYMENTS_LIMITS_BASE + "{endpoint}") @app.get(MLFLOW_GATEWAY_LIMITS_BASE + "{endpoint}", include_in_schema=False) async def get_limits(endpoint: str) -> LimitsConfig: raise HTTPException(status_code=501, detail="The get_limits API is not available yet.") # TODO: Remove deployments server URLs after deprecation window elapses @app.post(MLFLOW_DEPLOYMENTS_LIMITS_BASE) @app.post(MLFLOW_GATEWAY_LIMITS_BASE, include_in_schema=False) async def set_limits(payload: SetLimitsModel) -> LimitsConfig: raise HTTPException(status_code=501, detail="The set_limits API is not available 
yet.") @app.post("/v1/chat/completions") async def openai_chat_handler( request: Request, payload: chat.RequestPayload ) -> chat.ResponsePayload: name = payload.model prov, endpoint_type = app._get_provider_by_name(name) if endpoint_type != EndpointType.LLM_V1_CHAT: raise HTTPException( status_code=400, detail=f"Endpoint {name!r} is not a chat endpoint.", ) payload.model = None # provider rejects a request with model field, must be set to None if payload.stream: return await make_streaming_response(prov.chat_stream(payload)) else: return await prov.chat(payload) @app.post("/v1/completions") async def openai_completions_handler( request: Request, payload: completions.RequestPayload ) -> completions.ResponsePayload: name = payload.model prov, endpoint_type = app._get_provider_by_name(name) if endpoint_type != EndpointType.LLM_V1_COMPLETIONS: raise HTTPException( status_code=400, detail=f"Endpoint {name!r} is not a completions endpoint.", ) payload.model = None # provider rejects a request with model field, must be set to None if payload.stream: return await make_streaming_response(prov.completions_stream(payload)) else: return await prov.completions(payload) @app.post("/v1/embeddings") async def openai_embeddings_handler( request: Request, payload: embeddings.RequestPayload ) -> embeddings.ResponsePayload: name = payload.model prov, endpoint_type = app._get_provider_by_name(name) if endpoint_type != EndpointType.LLM_V1_EMBEDDINGS: raise HTTPException( status_code=400, detail=f"Endpoint {name!r} is not an embeddings endpoint.", ) payload.model = None # provider rejects a request with model field, must be set to None return await prov.embeddings(payload) return app def create_app_from_path(config_path: str | Path) -> GatewayAPI: """ Load the path and generate the GatewayAPI app instance. 
""" config = _load_gateway_config(config_path) return create_app_from_config(config) def create_app_from_env() -> GatewayAPI: """ Load the path from the environment variable and generate the GatewayAPI app instance. """ if config_path := MLFLOW_GATEWAY_CONFIG.get(): return create_app_from_path(config_path) raise MlflowException( f"Environment variable {MLFLOW_GATEWAY_CONFIG!r} is not set. " "Please set it to the path of the gateway configuration file." )
_LegacySearchRoutesResponse
python
tensorflow__tensorflow
tensorflow/python/framework/errors_impl.py
{ "start": 13976, "end": 14692 }
class ____(OpError): """Raised when some prerequisites are not met when running an operation. This typically indicates that system is not in state to execute the operation and requires preconditions to be met before successfully executing current operation. For example, this exception is commonly raised when running an operation that reads a `tf.Variable` before it has been initialized. """ def __init__(self, node_def, op, message, *args): """Creates a `FailedPreconditionError`.""" super(FailedPreconditionError, self).__init__(node_def, op, message, FAILED_PRECONDITION, *args) @tf_export("errors.AbortedError")
FailedPreconditionError
python
streamlit__streamlit
lib/tests/streamlit/elements/lib/options_selector_utils_test.py
{ "start": 5682, "end": 13118 }
class ____: """Test class for Enum Coercion feature.""" @pytest.fixture def EnumAOrig(self): class EnumA(enum.Enum): A = enum.auto() B = enum.auto() C = enum.auto() EnumA.__qualname__ = "__main__.EnumA" return EnumA @pytest.fixture def EnumAEqual(self): class EnumA(enum.Enum): A = enum.auto() B = enum.auto() C = enum.auto() EnumA.__qualname__ = "__main__.EnumA" return EnumA @pytest.fixture def EnumADiffMembers(self): class EnumA(enum.Enum): A = enum.auto() B = enum.auto() D = enum.auto() EnumA.__qualname__ = "__main__.EnumA" return EnumA @pytest.fixture def EnumADiffValues(self): class EnumA(enum.Enum): A = "1" B = "2" C = "3" EnumA.__qualname__ = "__main__.EnumA" return EnumA @pytest.fixture def EnumAExtraMembers(self): class EnumA(enum.Enum): A = enum.auto() B = enum.auto() C = enum.auto() D = enum.auto() EnumA.__qualname__ = "__main__.EnumA" return EnumA @pytest.fixture def EnumADiffQualname(self): class EnumA(enum.Enum): A = enum.auto() B = enum.auto() C = enum.auto() EnumA.__qualname__ = "foobar.EnumA" return EnumA @pytest.fixture def EnumB(self): class EnumB(enum.Enum): A = enum.auto() B = enum.auto() C = enum.auto() EnumB.__qualname__ = "__main__.EnumB" return EnumB def test_enum_uniqueness( self, EnumAOrig, EnumAEqual, EnumADiffMembers, EnumADiffValues, EnumADiffQualname, EnumB, EnumAExtraMembers, ): """A preliminary check, to ensure testing the others makes sense.""" assert all( EnumAOrig.A not in enum for enum in ( EnumAEqual, EnumADiffMembers, EnumADiffValues, EnumADiffQualname, EnumAExtraMembers, EnumB, ) ) assert EnumAOrig.A.value == EnumAEqual.A.value assert EnumAOrig.__qualname__ == EnumAEqual.__qualname__ def test_coerce_enum_coercable( self, EnumAOrig, EnumAEqual, EnumADiffValues, ): assert _coerce_enum(EnumAOrig.A, EnumAEqual) is EnumAEqual.A # Different values are coercible by default assert _coerce_enum(EnumAOrig.A, EnumADiffValues) is EnumADiffValues.A def test_coerce_enum_not_coercable( self, EnumAOrig, EnumADiffMembers, EnumAExtraMembers, 
EnumADiffQualname, EnumB, ): # Things that are not coercible assert _coerce_enum(EnumAOrig.A, EnumADiffMembers) is EnumAOrig.A assert _coerce_enum(EnumAOrig.A, EnumAExtraMembers) is EnumAOrig.A assert _coerce_enum(EnumAOrig.A, EnumB) is EnumAOrig.A assert _coerce_enum(EnumAOrig.A, EnumADiffQualname) is EnumAOrig.A def test_coerce_enum_noop(self, EnumAOrig): assert _coerce_enum(EnumAOrig.A, EnumAOrig) is EnumAOrig.A def test_coerce_enum_errors(self, EnumAOrig, EnumAEqual): with pytest.raises(ValueError, match="Expected an EnumMeta"): _coerce_enum(EnumAOrig.A, EnumAEqual.A) with pytest.raises(ValueError, match="Expected an Enum"): _coerce_enum(EnumAOrig, EnumAEqual) @patch_config_options({"runner.enumCoercion": "off"}) def test_coerce_enum_config_off(self, EnumAOrig, EnumAEqual): assert _coerce_enum(EnumAOrig.A, EnumAEqual) is EnumAOrig.A @patch_config_options({"runner.enumCoercion": "nameAndValue"}) def test_coerce_enum_config_name_and_value( self, EnumAOrig, EnumAEqual, EnumADiffValues ): assert _coerce_enum(EnumAOrig.A, EnumAEqual) is EnumAEqual.A assert _coerce_enum(EnumAOrig.A, EnumADiffValues) is EnumAOrig.A @patch_config_options({"runner.enumCoercion": "badValue"}) def test_coerce_enum_bad_config_value(self, EnumAOrig, EnumAEqual): with pytest.raises(StreamlitAPIException): _coerce_enum(EnumAOrig.A, EnumAEqual) def test_maybe_coerce_enum(self): class EnumA(enum.Enum): A = enum.auto() B = enum.auto() C = enum.auto() EnumAOrig = EnumA class EnumA(enum.Enum): A = enum.auto() B = enum.auto() C = enum.auto() EnumAEqual = EnumA EnumAEqualList = [EnumAEqual.A, EnumAEqual.C, EnumAEqual.B] int_result = RegisterWidgetResult(1, False) intlist_result = RegisterWidgetResult([1, 2, 3], False) single_result = RegisterWidgetResult(EnumAOrig.A, False) single_coerced = RegisterWidgetResult(EnumAEqual.A, False) tuple_result = RegisterWidgetResult((EnumAOrig.A, EnumAOrig.C), True) tuple_coerced = RegisterWidgetResult((EnumAEqual.A, EnumAEqual.C), True) list_result = 
RegisterWidgetResult([EnumAOrig.A, EnumAOrig.C], True) list_coerced = RegisterWidgetResult([EnumAEqual.A, EnumAEqual.C], True) assert maybe_coerce_enum(single_result, EnumAEqual, []) == single_coerced assert ( maybe_coerce_enum(single_result, EnumAEqualList, EnumAEqualList) == single_coerced ) assert ( maybe_coerce_enum(single_result, EnumAEqualList, [EnumAEqual.A]) == single_coerced ) assert maybe_coerce_enum(single_result, [1, 2, 3], []) is single_result assert maybe_coerce_enum(int_result, EnumAEqual, []) is int_result assert ( maybe_coerce_enum( single_result, EnumAEqualList, [EnumAEqual.A, EnumAOrig.B] ) is single_result ) assert maybe_coerce_enum_sequence(tuple_result, EnumAEqual, []) == tuple_coerced assert ( maybe_coerce_enum_sequence(tuple_result, EnumAEqualList, EnumAEqualList) == tuple_coerced ) assert ( maybe_coerce_enum_sequence(tuple_result, EnumAEqualList, [EnumAEqual.A]) == tuple_coerced ) assert maybe_coerce_enum_sequence(list_result, EnumAEqual, []) == list_coerced assert ( maybe_coerce_enum_sequence(list_result, EnumAEqualList, EnumAEqualList) == list_coerced ) assert ( maybe_coerce_enum_sequence(list_result, EnumAEqualList, [EnumAEqual.A]) == list_coerced ) assert maybe_coerce_enum_sequence(list_result, [1, 2, 3], []) is list_result assert maybe_coerce_enum_sequence(tuple_result, [1, 2, 3], []) is tuple_result assert ( maybe_coerce_enum_sequence(intlist_result, EnumAEqual, []) is intlist_result ) assert ( maybe_coerce_enum_sequence( list_result, EnumAEqualList, [EnumAEqual.A, EnumAOrig.B] ) is list_result ) assert ( maybe_coerce_enum_sequence( tuple_result, EnumAEqualList, [EnumAEqual.A, EnumAOrig.B] ) is tuple_result )
TestEnumCoercion
python
langchain-ai__langchain
libs/core/langchain_core/output_parsers/transform.py
{ "start": 2878, "end": 5835 }
class ____(BaseTransformOutputParser[T]): """Base class for an output parser that can handle streaming input.""" diff: bool = False """In streaming mode, whether to yield diffs between the previous and current parsed output, or just the current parsed output. """ def _diff( self, prev: T | None, next: T, # noqa: A002 ) -> T: """Convert parsed outputs into a diff format. The semantics of this are up to the output parser. Args: prev: The previous parsed output. next: The current parsed output. Returns: The diff between the previous and current parsed output. """ raise NotImplementedError @override def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[Any]: prev_parsed = None acc_gen: GenerationChunk | ChatGenerationChunk | None = None for chunk in input: chunk_gen: GenerationChunk | ChatGenerationChunk if isinstance(chunk, BaseMessageChunk): chunk_gen = ChatGenerationChunk(message=chunk) elif isinstance(chunk, BaseMessage): chunk_gen = ChatGenerationChunk( message=BaseMessageChunk(**chunk.model_dump()) ) else: chunk_gen = GenerationChunk(text=chunk) acc_gen = chunk_gen if acc_gen is None else acc_gen + chunk_gen # type: ignore[operator] parsed = self.parse_result([acc_gen], partial=True) if parsed is not None and parsed != prev_parsed: if self.diff: yield self._diff(prev_parsed, parsed) else: yield parsed prev_parsed = parsed @override async def _atransform( self, input: AsyncIterator[str | BaseMessage] ) -> AsyncIterator[T]: prev_parsed = None acc_gen: GenerationChunk | ChatGenerationChunk | None = None async for chunk in input: chunk_gen: GenerationChunk | ChatGenerationChunk if isinstance(chunk, BaseMessageChunk): chunk_gen = ChatGenerationChunk(message=chunk) elif isinstance(chunk, BaseMessage): chunk_gen = ChatGenerationChunk( message=BaseMessageChunk(**chunk.model_dump()) ) else: chunk_gen = GenerationChunk(text=chunk) acc_gen = chunk_gen if acc_gen is None else acc_gen + chunk_gen # type: ignore[operator] parsed = await 
self.aparse_result([acc_gen], partial=True) if parsed is not None and parsed != prev_parsed: if self.diff: yield await run_in_executor(None, self._diff, prev_parsed, parsed) else: yield parsed prev_parsed = parsed
BaseCumulativeTransformOutputParser
python
getsentry__sentry
src/sentry/web/frontend/debug/debug_uptime_auto_detected_monitor_email.py
{ "start": 536, "end": 917 }
class ____(View): def get(self, request: HttpRequest) -> HttpResponse: context = get_context() return MailPreview( text_template="sentry/emails/uptime/auto-detected-monitors.txt", html_template="sentry/emails/uptime/auto-detected-monitors.html", context=context, ).render(request)
DebugUptimeAutoDetectedMonitorEmailView
python
pytorch__pytorch
torch/_inductor/codegen/cpp_gemm_template.py
{ "start": 77885, "end": 77962 }
class ____(metaclass=CppWoqInt4GemmTemplateMeta): pass
CppWoqInt4GemmTemplate
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/pool/base.py
{ "start": 1195, "end": 2770 }
class ____: """describes the state of a DBAPI connection as it is being passed to the :meth:`.PoolEvents.reset` connection pool event. .. versionadded:: 2.0.0b3 """ __slots__ = ("transaction_was_reset", "terminate_only", "asyncio_safe") transaction_was_reset: bool """Indicates if the transaction on the DBAPI connection was already essentially "reset" back by the :class:`.Connection` object. This boolean is True if the :class:`.Connection` had transactional state present upon it, which was then not closed using the :meth:`.Connection.rollback` or :meth:`.Connection.commit` method; instead, the transaction was closed inline within the :meth:`.Connection.close` method so is guaranteed to remain non-present when this event is reached. """ terminate_only: bool """indicates if the connection is to be immediately terminated and not checked in to the pool. This occurs for connections that were invalidated, as well as asyncio connections that were not cleanly handled by the calling code that are instead being garbage collected. In the latter case, operations can't be safely run on asyncio connections within garbage collection as there is not necessarily an event loop present. """ asyncio_safe: bool """Indicates if the reset operation is occurring within a scope where an enclosing event loop is expected to be present for asyncio applications. Will be False in the case that the connection is being garbage collected. """
PoolResetState
python
explosion__spaCy
spacy/lang/cs/__init__.py
{ "start": 215, "end": 305 }
class ____(Language): lang = "cs" Defaults = CzechDefaults __all__ = ["Czech"]
Czech
python
h5py__h5py
h5py/tests/test_dataset.py
{ "start": 4648, "end": 8510 }
class ____(BaseDataset): """ Feature: Datasets can be created from existing data """ def test_create_scalar(self): """ Create a scalar dataset from existing array """ data = np.ones((), 'f') dset = self.f.create_dataset(make_name(), data=data) self.assertEqual(dset.shape, data.shape) def test_create_extended(self): """ Create an extended dataset from existing data """ data = np.ones((63,), 'f') dset = self.f.create_dataset(make_name(), data=data) self.assertEqual(dset.shape, data.shape) def test_dataset_intermediate_group(self): """ Create dataset with missing intermediate groups """ name = make_name("/foo{}/bar/baz") ds = self.f.create_dataset(name, shape=(10, 10), dtype='<i4') self.assertIsInstance(ds, h5py.Dataset) self.assertTrue(name in self.f) def test_reshape(self): """ Create from existing data, and make it fit a new shape """ data = np.arange(30, dtype='f') dset = self.f.create_dataset(make_name(), shape=(10, 3), data=data) self.assertEqual(dset.shape, (10, 3)) self.assertArrayEqual(dset[...], data.reshape((10, 3))) def test_appropriate_low_level_id(self): " Binding Dataset to a non-DatasetID identifier fails with ValueError " with self.assertRaises(ValueError): Dataset(self.f['/'].id) def check_h5_string(self, dset, cset, length): tid = dset.id.get_type() assert isinstance(tid, h5t.TypeStringID) assert tid.get_cset() == cset if length is None: assert tid.is_variable_str() else: assert not tid.is_variable_str() assert tid.get_size() == length def test_create_bytestring(self): """ Creating dataset with byte string yields vlen ASCII dataset """ def check_vlen_ascii(dset): self.check_h5_string(dset, h5t.CSET_ASCII, length=None) check_vlen_ascii(self.f.create_dataset(make_name("a"), data=b'abc')) check_vlen_ascii(self.f.create_dataset(make_name("b"), data=[b'abc', b'def'])) check_vlen_ascii(self.f.create_dataset(make_name("c"), data=[[b'abc'], [b'def']])) check_vlen_ascii(self.f.create_dataset( make_name("d"), data=np.array([b'abc', b'def'], dtype=object) )) 
def test_create_np_s(self): dset = self.f.create_dataset(make_name(), data=np.array([b'abc', b'def'], dtype='S3')) self.check_h5_string(dset, h5t.CSET_ASCII, length=3) def test_create_strings(self): def check_vlen_utf8(dset): self.check_h5_string(dset, h5t.CSET_UTF8, length=None) check_vlen_utf8(self.f.create_dataset(make_name("a"), data='abc')) check_vlen_utf8(self.f.create_dataset(make_name("b"), data=['abc', 'def'])) check_vlen_utf8(self.f.create_dataset(make_name("c"), data=[['abc'], ['def']])) check_vlen_utf8(self.f.create_dataset( make_name("d"), data=np.array(['abc', 'def'], dtype=object) )) def test_create_np_u(self): with self.assertRaises(TypeError): self.f.create_dataset(make_name(), data=np.array([b'abc', b'def'], dtype='U3')) def test_empty_create_via_None_shape(self): name = make_name() self.f.create_dataset(name, dtype='f') self.assertTrue(is_empty_dataspace(self.f[name].id)) def test_empty_create_via_Empty_class(self): name = make_name() self.f.create_dataset(name, data=h5py.Empty(dtype='f')) self.assertTrue(is_empty_dataspace(self.f[name].id)) def test_create_incompatible_data(self): # Shape tuple is incompatible with data with self.assertRaises(ValueError): self.f.create_dataset(make_name(), shape=4, data= np.arange(3))
TestCreateData
python
lazyprogrammer__machine_learning_examples
ann_class2/batch_norm_theano.py
{ "start": 2908, "end": 5905 }
class ____(object): def __init__(self, hidden_layer_sizes): self.hidden_layer_sizes = hidden_layer_sizes def fit(self, X, Y, Xtest, Ytest, activation=T.nnet.relu, learning_rate=1e-2, mu=0.9, epochs=15, batch_sz=100, print_period=100, show_fig=True): X = X.astype(np.float32) Y = Y.astype(np.int32) # initialize hidden layers N, D = X.shape self.layers = [] M1 = D for M2 in self.hidden_layer_sizes: h = HiddenLayerBatchNorm(M1, M2, activation) self.layers.append(h) M1 = M2 # final layer K = len(set(Y)) h = HiddenLayer(M1, K, T.nnet.softmax) self.layers.append(h) if batch_sz is None: batch_sz = N # collect params for later use self.params = [] for h in self.layers: self.params += h.params # note! we will need to build the output differently # for train and test (prediction) # set up theano functions and variables thX = T.matrix('X') thY = T.ivector('Y') # for training p_y_given_x = self.forward(thX, is_training=True) cost = -T.mean(T.log(p_y_given_x[T.arange(thY.shape[0]), thY])) prediction = T.argmax(p_y_given_x, axis=1) grads = T.grad(cost, self.params) # momentum only updates = momentum_updates(cost, self.params, learning_rate, mu) for layer in self.layers[:-1]: updates += layer.running_update train_op = theano.function( inputs=[thX, thY], outputs=[cost, prediction], updates=updates, ) # for testing test_p_y_given_x = self.forward(thX, is_training=False) test_prediction = T.argmax(test_p_y_given_x, axis=1) self.predict = theano.function( inputs=[thX], outputs=test_prediction, ) n_batches = N // batch_sz costs = [] for i in range(epochs): if n_batches > 1: X, Y = shuffle(X, Y) for j in range(n_batches): Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)] Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)] c, p = train_op(Xbatch, Ybatch) costs.append(c) if (j+1) % print_period == 0: accuracy = np.mean(p == Ybatch) print("epoch:", i, "batch:", j, "n_batches:", n_batches, "cost:", c, "accuracy:", accuracy) print("Train acc:", self.score(X, Y), "Test acc:", self.score(Xtest, Ytest)) if 
show_fig: plt.plot(costs) plt.show() def forward(self, X, is_training): out = X for h in self.layers[:-1]: out = h.forward(out, is_training) out = self.layers[-1].forward(out) return out def score(self, X, Y): P = self.predict(X) return np.mean(Y == P) def main(): # step 1: get the data and define all the usual variables Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() ann = ANN([500, 300]) ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True) print("Train accuracy:", ann.score(Xtrain, Ytrain)) print("Test accuracy:", ann.score(Xtest, Ytest)) if __name__ == '__main__': main()
ANN
python
doocs__leetcode
solution/0900-0999/0936.Stamping The Sequence/Solution.py
{ "start": 0, "end": 917 }
class ____: def movesToStamp(self, stamp: str, target: str) -> List[int]: m, n = len(stamp), len(target) indeg = [m] * (n - m + 1) q = deque() g = [[] for _ in range(n)] for i in range(n - m + 1): for j, c in enumerate(stamp): if target[i + j] == c: indeg[i] -= 1 if indeg[i] == 0: q.append(i) else: g[i + j].append(i) ans = [] vis = [False] * n while q: i = q.popleft() ans.append(i) for j in range(m): if not vis[i + j]: vis[i + j] = True for k in g[i + j]: indeg[k] -= 1 if indeg[k] == 0: q.append(k) return ans[::-1] if all(vis) else []
Solution
python
aio-libs__aiohttp
aiohttp/web_exceptions.py
{ "start": 5041, "end": 5120 }
class ____(HTTPSuccessful): status_code = 203
HTTPNonAuthoritativeInformation
python
getsentry__sentry
src/sentry/integrations/msteams/client.py
{ "start": 3089, "end": 3683 }
class ____(MsTeamsClientABC): integration_name = IntegrationProviderSlug.MSTEAMS.value def __init__(self, access_token: str, service_url: str): super().__init__() self.access_token = access_token self.base_url = service_url.rstrip("/") def request(self, method, path, data=None, params=None): headers = {"Authorization": f"Bearer {self.access_token}"} return self._request(method, path, headers=headers, data=data, params=params) # MsTeamsClient is used with an existing integration object and handles token refreshing
MsTeamsPreInstallClient
python
google__pytype
pytype/tests/test_fiddle_overlay.py
{ "start": 350, "end": 397 }
class ____(Generic[T], Buildable[T]): ...
Config
python
getsentry__sentry
src/sentry/backup/services/import_export/model.py
{ "start": 7573, "end": 8009 }
class ____(RpcModel): """ Information about a successful export: the mapping of old pks to new ones, the maximum pk exported, and the JSON string of the exported models. """ is_err: Literal[False] = False mapped_pks: RpcPrimaryKeyMap max_pk: int = 0 json_data: str = "[]" # Using strings, rather than `auto()` integers, makes this more (though not completely) robust to # version skew. @unique
RpcExportOk
python
allegroai__clearml
clearml/backend_api/services/v2_13/workers.py
{ "start": 30941, "end": 43268 }
class ____(NonStrictDataModel): """ :param cpu_usage: Average CPU usage per core :type cpu_usage: Sequence[float] :param gpu_usage: Average GPU usage per GPU card :type gpu_usage: Sequence[float] :param memory_used: Used memory MBs :type memory_used: int :param memory_free: Free memory MBs :type memory_free: int :param gpu_memory_free: GPU free memory MBs :type gpu_memory_free: Sequence[int] :param gpu_memory_used: GPU used memory MBs :type gpu_memory_used: Sequence[int] :param network_tx: Mbytes per second :type network_tx: int :param network_rx: Mbytes per second :type network_rx: int :param disk_free_home: Mbytes free space of /home drive :type disk_free_home: int :param disk_free_temp: Mbytes free space of /tmp drive :type disk_free_temp: int :param disk_read: Mbytes read per second :type disk_read: int :param disk_write: Mbytes write per second :type disk_write: int :param cpu_temperature: CPU temperature :type cpu_temperature: Sequence[float] :param gpu_temperature: GPU temperature :type gpu_temperature: Sequence[float] """ _schema = { "properties": { "cpu_temperature": { "description": "CPU temperature", "items": {"type": "number"}, "type": ["array", "null"], }, "cpu_usage": { "description": "Average CPU usage per core", "items": {"type": "number"}, "type": ["array", "null"], }, "disk_free_home": { "description": "Mbytes free space of /home drive", "type": ["integer", "null"], }, "disk_free_temp": { "description": "Mbytes free space of /tmp drive", "type": ["integer", "null"], }, "disk_read": { "description": "Mbytes read per second", "type": ["integer", "null"], }, "disk_write": { "description": "Mbytes write per second", "type": ["integer", "null"], }, "gpu_memory_free": { "description": "GPU free memory MBs", "items": {"type": "integer"}, "type": ["array", "null"], }, "gpu_memory_used": { "description": "GPU used memory MBs", "items": {"type": "integer"}, "type": ["array", "null"], }, "gpu_temperature": { "description": "GPU temperature", "items": 
{"type": "number"}, "type": ["array", "null"], }, "gpu_usage": { "description": "Average GPU usage per GPU card", "items": {"type": "number"}, "type": ["array", "null"], }, "memory_free": { "description": "Free memory MBs", "type": ["integer", "null"], }, "memory_used": { "description": "Used memory MBs", "type": ["integer", "null"], }, "network_rx": { "description": "Mbytes per second", "type": ["integer", "null"], }, "network_tx": { "description": "Mbytes per second", "type": ["integer", "null"], }, }, "type": "object", } def __init__( self, cpu_usage: Optional[List[float]] = None, gpu_usage: Optional[List[float]] = None, memory_used: Optional[int] = None, memory_free: Optional[int] = None, gpu_memory_free: Optional[List[int]] = None, gpu_memory_used: Optional[List[int]] = None, network_tx: Optional[int] = None, network_rx: Optional[int] = None, disk_free_home: Optional[int] = None, disk_free_temp: Optional[int] = None, disk_read: Optional[int] = None, disk_write: Optional[int] = None, cpu_temperature: Optional[List[float]] = None, gpu_temperature: Optional[List[float]] = None, **kwargs: Any ) -> None: super(MachineStats, self).__init__(**kwargs) self.cpu_usage = cpu_usage self.gpu_usage = gpu_usage self.memory_used = memory_used self.memory_free = memory_free self.gpu_memory_free = gpu_memory_free self.gpu_memory_used = gpu_memory_used self.network_tx = network_tx self.network_rx = network_rx self.disk_free_home = disk_free_home self.disk_free_temp = disk_free_temp self.disk_read = disk_read self.disk_write = disk_write self.cpu_temperature = cpu_temperature self.gpu_temperature = gpu_temperature @schema_property("cpu_usage") def cpu_usage(self) -> Optional[List[float]]: return self._property_cpu_usage @cpu_usage.setter def cpu_usage(self, value: Optional[List[float]]) -> None: if value is None: self._property_cpu_usage = None return self.assert_isinstance(value, "cpu_usage", (list, tuple)) self.assert_isinstance(value, "cpu_usage", six.integer_types + (float,), 
is_array=True) self._property_cpu_usage = value @schema_property("gpu_usage") def gpu_usage(self) -> Optional[List[float]]: return self._property_gpu_usage @gpu_usage.setter def gpu_usage(self, value: Optional[List[float]]) -> None: if value is None: self._property_gpu_usage = None return self.assert_isinstance(value, "gpu_usage", (list, tuple)) self.assert_isinstance(value, "gpu_usage", six.integer_types + (float,), is_array=True) self._property_gpu_usage = value @schema_property("memory_used") def memory_used(self) -> Optional[int]: return self._property_memory_used @memory_used.setter def memory_used(self, value: Optional[int]) -> None: if value is None: self._property_memory_used = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "memory_used", six.integer_types) self._property_memory_used = value @schema_property("memory_free") def memory_free(self) -> Optional[int]: return self._property_memory_free @memory_free.setter def memory_free(self, value: Optional[int]) -> None: if value is None: self._property_memory_free = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "memory_free", six.integer_types) self._property_memory_free = value @schema_property("gpu_memory_free") def gpu_memory_free(self) -> Optional[List[int]]: return self._property_gpu_memory_free @gpu_memory_free.setter def gpu_memory_free(self, value: Optional[List[int]]) -> None: if value is None: self._property_gpu_memory_free = None return self.assert_isinstance(value, "gpu_memory_free", (list, tuple)) value = [int(v) if isinstance(v, float) and v.is_integer() else v for v in value] self.assert_isinstance(value, "gpu_memory_free", six.integer_types, is_array=True) self._property_gpu_memory_free = value @schema_property("gpu_memory_used") def gpu_memory_used(self) -> Optional[List[int]]: return self._property_gpu_memory_used @gpu_memory_used.setter def 
gpu_memory_used(self, value: Optional[List[int]]) -> None: if value is None: self._property_gpu_memory_used = None return self.assert_isinstance(value, "gpu_memory_used", (list, tuple)) value = [int(v) if isinstance(v, float) and v.is_integer() else v for v in value] self.assert_isinstance(value, "gpu_memory_used", six.integer_types, is_array=True) self._property_gpu_memory_used = value @schema_property("network_tx") def network_tx(self) -> Optional[int]: return self._property_network_tx @network_tx.setter def network_tx(self, value: Optional[int]) -> None: if value is None: self._property_network_tx = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "network_tx", six.integer_types) self._property_network_tx = value @schema_property("network_rx") def network_rx(self) -> Optional[int]: return self._property_network_rx @network_rx.setter def network_rx(self, value: Optional[int]) -> None: if value is None: self._property_network_rx = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "network_rx", six.integer_types) self._property_network_rx = value @schema_property("disk_free_home") def disk_free_home(self) -> Optional[int]: return self._property_disk_free_home @disk_free_home.setter def disk_free_home(self, value: Optional[int]) -> None: if value is None: self._property_disk_free_home = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "disk_free_home", six.integer_types) self._property_disk_free_home = value @schema_property("disk_free_temp") def disk_free_temp(self) -> Optional[int]: return self._property_disk_free_temp @disk_free_temp.setter def disk_free_temp(self, value: Optional[int]) -> None: if value is None: self._property_disk_free_temp = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "disk_free_temp", 
six.integer_types) self._property_disk_free_temp = value @schema_property("disk_read") def disk_read(self) -> Optional[int]: return self._property_disk_read @disk_read.setter def disk_read(self, value: Optional[int]) -> None: if value is None: self._property_disk_read = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "disk_read", six.integer_types) self._property_disk_read = value @schema_property("disk_write") def disk_write(self) -> Optional[int]: return self._property_disk_write @disk_write.setter def disk_write(self, value: Optional[int]) -> None: if value is None: self._property_disk_write = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "disk_write", six.integer_types) self._property_disk_write = value @schema_property("cpu_temperature") def cpu_temperature(self) -> Optional[List[float]]: return self._property_cpu_temperature @cpu_temperature.setter def cpu_temperature(self, value: Optional[List[float]]) -> None: if value is None: self._property_cpu_temperature = None return self.assert_isinstance(value, "cpu_temperature", (list, tuple)) self.assert_isinstance(value, "cpu_temperature", six.integer_types + (float,), is_array=True) self._property_cpu_temperature = value @schema_property("gpu_temperature") def gpu_temperature(self) -> Optional[List[float]]: return self._property_gpu_temperature @gpu_temperature.setter def gpu_temperature(self, value: Optional[List[float]]) -> None: if value is None: self._property_gpu_temperature = None return self.assert_isinstance(value, "gpu_temperature", (list, tuple)) self.assert_isinstance(value, "gpu_temperature", six.integer_types + (float,), is_array=True) self._property_gpu_temperature = value
MachineStats
python
streamlit__streamlit
lib/streamlit/web/server/bidi_component_request_handler.py
{ "start": 1459, "end": 6932 }
class ____(tornado.web.RequestHandler):
    """Request handler for serving Custom Components v2 static assets.

    The handler resolves a requested path to a registered component's asset
    within its component root, writes the file contents to the response, and
    sets appropriate ``Content-Type`` and cache headers. If the component or
    asset cannot be found, a suitable HTTP status is returned.
    """

    def initialize(self, component_manager: BidiComponentManager) -> None:
        """Initialize the handler with the given component manager.

        Parameters
        ----------
        component_manager : BidiComponentManager
            Manager used to look up registered components and their root
            paths.
        """
        # Tornado calls initialize() with the kwargs given at route
        # registration time; the manager is stashed for use in get().
        self._component_manager = component_manager

    def get(self, path: str) -> None:
        """Serve a component asset for the given URL path.

        The first path segment is interpreted as the component name. The rest
        of the path is resolved to a file within that component's root
        directory. If the file exists and is readable, its bytes are written
        to the response and the ``Content-Type`` header is set based on the
        file type.

        Parameters
        ----------
        path : str
            Request path in the form ``"<component_name>/<relative_file>"``.

        Notes
        -----
        This method writes directly to the response and sets appropriate HTTP
        status codes on error (``404`` for missing components/files, ``403``
        for forbidden paths).
        """
        parts = path.split("/")
        component_name = parts[0]
        component_def = self._component_manager.get(component_name)
        if component_def is None:
            # Unknown component name.
            self.write("not found")
            self.set_status(404)
            return

        # Get the component path from the component manager
        component_path = self._component_manager.get_component_path(component_name)
        if component_path is None:
            self.write("not found")
            self.set_status(404)
            return

        # Build a safe absolute path within the component root
        filename = "/".join(parts[1:])
        # If no file segment is provided (e.g. only component name or trailing slash),
        # treat as not found rather than attempting to open a directory.
        if not filename or filename.endswith("/"):
            self.write("not found")
            self.set_status(404)
            return

        # build_safe_abspath returns None for paths escaping the component
        # root (e.g. via ".." traversal) — answer with 403, not 404.
        abspath = build_safe_abspath(component_path, filename)
        if abspath is None:
            self.write("forbidden")
            self.set_status(403)
            return

        # If the resolved path is a directory, return 404 not found.
        if os.path.isdir(abspath):
            self.write("not found")
            self.set_status(404)
            return

        try:
            with open(abspath, "rb") as file:
                contents = file.read()
        except OSError:
            # Strip CR/LF from the path before logging to avoid log injection.
            sanitized_abspath = abspath.replace("\n", "").replace("\r", "")
            _LOGGER.exception(
                "BidiComponentRequestHandler: GET %s read error", sanitized_abspath
            )
            self.write("read error")
            self.set_status(404)
            return

        self.write(contents)
        self.set_header("Content-Type", guess_content_type(abspath))
        self.set_extra_headers(path)

    def set_extra_headers(self, path: str) -> None:
        """Disable cache for HTML files.

        We assume other assets like JS and CSS are suffixed with their hash,
        so they can be cached indefinitely.
        """
        if path.endswith(".html"):
            self.set_header("Cache-Control", "no-cache")
        else:
            self.set_header("Cache-Control", "public")

    def set_default_headers(self) -> None:
        """Set default CORS headers based on server configuration.

        If cross-origin requests are fully allowed, ``Access-Control-Allow-
        Origin`` is set to ``"*"``. Otherwise, if the request ``Origin``
        header is an allowed origin, the header is echoed back.
        """
        if streamlit.web.server.routes.allow_all_cross_origin_requests():
            self.set_header("Access-Control-Allow-Origin", "*")
        elif streamlit.web.server.routes.is_allowed_origin(
            origin := self.request.headers.get("Origin")
        ):
            # Echo the validated Origin back; cast is for the type checker
            # (is_allowed_origin returning True implies origin is not None).
            self.set_header("Access-Control-Allow-Origin", cast("str", origin))

    def options(self) -> None:
        """Handle preflight CORS requests.

        Returns
        -------
        None
            Responds with HTTP ``204 No Content`` to indicate that the actual
            request can proceed.
        """
        self.set_status(204)
        self.finish()

    @staticmethod
    def get_url(file_id: str) -> str:
        """Return the URL for a component asset identified by ``file_id``.

        Parameters
        ----------
        file_id : str
            Component file identifier (typically a relative path or hashed
            filename).

        Returns
        -------
        str
            Relative URL path for the resource, to be joined with the server
            base URL.

        Examples
        --------
        >>> BidiComponentRequestHandler.get_url("my_component/main.js")
        '_stcore/bidi-components/my_component/main.js'
        """
        return f"_stcore/bidi-components/{file_id}"
BidiComponentRequestHandler
python
sympy__sympy
sympy/functions/elementary/hyperbolic.py
{ "start": 64476, "end": 70979 }
class ____(InverseHyperbolicFunction):
    """
    ``acsch(x)`` is the inverse hyperbolic cosecant of ``x``.

    The inverse hyperbolic cosecant function.

    Examples
    ========

    >>> from sympy import acsch, sqrt, I
    >>> from sympy.abc import x
    >>> acsch(x).diff(x)
    -1/(x**2*sqrt(1 + x**(-2)))
    >>> acsch(1).diff(x)
    0
    >>> acsch(1)
    log(1 + sqrt(2))
    >>> acsch(I)
    -I*pi/2
    >>> acsch(-2*I)
    I*pi/6
    >>> acsch(I*(sqrt(6) - sqrt(2)))
    -5*I*pi/12

    See Also
    ========

    sympy.functions.elementary.hyperbolic.asinh

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
    .. [2] https://dlmf.nist.gov/4.37
    .. [3] https://functions.wolfram.com/ElementaryFunctions/ArcCsch/

    """

    def fdiff(self, argindex=1):
        # d/dz acsch(z) = -1/(z**2*sqrt(1 + 1/z**2))
        if argindex == 1:
            z = self.args[0]
            return -1/(z**2*sqrt(1 + 1/z**2))
        else:
            raise ArgumentIndexError(self, argindex)

    @classmethod
    def eval(cls, arg):
        # Automatic evaluation at special points; returning None leaves the
        # expression unevaluated.
        if arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg is S.Infinity or arg is S.NegativeInfinity:
                return S.Zero
            elif arg.is_zero:
                return S.ComplexInfinity
            elif arg is S.One:
                return log(1 + sqrt(2))
            elif arg is S.NegativeOne:
                return - log(1 + sqrt(2))

        if arg.is_number:
            # Table of known imaginary special values; entries are the
            # coefficient of I in the result.
            cst_table = _acsch_table()
            if arg in cst_table:
                return cst_table[arg]*I

        if arg is S.ComplexInfinity:
            return S.Zero

        if arg.is_infinite:
            return S.Zero

        if arg.is_zero:
            return S.ComplexInfinity

        # acsch is odd: acsch(-x) == -acsch(x); canonicalize the sign.
        if arg.could_extract_minus_sign():
            return -cls(-arg)

    @staticmethod
    @cacheit
    def taylor_term(n, x, *previous_terms):
        # Series of acsch(x) about x = 0 (after the log(2/x) leading term);
        # only even powers appear.
        if n == 0:
            return log(2 / x)
        elif n < 0 or n % 2 == 1:
            return S.Zero
        else:
            x = sympify(x)
            if len(previous_terms) > 2 and n > 2:
                # Recurrence from the previous even-order term.
                p = previous_terms[-2]
                return -p * ((n - 1)*(n-2)) * x**2/(4 * (n//2)**2)
            else:
                # Closed form via rising factorials and factorials.
                k = n // 2
                R = RisingFactorial(S.Half, k) * n
                F = factorial(k) * n // 2 * n // 2
                return S.NegativeOne**(k + 1) * R / F * x**n / 4

    def _eval_as_leading_term(self, x, logx, cdir):
        arg = self.args[0]
        x0 = arg.subs(x, 0).cancel()
        # Handling branch points
        if x0 in (-I, I, S.Zero):
            return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
        if x0 is S.NaN:
            expr = self.func(arg.as_leading_term(x))
            if expr.is_finite:
                return expr
            else:
                return self
        if x0 is S.ComplexInfinity:
            # acsch(z) ~ 1/z as z -> oo.
            return (1/arg).as_leading_term(x)
        # Handling points lying on branch cuts (-I, I)
        if x0.is_imaginary and (1 + x0**2).is_positive:
            ndir = arg.dir(x, cdir if cdir else 1)
            if re(ndir).is_positive:
                if im(x0).is_positive:
                    return -self.func(x0) - I*pi
            elif re(ndir).is_negative:
                if im(x0).is_negative:
                    return -self.func(x0) + I*pi
            else:
                return self.rewrite(log)._eval_as_leading_term(x, logx=logx, cdir=cdir)
        return self.func(x0)

    def _eval_nseries(self, x, n, logx, cdir=0):  # acsch
        from sympy.series.order import O
        arg = self.args[0]
        arg0 = arg.subs(x, 0)

        # Handling branch points
        if arg0 is I:
            # Expand acsch(I + t**2) in a dummy variable, then substitute
            # the series of sqrt(arg - I) for t.
            t = Dummy('t', positive=True)
            ser = acsch(I + t**2).rewrite(log).nseries(t, 0, 2*n)
            arg1 = -I + self.args[0]
            f = arg1.as_leading_term(x)
            g = (arg1 - f)/ f
            if not g.is_meromorphic(x, 0):   # cannot be expanded
                return O(1) if n == 0 else -I*pi/2 + O(sqrt(x))
            res1 = sqrt(S.One + g)._eval_nseries(x, n=n, logx=logx)
            res = (res1.removeO()*sqrt(f)).expand()
            res = ser.removeO().subs(t, res).expand().powsimp() + O(x**n, x)
            return res

        if arg0 == S.NegativeOne*I:
            # Mirror of the branch point at +I.
            t = Dummy('t', positive=True)
            ser = acsch(-I + t**2).rewrite(log).nseries(t, 0, 2*n)
            arg1 = I + self.args[0]
            f = arg1.as_leading_term(x)
            g = (arg1 - f)/ f
            if not g.is_meromorphic(x, 0):   # cannot be expanded
                return O(1) if n == 0 else I*pi/2 + O(sqrt(x))
            res1 = sqrt(S.One + g)._eval_nseries(x, n=n, logx=logx)
            res = (res1.removeO()*sqrt(f)).expand()
            return ser.removeO().subs(t, res).expand().powsimp() + O(x**n, x)

        res = super()._eval_nseries(x, n=n, logx=logx)
        if arg0 is S.ComplexInfinity:
            return res

        # Handling points lying on branch cuts (-I, I)
        if arg0.is_imaginary and (1 + arg0**2).is_positive:
            ndir = self.args[0].dir(x, cdir if cdir else 1)
            if re(ndir).is_positive:
                if im(arg0).is_positive:
                    return -res - I*pi
            elif re(ndir).is_negative:
                if im(arg0).is_negative:
                    return -res + I*pi
            else:
                return self.rewrite(log)._eval_nseries(x, n, logx=logx, cdir=cdir)
        return res

    def inverse(self, argindex=1):
        """
        Returns the inverse of this function.
        """
        return csch

    def _eval_rewrite_as_log(self, arg, **kwargs):
        # acsch(x) = log(1/x + sqrt(1/x**2 + 1))
        return log(1/arg + sqrt(1/arg**2 + 1))

    _eval_rewrite_as_tractable = _eval_rewrite_as_log

    def _eval_rewrite_as_asinh(self, arg, **kwargs):
        # acsch(x) = asinh(1/x)
        return asinh(1/arg)

    def _eval_rewrite_as_acosh(self, arg, **kwargs):
        # The sqrt ratio encodes the correct branch factor.
        return I*(sqrt(1 - I/arg)/sqrt(I/arg - 1)*
                  acosh(I/arg, evaluate=False) - pi*S.Half)

    def _eval_rewrite_as_atanh(self, arg, **kwargs):
        arg2 = arg**2
        arg2p1 = arg2 + 1
        return sqrt(-arg2)/arg*(pi*S.Half -
                                sqrt(-arg2p1**2)/arg2p1*atanh(sqrt(arg2p1)))

    def _eval_is_zero(self):
        # acsch(x) -> 0 only as |x| -> oo.
        return self.args[0].is_infinite

    def _eval_is_extended_real(self):
        return self.args[0].is_extended_real

    def _eval_is_finite(self):
        # acsch(0) = zoo; finite everywhere else.
        return fuzzy_not(self.args[0].is_zero)
acsch
python
sphinx-doc__sphinx
sphinx/ext/intersphinx/_load.py
{ "start": 7927, "end": 16450 }
class ____:
    """Snapshot of the intersphinx-related configuration values.

    NOTE(review): ``from_config`` constructs instances via keyword arguments,
    which presumes a dataclass-style generated ``__init__`` (the decorator is
    above this view) — confirm against the full file.
    """

    # Cache lifetime in days; negative disables expiry (see
    # _fetch_inventory_group).
    intersphinx_cache_limit: int
    # Timeout in seconds for remote inventory fetches (None = no timeout).
    intersphinx_timeout: int | float | None
    # TLS certificate verification toggle.
    tls_verify: bool
    # CA certificates: a single path or a mapping of host -> path.
    tls_cacerts: str | dict[str, str] | None
    user_agent: str

    @classmethod
    def from_config(cls, config: Config) -> _InvConfig:
        """Build an instance from a Sphinx :class:`Config` object."""
        return cls(
            intersphinx_cache_limit=config.intersphinx_cache_limit,
            intersphinx_timeout=config.intersphinx_timeout,
            tls_verify=config.tls_verify,
            tls_cacerts=config.tls_cacerts,
            user_agent=config.user_agent,
        )


def _fetch_inventory_group(
    *,
    project: _IntersphinxProject,
    cache: dict[InventoryURI, InventoryCacheEntry],
    now: int,
    config: _InvConfig,
    srcdir: Path,
    cache_dir: Path | None,
) -> bool:
    """Load one project's inventory, trying its locations in order.

    Updates *cache* in place with the first inventory that could be loaded
    (from the on-disk cache file or by fetching). Returns True when the
    in-memory cache was refreshed from a fetch, False otherwise.
    """
    if config.intersphinx_cache_limit >= 0:
        # Positive value: cache is expired if its timestamp is below
        # `now - X days`.
        cache_time = now - config.intersphinx_cache_limit * 86400
    else:
        # Negative value: cache is expired if its timestamp is below
        # zero, which is impossible.
        cache_time = 0

    updated = False
    failures = []

    for location in project.locations:
        # location is either None or a non-empty string
        if location is None:
            inv_location = posixpath.join(project.target_uri, INVENTORY_FILENAME)
        else:
            inv_location = location

        if cache_dir is not None:
            cache_path = cache_dir / f'{project.name}_{INVENTORY_FILENAME}'
        else:
            cache_path = None

        # Prefer a fresh on-disk copy of a remote inventory that is not yet
        # in the in-memory cache.
        if (
            cache_path is not None
            and '://' in inv_location
            and project.target_uri not in cache
            and cache_path.is_file()
            # the saved 'objects.inv' is not older than the cache expiry time
            and cache_path.stat().st_mtime >= cache_time
        ):
            raw_data = cache_path.read_bytes()
            inv = _load_inventory(raw_data, target_uri=project.target_uri)
            cache_path_mtime = int(cache_path.stat().st_mtime)
            cache[project.target_uri] = project.name, cache_path_mtime, inv.data
            break

        # decide whether the inventory must be read: always read local
        # files; remote ones only if the cache time is expired
        if (
            '://' not in inv_location
            or project.target_uri not in cache
            or cache[project.target_uri][1] < cache_time
        ):
            LOGGER.info(
                __("loading intersphinx inventory '%s' from %s ..."),
                project.name,
                _get_safe_url(inv_location),
            )

            try:
                raw_data, target_uri = _fetch_inventory_data(
                    target_uri=project.target_uri,
                    inv_location=inv_location,
                    config=config,
                    srcdir=srcdir,
                    cache_path=cache_path,
                )
                inv = _load_inventory(raw_data, target_uri=target_uri)
            except Exception as err:
                # Remember the failure and try the next alternative location.
                failures.append(err.args)
                continue
            else:
                cache[project.target_uri] = project.name, now, inv.data
                updated = True
                break

    if not failures:
        pass
    elif len(failures) < len(project.locations):
        # At least one location worked; report the others informationally.
        LOGGER.info(
            __(
                'encountered some issues with some of the inventories,'
                ' but they had working alternatives:'
            )
        )
        for fail in failures:
            LOGGER.info(*fail)
    else:
        # Every location failed — escalate to a warning with all details.
        issues = '\n'.join(f[0] % f[1:] for f in failures)
        LOGGER.warning(
            '%s\n%s',
            __('failed to reach any of the inventories with the following issues:'),
            issues,
        )
    return updated


def fetch_inventory(app: Sphinx, uri: InventoryURI, inv: str) -> Inventory:
    """Fetch, parse and return an intersphinx inventory file."""
    raw_data, uri = _fetch_inventory_data(
        target_uri=uri,
        inv_location=inv,
        config=_InvConfig.from_config(app.config),
        srcdir=app.srcdir,
        cache_path=None,
    )
    return _load_inventory(raw_data, target_uri=uri).data


def _fetch_inventory_data(
    *,
    target_uri: InventoryURI,
    inv_location: str,
    config: _InvConfig,
    srcdir: Path,
    cache_path: Path | None,
) -> tuple[bytes, str]:
    """Fetch inventory data from a local or remote source.

    Returns the raw inventory bytes together with the (possibly updated)
    target URI. When *cache_path* is given, remote fetches are persisted to
    disk.
    """
    # both *target_uri* (base URI of the links to generate)
    # and *inv_location* (actual location of the inventory file)
    # can be local or remote URIs
    if '://' in target_uri:
        # inv URI points to remote resource; strip any existing auth
        target_uri = _strip_basic_auth(target_uri)
    if '://' in inv_location:
        raw_data, target_uri = _fetch_inventory_url(
            target_uri=target_uri, inv_location=inv_location, config=config
        )
        if cache_path is not None:
            cache_path.parent.mkdir(parents=True, exist_ok=True)
            cache_path.write_bytes(raw_data)
    else:
        raw_data = _fetch_inventory_file(inv_location=inv_location, srcdir=srcdir)
    return raw_data, target_uri


def _load_inventory(raw_data: bytes, /, *, target_uri: InventoryURI) -> _Inventory:
    """Parse and return an intersphinx inventory file."""
    # *target_uri* (base URI of the links to generate) can be a local or remote URI
    try:
        inv = InventoryFile.loads(raw_data, uri=target_uri)
    except ValueError as exc:
        msg = f'unknown or unsupported inventory version: {exc!r}'
        raise ValueError(msg) from exc
    return inv


def _fetch_inventory_url(
    *, target_uri: InventoryURI, inv_location: str, config: _InvConfig
) -> tuple[bytes, str]:
    """Download a remote inventory, following redirects.

    Returns the raw bytes and a target URI that is rebased onto the final
    (post-redirect) location when the inventory has moved.
    """
    try:
        # NOTE(review): this is Sphinx's requests wrapper — the underscore
        # kwargs (_user_agent, _tls_info) are its extensions, not vanilla
        # `requests` parameters.
        with requests.get(
            inv_location,
            timeout=config.intersphinx_timeout,
            _user_agent=config.user_agent,
            _tls_info=(config.tls_verify, config.tls_cacerts),
        ) as r:
            r.raise_for_status()
            raw_data = r.content
            new_inv_location = r.url
    except Exception as err:
        # Repackage the error args so the caller can log a uniform message.
        err.args = (
            'intersphinx inventory %r not fetchable due to %s: %s',
            inv_location,
            err.__class__,
            str(err),
        )
        raise
    if inv_location != new_inv_location:
        msg = __('intersphinx inventory has moved: %s -> %s')
        LOGGER.info(msg, inv_location, new_inv_location)

        # Keep generated links consistent with the redirect target when the
        # configured target URI pointed at the old inventory's directory.
        if target_uri in {
            inv_location,
            os.path.dirname(inv_location),
            os.path.dirname(inv_location) + '/',
        }:
            target_uri = os.path.dirname(new_inv_location)
    return raw_data, target_uri


def _fetch_inventory_file(*, inv_location: str, srcdir: Path) -> bytes:
    """Read a local inventory file relative to *srcdir*."""
    try:
        with open(srcdir / inv_location, 'rb') as f:
            raw_data = f.read()
    except Exception as err:
        # Repackage the error args so the caller can log a uniform message.
        err.args = (
            'intersphinx inventory %r not readable due to %s: %s',
            inv_location,
            err.__class__.__name__,
            str(err),
        )
        raise
    return raw_data


def _get_safe_url(url: str) -> str:
    """Gets version of *url* with basic auth passwords obscured. This function
    returns results suitable for printing and logging.

    E.g.: https://user:12345@example.com => https://user@example.com

    :param url: a url
    :type url: ``str``

    :return: *url* with password removed
    :rtype: ``str``
    """
    parts = urlsplit(url)
    if parts.username is None:
        return url
    else:
        frags = list(parts)
        if parts.port:
            frags[1] = f'{parts.username}@{parts.hostname}:{parts.port}'
        else:
            frags[1] = f'{parts.username}@{parts.hostname}'

        return urlunsplit(frags)


def _strip_basic_auth(url: str) -> str:
    """Returns *url* with basic auth credentials removed.

    E.g.: https://user:pass@example.com => https://example.com

    *url* need not include basic auth credentials.

    :param url: url which may or may not contain basic auth credentials
    :type url: ``str``

    :return: *url* with any basic auth creds removed
    :rtype: ``str``
    """
    frags = list(urlsplit(url))
    # swap out 'user[:pass]@hostname' for 'hostname'
    if '@' in frags[1]:
        frags[1] = frags[1].split('@')[1]
    return urlunsplit(frags)
_InvConfig
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/decl_api.py
{ "start": 38380, "end": 64630 }
class ____(EventTarget): """Generalized registry for mapping classes. The :class:`_orm.registry` serves as the basis for maintaining a collection of mappings, and provides configurational hooks used to map classes. The three general kinds of mappings supported are Declarative Base, Declarative Decorator, and Imperative Mapping. All of these mapping styles may be used interchangeably: * :meth:`_orm.registry.generate_base` returns a new declarative base class, and is the underlying implementation of the :func:`_orm.declarative_base` function. * :meth:`_orm.registry.mapped` provides a class decorator that will apply declarative mapping to a class without the use of a declarative base class. * :meth:`_orm.registry.map_imperatively` will produce a :class:`_orm.Mapper` for a class without scanning the class for declarative class attributes. This method suits the use case historically provided by the ``sqlalchemy.orm.mapper()`` classical mapping function, which is removed as of SQLAlchemy 2.0. .. versionadded:: 1.4 .. seealso:: :ref:`orm_mapping_classes_toplevel` - overview of class mapping styles. """ _class_registry: clsregistry._ClsRegistryType _managers: weakref.WeakKeyDictionary[ClassManager[Any], Literal[True]] metadata: MetaData constructor: CallableReference[Callable[..., None]] type_annotation_map: _MutableTypeAnnotationMapType _dependents: Set[_RegistryType] _dependencies: Set[_RegistryType] _new_mappers: bool dispatch: dispatcher["registry"] def __init__( self, *, metadata: Optional[MetaData] = None, class_registry: Optional[clsregistry._ClsRegistryType] = None, type_annotation_map: Optional[_TypeAnnotationMapType] = None, constructor: Callable[..., None] = _declarative_constructor, ): r"""Construct a new :class:`_orm.registry` :param metadata: An optional :class:`_schema.MetaData` instance. All :class:`_schema.Table` objects generated using declarative table mapping will make use of this :class:`_schema.MetaData` collection. 
If this argument is left at its default of ``None``, a blank :class:`_schema.MetaData` collection is created. :param constructor: Specify the implementation for the ``__init__`` function on a mapped class that has no ``__init__`` of its own. Defaults to an implementation that assigns \**kwargs for declared fields and relationships to an instance. If ``None`` is supplied, no __init__ will be provided and construction will fall back to cls.__init__ by way of the normal Python semantics. :param class_registry: optional dictionary that will serve as the registry of class names-> mapped classes when string names are used to identify classes inside of :func:`_orm.relationship` and others. Allows two or more declarative base classes to share the same registry of class names for simplified inter-base relationships. :param type_annotation_map: optional dictionary of Python types to SQLAlchemy :class:`_types.TypeEngine` classes or instances. The provided dict will update the default type mapping. This is used exclusively by the :class:`_orm.MappedColumn` construct to produce column types based on annotations within the :class:`_orm.Mapped` type. .. versionadded:: 2.0 .. 
seealso:: :ref:`orm_declarative_mapped_column_type_map` """ lcl_metadata = metadata or MetaData() if class_registry is None: class_registry = weakref.WeakValueDictionary() self._class_registry = class_registry self._managers = weakref.WeakKeyDictionary() self.metadata = lcl_metadata self.constructor = constructor self.type_annotation_map = {} if type_annotation_map is not None: self.update_type_annotation_map(type_annotation_map) self._dependents = set() self._dependencies = set() self._new_mappers = False with mapperlib._CONFIGURE_MUTEX: mapperlib._mapper_registries[self] = True def update_type_annotation_map( self, type_annotation_map: _TypeAnnotationMapType, ) -> None: """update the :paramref:`_orm.registry.type_annotation_map` with new values.""" self.type_annotation_map.update( { de_optionalize_union_types(typ): sqltype for typ, sqltype in type_annotation_map.items() } ) def _resolve_type_with_events( self, cls: Any, key: str, raw_annotation: _MatchedOnType, extracted_type: _MatchedOnType, *, raw_pep_593_type: Optional[GenericProtocol[Any]] = None, pep_593_resolved_argument: Optional[_MatchedOnType] = None, raw_pep_695_type: Optional[TypeAliasType] = None, pep_695_resolved_value: Optional[_MatchedOnType] = None, ) -> Optional[sqltypes.TypeEngine[Any]]: """Resolve type with event support for custom type mapping. This method fires the resolve_type_annotation event first to allow custom resolution, then falls back to normal resolution. 
""" if self.dispatch.resolve_type_annotation: type_resolve = TypeResolve( self, cls, key, raw_annotation, ( pep_593_resolved_argument if pep_593_resolved_argument is not None else ( pep_695_resolved_value if pep_695_resolved_value is not None else extracted_type ) ), raw_pep_593_type, pep_593_resolved_argument, raw_pep_695_type, pep_695_resolved_value, ) for fn in self.dispatch.resolve_type_annotation: result = fn(type_resolve) if result is not None: return sqltypes.to_instance(result) # type: ignore[no-any-return] # noqa: E501 if raw_pep_695_type is not None: sqltype = self._resolve_type(raw_pep_695_type) if sqltype is not None: return sqltype sqltype = self._resolve_type(extracted_type) if sqltype is not None: return sqltype if pep_593_resolved_argument is not None: sqltype = self._resolve_type(pep_593_resolved_argument) return sqltype def _resolve_type( self, python_type: _MatchedOnType ) -> Optional[sqltypes.TypeEngine[Any]]: python_type_type: Type[Any] search: Iterable[Tuple[_MatchedOnType, Type[Any]]] if is_generic(python_type): if is_literal(python_type): python_type_type = python_type # type: ignore[assignment] search = ( (python_type, python_type_type), *((lt, python_type_type) for lt in LITERAL_TYPES), ) else: python_type_type = python_type.__origin__ search = ((python_type, python_type_type),) elif isinstance(python_type, type): python_type_type = python_type search = ((pt, pt) for pt in python_type_type.__mro__) else: python_type_type = python_type # type: ignore[assignment] search = ((python_type, python_type_type),) for pt, flattened in search: # we search through full __mro__ for types. however... sql_type = self.type_annotation_map.get(pt) if sql_type is None: sql_type = sqltypes._type_map_get(pt) # type: ignore # noqa: E501 if sql_type is not None: sql_type_inst = sqltypes.to_instance(sql_type) # ... this additional step will reject most # type -> supertype matches, such as if we had # a MyInt(int) subclass. 
note also we pass NewType() # here directly; these always have to be in the # type_annotation_map to be useful resolved_sql_type = sql_type_inst._resolve_for_python_type( python_type_type, pt, flattened, ) if resolved_sql_type is not None: return resolved_sql_type return None @property def mappers(self) -> FrozenSet[Mapper[Any]]: """read only collection of all :class:`_orm.Mapper` objects.""" return frozenset(manager.mapper for manager in self._managers) def _set_depends_on(self, registry: RegistryType) -> None: if registry is self: return registry._dependents.add(self) self._dependencies.add(registry) def _flag_new_mapper(self, mapper: Mapper[Any]) -> None: mapper._ready_for_configure = True if self._new_mappers: return for reg in self._recurse_with_dependents({self}): reg._new_mappers = True @classmethod def _recurse_with_dependents( cls, registries: Set[RegistryType] ) -> Iterator[RegistryType]: todo = registries done = set() while todo: reg = todo.pop() done.add(reg) # if yielding would remove dependents, make sure we have # them before todo.update(reg._dependents.difference(done)) yield reg # if yielding would add dependents, make sure we have them # after todo.update(reg._dependents.difference(done)) @classmethod def _recurse_with_dependencies( cls, registries: Set[RegistryType] ) -> Iterator[RegistryType]: todo = registries done = set() while todo: reg = todo.pop() done.add(reg) # if yielding would remove dependencies, make sure we have # them before todo.update(reg._dependencies.difference(done)) yield reg # if yielding would remove dependencies, make sure we have # them before todo.update(reg._dependencies.difference(done)) def _mappers_to_configure(self) -> Iterator[Mapper[Any]]: return ( manager.mapper for manager in list(self._managers) if manager.is_mapped and not manager.mapper.configured and manager.mapper._ready_for_configure ) def _dispose_cls(self, cls: Type[_O]) -> None: clsregistry._remove_class(cls.__name__, cls, self._class_registry) def 
_add_manager(self, manager: ClassManager[Any]) -> None: self._managers[manager] = True if manager.is_mapped: raise exc.ArgumentError( "Class '%s' already has a primary mapper defined. " % manager.class_ ) assert manager.registry is None manager.registry = self def configure(self, cascade: bool = False) -> None: """Configure all as-yet unconfigured mappers in this :class:`_orm.registry`. The configure step is used to reconcile and initialize the :func:`_orm.relationship` linkages between mapped classes, as well as to invoke configuration events such as the :meth:`_orm.MapperEvents.before_configured` and :meth:`_orm.MapperEvents.after_configured`, which may be used by ORM extensions or user-defined extension hooks. If one or more mappers in this registry contain :func:`_orm.relationship` constructs that refer to mapped classes in other registries, this registry is said to be *dependent* on those registries. In order to configure those dependent registries automatically, the :paramref:`_orm.registry.configure.cascade` flag should be set to ``True``. Otherwise, if they are not configured, an exception will be raised. The rationale behind this behavior is to allow an application to programmatically invoke configuration of registries while controlling whether or not the process implicitly reaches other registries. As an alternative to invoking :meth:`_orm.registry.configure`, the ORM function :func:`_orm.configure_mappers` function may be used to ensure configuration is complete for all :class:`_orm.registry` objects in memory. This is generally simpler to use and also predates the usage of :class:`_orm.registry` objects overall. However, this function will impact all mappings throughout the running Python process and may be more memory/time consuming for an application that has many registries in use for different purposes that may not be needed immediately. .. seealso:: :func:`_orm.configure_mappers` .. 
versionadded:: 1.4.0b2 """ mapperlib._configure_registries({self}, cascade=cascade) def dispose(self, cascade: bool = False) -> None: """Dispose of all mappers in this :class:`_orm.registry`. After invocation, all the classes that were mapped within this registry will no longer have class instrumentation associated with them. This method is the per-:class:`_orm.registry` analogue to the application-wide :func:`_orm.clear_mappers` function. If this registry contains mappers that are dependencies of other registries, typically via :func:`_orm.relationship` links, then those registries must be disposed as well. When such registries exist in relation to this one, their :meth:`_orm.registry.dispose` method will also be called, if the :paramref:`_orm.registry.dispose.cascade` flag is set to ``True``; otherwise, an error is raised if those registries were not already disposed. .. versionadded:: 1.4.0b2 .. seealso:: :func:`_orm.clear_mappers` """ mapperlib._dispose_registries({self}, cascade=cascade) def _dispose_manager_and_mapper(self, manager: ClassManager[Any]) -> None: if "mapper" in manager.__dict__: mapper = manager.mapper mapper._set_dispose_flags() class_ = manager.class_ self._dispose_cls(class_) instrumentation._instrumentation_factory.unregister(class_) def generate_base( self, mapper: Optional[Callable[..., Mapper[Any]]] = None, cls: Type[Any] = object, name: str = "Base", metaclass: Type[Any] = DeclarativeMeta, ) -> Any: """Generate a declarative base class. Classes that inherit from the returned class object will be automatically mapped using declarative mapping. 
E.g.:: from sqlalchemy.orm import registry mapper_registry = registry() Base = mapper_registry.generate_base() class MyClass(Base): __tablename__ = "my_table" id = Column(Integer, primary_key=True) The above dynamically generated class is equivalent to the non-dynamic example below:: from sqlalchemy.orm import registry from sqlalchemy.orm.decl_api import DeclarativeMeta mapper_registry = registry() class Base(metaclass=DeclarativeMeta): __abstract__ = True registry = mapper_registry metadata = mapper_registry.metadata __init__ = mapper_registry.constructor .. versionchanged:: 2.0 Note that the :meth:`_orm.registry.generate_base` method is superseded by the new :class:`_orm.DeclarativeBase` class, which generates a new "base" class using subclassing, rather than return value of a function. This allows an approach that is compatible with :pep:`484` typing tools. The :meth:`_orm.registry.generate_base` method provides the implementation for the :func:`_orm.declarative_base` function, which creates the :class:`_orm.registry` and base class all at once. See the section :ref:`orm_declarative_mapping` for background and examples. :param mapper: An optional callable, defaults to :class:`_orm.Mapper`. This function is used to generate new :class:`_orm.Mapper` objects. :param cls: Defaults to :class:`object`. A type to use as the base for the generated declarative base class. May be a class or tuple of classes. :param name: Defaults to ``Base``. The display name for the generated class. Customizing this is not required, but can improve clarity in tracebacks and debugging. :param metaclass: Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ compatible callable to use as the meta type of the generated declarative base class. .. 
seealso:: :ref:`orm_declarative_mapping` :func:`_orm.declarative_base` """ metadata = self.metadata bases = not isinstance(cls, tuple) and (cls,) or cls class_dict: Dict[str, Any] = dict(registry=self, metadata=metadata) if isinstance(cls, type): class_dict["__doc__"] = cls.__doc__ if self.constructor is not None: class_dict["__init__"] = self.constructor class_dict["__abstract__"] = True if mapper: class_dict["__mapper_cls__"] = mapper if hasattr(cls, "__class_getitem__"): def __class_getitem__(cls: Type[_T], key: Any) -> Type[_T]: # allow generic classes in py3.9+ return cls class_dict["__class_getitem__"] = __class_getitem__ return metaclass(name, bases, class_dict) @compat_typing.dataclass_transform( field_specifiers=( MappedColumn, RelationshipProperty, Composite, Synonym, mapped_column, relationship, composite, synonym, deferred, ), ) @overload def mapped_as_dataclass(self, __cls: Type[_O], /) -> Type[_O]: ... @overload def mapped_as_dataclass( self, __cls: Literal[None] = ..., /, *, init: Union[_NoArg, bool] = ..., repr: Union[_NoArg, bool] = ..., # noqa: A002 eq: Union[_NoArg, bool] = ..., order: Union[_NoArg, bool] = ..., unsafe_hash: Union[_NoArg, bool] = ..., match_args: Union[_NoArg, bool] = ..., kw_only: Union[_NoArg, bool] = ..., dataclass_callable: Union[_NoArg, Callable[..., Type[Any]]] = ..., ) -> Callable[[Type[_O]], Type[_O]]: ... 
def mapped_as_dataclass( self, __cls: Optional[Type[_O]] = None, /, *, init: Union[_NoArg, bool] = _NoArg.NO_ARG, repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002 eq: Union[_NoArg, bool] = _NoArg.NO_ARG, order: Union[_NoArg, bool] = _NoArg.NO_ARG, unsafe_hash: Union[_NoArg, bool] = _NoArg.NO_ARG, match_args: Union[_NoArg, bool] = _NoArg.NO_ARG, kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG, dataclass_callable: Union[ _NoArg, Callable[..., Type[Any]] ] = _NoArg.NO_ARG, ) -> Union[Type[_O], Callable[[Type[_O]], Type[_O]]]: """Class decorator that will apply the Declarative mapping process to a given class, and additionally convert the class to be a Python dataclass. .. seealso:: :ref:`orm_declarative_native_dataclasses` - complete background on SQLAlchemy native dataclass mapping :func:`_orm.mapped_as_dataclass` - functional version that may provide better compatibility with mypy .. versionadded:: 2.0 """ decorate = mapped_as_dataclass( self, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, match_args=match_args, kw_only=kw_only, dataclass_callable=dataclass_callable, ) if __cls: return decorate(__cls) else: return decorate def mapped(self, cls: Type[_O]) -> Type[_O]: """Class decorator that will apply the Declarative mapping process to a given class. E.g.:: from sqlalchemy.orm import registry mapper_registry = registry() @mapper_registry.mapped class Foo: __tablename__ = "some_table" id = Column(Integer, primary_key=True) name = Column(String) See the section :ref:`orm_declarative_mapping` for complete details and examples. :param cls: class to be mapped. :return: the class that was passed. .. seealso:: :ref:`orm_declarative_mapping` :meth:`_orm.registry.generate_base` - generates a base class that will apply Declarative mapping to subclasses automatically using a Python metaclass. .. 
seealso:: :meth:`_orm.registry.mapped_as_dataclass` """ _ORMClassConfigurator._as_declarative(self, cls, cls.__dict__) return cls def as_declarative_base(self, **kw: Any) -> Callable[[Type[_T]], Type[_T]]: """ Class decorator which will invoke :meth:`_orm.registry.generate_base` for a given base class. E.g.:: from sqlalchemy.orm import registry mapper_registry = registry() @mapper_registry.as_declarative_base() class Base: @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class MyMappedClass(Base): ... All keyword arguments passed to :meth:`_orm.registry.as_declarative_base` are passed along to :meth:`_orm.registry.generate_base`. """ def decorate(cls: Type[_T]) -> Type[_T]: kw["cls"] = cls kw["name"] = cls.__name__ return self.generate_base(**kw) # type: ignore return decorate def map_declaratively(self, cls: Type[_O]) -> Mapper[_O]: """Map a class declaratively. In this form of mapping, the class is scanned for mapping information, including for columns to be associated with a table, and/or an actual table object. Returns the :class:`_orm.Mapper` object. E.g.:: from sqlalchemy.orm import registry mapper_registry = registry() class Foo: __tablename__ = "some_table" id = Column(Integer, primary_key=True) name = Column(String) mapper = mapper_registry.map_declaratively(Foo) This function is more conveniently invoked indirectly via either the :meth:`_orm.registry.mapped` class decorator or by subclassing a declarative metaclass generated from :meth:`_orm.registry.generate_base`. See the section :ref:`orm_declarative_mapping` for complete details and examples. :param cls: class to be mapped. :return: a :class:`_orm.Mapper` object. .. seealso:: :ref:`orm_declarative_mapping` :meth:`_orm.registry.mapped` - more common decorator interface to this function. 
:meth:`_orm.registry.map_imperatively` """ _ORMClassConfigurator._as_declarative(self, cls, cls.__dict__) return cls.__mapper__ # type: ignore def map_imperatively( self, class_: Type[_O], local_table: Optional[FromClause] = None, **kw: Any, ) -> Mapper[_O]: r"""Map a class imperatively. In this form of mapping, the class is not scanned for any mapping information. Instead, all mapping constructs are passed as arguments. This method is intended to be fully equivalent to the now-removed SQLAlchemy ``mapper()`` function, except that it's in terms of a particular registry. E.g.:: from sqlalchemy.orm import registry mapper_registry = registry() my_table = Table( "my_table", mapper_registry.metadata, Column("id", Integer, primary_key=True), ) class MyClass: pass mapper_registry.map_imperatively(MyClass, my_table) See the section :ref:`orm_imperative_mapping` for complete background and usage examples. :param class\_: The class to be mapped. Corresponds to the :paramref:`_orm.Mapper.class_` parameter. :param local_table: the :class:`_schema.Table` or other :class:`_sql.FromClause` object that is the subject of the mapping. Corresponds to the :paramref:`_orm.Mapper.local_table` parameter. :param \**kw: all other keyword arguments are passed to the :class:`_orm.Mapper` constructor directly. .. seealso:: :ref:`orm_imperative_mapping` :ref:`orm_declarative_mapping` """ return _ORMClassConfigurator._mapper(self, class_, local_table, kw) RegistryType = registry if not TYPE_CHECKING: # allow for runtime type resolution of ``ClassVar[_RegistryType]`` _RegistryType = registry # noqa
registry
python
airbytehq__airbyte
airbyte-ci/connectors/live-tests/src/live_tests/report.py
{ "start": 2469, "end": 6642 }
class ____(BaseReport): TEMPLATE_NAME = "private_details.html.j2" SPEC_SECRET_MASK_URL = "https://connectors.airbyte.com/files/registries/v0/specs_secrets_mask.yaml" def __init__(self, path: Path, pytest_config: Config) -> None: self.secret_properties = self.get_secret_properties() super().__init__(path, pytest_config) def get_secret_properties(self) -> list: response = requests.get(self.SPEC_SECRET_MASK_URL) response.raise_for_status() return yaml.safe_load(response.text)["properties"] def scrub_secrets_from_config(self, to_scrub: MutableMapping) -> MutableMapping: if isinstance(to_scrub, dict): for key, value in to_scrub.items(): if key in self.secret_properties: to_scrub[key] = "********" elif isinstance(value, dict): to_scrub[key] = self.scrub_secrets_from_config(value) return to_scrub @property def renderable_connection_objects(self) -> list[dict[str, Any]]: return [ { "workspace_id": connection_objects.workspace_id, "connection_id": connection_objects.connection_id, "hashed_connection_id": connection_objects.hashed_connection_id, "source_config": json.dumps( self.scrub_secrets_from_config( deepcopy(connection_objects.source_config.data) if connection_objects.source_config else {} ), indent=2, ), "url": connection_objects.url, } for connection_objects in self.all_connection_objects ] def render(self) -> None: jinja_env = Environment( loader=PackageLoader(__package__, "templates"), autoescape=select_autoescape(), trim_blocks=False, lstrip_blocks=True, ) template = jinja_env.get_template(self.TEMPLATE_NAME) rendered = template.render( user=self.pytest_config.stash[stash_keys.USER], test_date=self.created_at, all_connection_objects=self.renderable_connection_objects, connector_image=self.pytest_config.stash[stash_keys.CONNECTOR_IMAGE], control_version=self.pytest_config.stash[stash_keys.CONTROL_VERSION], target_version=self.pytest_config.stash[stash_keys.TARGET_VERSION], requested_urls_per_command=self.get_requested_urls_per_command(), fully_generated=self._state 
is ReportState.FINISHED, ) self.path.write_text(rendered) def get_requested_urls_per_command( self, ) -> dict[Command, list[tuple[int, str, str]]]: requested_urls_per_command = {} all_commands = sorted( list(set(self.control_execution_results_per_command.keys()).union(set(self.target_execution_results_per_command.keys()))), key=lambda command: command.value, ) for command in all_commands: if command in self.control_execution_results_per_command: control_flows = [ flow for exec_result in self.control_execution_results_per_command[command] for flow in exec_result.http_flows ] else: control_flows = [] if command in self.target_execution_results_per_command: target_flows = [ flow for exec_result in self.target_execution_results_per_command[command] for flow in exec_result.http_flows ] else: target_flows = [] all_flows = [] max_flows = max(len(control_flows), len(target_flows)) for i in range(max_flows): control_url = control_flows[i].request.url if i < len(control_flows) else "" target_url = target_flows[i].request.url if i < len(target_flows) else "" all_flows.append((i, control_url, target_url)) requested_urls_per_command[command] = all_flows return requested_urls_per_command
PrivateDetailsReport
python
coleifer__peewee
tests/shortcuts.py
{ "start": 607, "end": 652 }
class ____(TestModel): tag = TextField()
Tag
python
great-expectations__great_expectations
great_expectations/data_context/types/base.py
{ "start": 42411, "end": 46994 }
class ____(Schema): config_version: fields.Number = fields.Number( validate=lambda x: 0 < x < 100, # noqa: PLR2004 # FIXME CoP error_messages={"invalid": "config version must be a number."}, ) fluent_datasources = fields.Dict( keys=fields.Str(), required=False, allow_none=True, load_only=True, ) expectations_store_name = fields.Str() validation_results_store_name = fields.Str() checkpoint_store_name = fields.Str(required=False, allow_none=True) plugins_directory = fields.Str(allow_none=True) stores = fields.Dict(keys=fields.Str(), values=fields.Dict()) data_docs_sites = fields.Dict(keys=fields.Str(), values=fields.Dict(), allow_none=True) config_variables_file_path = fields.Str(allow_none=True) analytics_enabled = fields.Boolean(allow_none=True) data_context_id = fields.UUID(allow_none=True) progress_bars = fields.Nested(ProgressBarsConfigSchema, required=False, allow_none=True) # To ensure backwards compatability, we need to ensure that new options are "opt-in" # If a user has not explicitly configured the value, it will be None and will be wiped by the post_dump hook # noqa: E501 # FIXME CoP REMOVE_KEYS_IF_NONE = [ "progress_bars", # 0.13.49 "fluent_datasources", ] # noinspection PyUnusedLocal @post_dump def remove_keys_if_none(self, data: dict, **kwargs) -> dict: data = copy.deepcopy(data) for key in self.REMOVE_KEYS_IF_NONE: if key in data and data[key] is None: data.pop(key) return data @override def handle_error(self, exc, data, **kwargs) -> None: # type: ignore[override] # FIXME CoP """Log and raise our custom exception when (de)serialization fails.""" if ( exc and exc.messages and isinstance(exc.messages, dict) and all(key is None for key in exc.messages) ): exc.messages = list(itertools.chain.from_iterable(exc.messages.values())) message: str = f"Error while processing DataContextConfig: {' '.join(exc.messages)}" logger.error(message) raise gx_exceptions.InvalidDataContextConfigError( message=message, ) # noinspection PyUnusedLocal @validates_schema def 
validate_schema(self, data, **kwargs) -> None: if "config_version" not in data: raise gx_exceptions.InvalidDataContextConfigError( # noqa: TRY003 # FIXME CoP "The key `config_version` is missing; please check your config file.", validation_error=ValidationError(message="no config_version key"), ) if not isinstance(data["config_version"], (int, float)): raise gx_exceptions.InvalidDataContextConfigError( # noqa: TRY003 # FIXME CoP "The key `config_version` must be a number. Please check your config file.", validation_error=ValidationError(message="config version not a number"), ) # When migrating from 0.7.x to 0.8.0 if data["config_version"] == 0 and any( store_config["class_name"] == "ValidationResultsStore" for store_config in data["stores"].values() ): raise gx_exceptions.UnsupportedConfigVersionError( # noqa: TRY003 # FIXME CoP "You appear to be using a config version from the 0.7.x series. This version is no longer supported." # noqa: E501 # FIXME CoP ) if data["config_version"] < MINIMUM_SUPPORTED_CONFIG_VERSION: raise gx_exceptions.UnsupportedConfigVersionError( "You appear to have an invalid config version ({}).\n The version number must be at least {}. " # noqa: E501 # FIXME CoP "Please see the migration guide at https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api".format( data["config_version"], MINIMUM_SUPPORTED_CONFIG_VERSION ), ) if data["config_version"] > CURRENT_GX_CONFIG_VERSION: raise gx_exceptions.InvalidDataContextConfigError( "You appear to have an invalid config version ({}).\n The maximum valid version is {}.".format( # noqa: E501 # FIXME CoP data["config_version"], CURRENT_GX_CONFIG_VERSION ), validation_error=ValidationError(message="config version too high"), )
DataContextConfigSchema
python
spack__spack
lib/spack/spack/cmd/create.py
{ "start": 25581, "end": 38894 }
class ____: """An instance of BuildSystemAndLanguageGuesser provides a callable object to be used during ``spack create``. By passing this object to ``spack checksum``, we can take a peek at the fetched tarball and discern the build system it uses """ def __init__(self): """Sets the default build system.""" self.build_system = "generic" self._c = False self._cxx = False self._fortran = False # List of files in the archive ordered by their depth in the directory tree. self._file_entries: List[str] = [] def __call__(self, archive: str, url: str) -> None: """Try to guess the type of build system used by a project based on the contents of its archive or the URL it was downloaded from.""" # Peek inside the compressed file. if archive.endswith(".zip") or ".zip#" in archive: try: unzip = which("unzip") assert unzip is not None output = unzip("-lq", archive, output=str) except Exception: output = "" else: try: tar = which("tar") assert tar is not None output = tar("tf", archive, output=str) except Exception: output = "" self._file_entries[:] = output.splitlines() # Files closest to the root should be considered first when determining build system. self._file_entries.sort(key=lambda p: p.count("/")) self._determine_build_system(url) self._determine_language() def _determine_build_system(self, url: str) -> None: # Most octave extensions are hosted on Octave-Forge: # https://octave.sourceforge.net/index.html # They all have the same base URL. if "downloads.sourceforge.net/octave/" in url: self.build_system = "octave" elif url.endswith(".gem"): self.build_system = "ruby" elif url.endswith(".whl") or ".whl#" in url: self.build_system = "python" elif url.endswith(".rock"): self.build_system = "lua" elif self._file_entries: # A list of clues that give us an idea of the build system a package # uses. If the regular expression matches a file contained in the # archive, the corresponding build system is assumed. # NOTE: Order is important here. 
If a package supports multiple # build systems, we choose the first match in this list. clues = [ (re.compile(pattern), build_system) for pattern, build_system in ( (r"/CMakeLists\.txt$", "cmake"), (r"/NAMESPACE$", "r"), (r"/Cargo\.toml$", "cargo"), (r"/go\.mod$", "go"), (r"/configure$", "autotools"), (r"/configure\.(in|ac)$", "autoreconf"), (r"/Makefile\.am$", "autoreconf"), (r"/pom\.xml$", "maven"), (r"/SConstruct$", "scons"), (r"/waf$", "waf"), (r"/pyproject.toml", "python"), (r"/setup\.(py|cfg)$", "python"), (r"/WORKSPACE$", "bazel"), (r"/Build\.PL$", "perlbuild"), (r"/Makefile\.PL$", "perlmake"), (r"/.*\.gemspec$", "ruby"), (r"/Rakefile$", "ruby"), (r"/setup\.rb$", "ruby"), (r"/.*\.pro$", "qmake"), (r"/.*\.rockspec$", "lua"), (r"/(GNU)?[Mm]akefile$", "makefile"), (r"/DESCRIPTION$", "octave"), (r"/meson\.build$", "meson"), (r"/configure\.py$", "sip"), ) ] # Determine the build system based on the files contained in the archive. for file in self._file_entries: for pattern, build_system in clues: if pattern.search(file): self.build_system = build_system return def _determine_language(self): for entry in self._file_entries: _, ext = os.path.splitext(entry) if not self._c and ext in C_EXT: self._c = True elif not self._cxx and ext in CXX_EXT: self._cxx = True elif not self._fortran and ext in FORTRAN_EXT: self._fortran = True if self._c and self._cxx and self._fortran: return @property def languages(self) -> List[str]: langs: List[str] = [] if self._c: langs.append("c") if self._cxx: langs.append("cxx") if self._fortran: langs.append("fortran") return langs def get_name(name, url): """Get the name of the package based on the supplied arguments. If a name was provided, always use that. Otherwise, if a URL was provided, extract the name from that. Otherwise, use a default. 
Args: name (str): explicit ``--name`` argument given to ``spack create`` url (str): ``url`` argument given to ``spack create`` Returns: str: The name of the package """ # Default package name result = "example" if name is not None: # Use a user-supplied name if one is present result = name if len(name.strip()) > 0: tty.msg("Using specified package name: '{0}'".format(result)) else: tty.die("A package name must be provided when using the option.") elif url is not None: # Try to guess the package name based on the URL try: result = parse_name(url) if result != url: desc = "URL" else: desc = "package name" tty.msg("This looks like a {0} for {1}".format(desc, result)) except UndetectableNameError: tty.die( "Couldn't guess a name for this package.", " Please report this bug. In the meantime, try running:", " `spack create --name <name> <url>`", ) result = simplify_name(result) if not re.match(r"^[a-z0-9-]+$", result): tty.die("Package name can only contain a-z, 0-9, and '-'") return result def get_url(url: Optional[str]) -> str: """Get the URL to use. Use a default URL if none is provided. Args: url: ``url`` argument to ``spack create`` Returns: The URL of the package """ # Use the user-supplied URL or a default URL if none is present. return url or "https://www.example.com/example-1.2.3.tar.gz" def get_versions(args: argparse.Namespace, name: str) -> Tuple[str, BuildSystemAndLanguageGuesser]: """Returns a list of versions and hashes for a package. Also returns a BuildSystemAndLanguageGuesser object. Returns default values if no URL is provided. Args: args: The arguments given to ``spack create`` name: The name of the package Returns: Tuple of versions and hashes, and a BuildSystemAndLanguageGuesser object """ # Default version with hash hashed_versions = """\ # FIXME: Add proper versions and checksums here. # version("1.2.3", md5="0123456789abcdef0123456789abcdef")""" # Default version without hash unhashed_versions = """\ # FIXME: Add proper versions here. 
# version("1.2.4")""" # Default guesser guesser = BuildSystemAndLanguageGuesser() valid_url = True try: parsed = urllib.parse.urlparse(args.url) if not parsed.scheme or parsed.scheme == "file": valid_url = False # No point in spidering these except (ValueError, TypeError): valid_url = False if args.url is not None and args.template != "bundle" and valid_url: # Find available versions try: url_dict = find_versions_of_archive(args.url) if len(url_dict) > 1 and not args.batch and sys.stdin.isatty(): url_dict_filtered = spack.stage.interactive_version_filter(url_dict) if url_dict_filtered is None: exit(0) url_dict = url_dict_filtered except UndetectableVersionError: # Use fake versions tty.warn("Couldn't detect version in: {0}".format(args.url)) return hashed_versions, guesser if not url_dict: # If no versions were found, revert to what the user provided version = parse_version(args.url) url_dict = {version: args.url} version_hashes = spack.stage.get_checksums_for_versions( url_dict, name, first_stage_function=guesser, keep_stage=args.keep_stage ) versions = get_version_lines(version_hashes) else: versions = unhashed_versions return versions, guesser def get_build_system( template: Optional[str], url: str, guesser: BuildSystemAndLanguageGuesser ) -> str: """Determine the build system template. If a template is specified, always use that. Otherwise, if a URL is provided, download the tarball and peek inside to guess what build system it uses. Otherwise, use a generic template by default. 
Args: template: ``--template`` argument given to ``spack create`` url: ``url`` argument given to ``spack create`` guesser: The first_stage_function given to ``spack checksum`` which records the build system it detects Returns: str: The name of the build system template to use """ # Default template selected_template = "generic" if template is not None: selected_template = template # Use a user-supplied template if one is present tty.msg("Using specified package template: '{0}'".format(selected_template)) elif url is not None: # Use whatever build system the guesser detected selected_template = guesser.build_system if selected_template == "generic": tty.warn("Unable to detect a build system. Using a generic package template.") else: msg = "This package looks like it uses the {0} build system" tty.msg(msg.format(selected_template)) return selected_template def get_repository(args: argparse.Namespace, name: str) -> spack.repo.Repo: """Returns a Repo object that will allow us to determine the path where the new package file should be created. 
Args: args: The arguments given to ``spack create`` name: The name of the package to create Returns: A Repo object capable of determining the path to the package file """ spec = Spec(name) # Figure out namespace for spec if spec.namespace and args.namespace and spec.namespace != args.namespace: tty.die("Namespaces '{0}' and '{1}' do not match.".format(spec.namespace, args.namespace)) if not spec.namespace and args.namespace: spec.namespace = args.namespace # Figure out where the new package should live repo_path = args.repo if repo_path is not None: repo = spack.repo.from_path(repo_path) if spec.namespace and spec.namespace != repo.namespace: tty.die( "Can't create package with namespace {0} in repo with " "namespace {1}".format(spec.namespace, repo.namespace) ) else: if spec.namespace: repo = spack.repo.PATH.get_repo(spec.namespace) else: _repo = spack.repo.PATH.first_repo() assert _repo is not None, "No package repository found" repo = _repo # Set the namespace on the spec if it's not there already if not spec.namespace: spec.namespace = repo.namespace return repo def create(parser, args): # Gather information about the package to be created name = get_name(args.name, args.url) url = get_url(args.url) versions, guesser = get_versions(args, name) build_system = get_build_system(args.template, url, guesser) # Create the package template object constr_args = {"name": name, "versions": versions, "languages": guesser.languages} package_class = templates[build_system] if package_class != BundlePackageTemplate: constr_args["url"] = url package = package_class(**constr_args) tty.msg("Created template for {0} package".format(package.name)) # Create a directory for the new package repo = get_repository(args, name) pkg_path = repo.filename_for_package_name(package.name) if os.path.exists(pkg_path) and not args.force: tty.die( "{0} already exists.".format(pkg_path), " Try running `spack create --force` to overwrite it.", ) else: mkdirp(os.path.dirname(pkg_path)) # Write the 
new package file package.write(pkg_path) tty.msg("Created package file: {0}".format(pkg_path)) # Optionally open up the new package file in your $EDITOR if not args.skip_editor: editor(pkg_path)
BuildSystemAndLanguageGuesser
python
xlwings__xlwings
xlwings/_xlmac.py
{ "start": 44288, "end": 44690 }
class ____(base_classes.Note): def __init__(self, parent, xl): self.parent = parent self.xl = xl def api(self): return self.xl @property def text(self): return self.xl.Excel_comment_text() @text.setter def text(self, value): self.xl.Excel_comment_text(text=value) def delete(self): self.parent.xl.clear_Excel_comments()
Note
python
getsentry__sentry
tests/sentry/core/endpoints/test_organization_member_details.py
{ "start": 6939, "end": 37287 }
class ____(OrganizationMemberTestBase, HybridCloudTestMixin): method = "put" def setUp(self) -> None: super().setUp() self.curr_user = self.create_user("member@example.com") self.curr_member = self.create_member( organization=self.organization, role="member", user=self.curr_user ) self.other_user = self.create_user("other@example.com") self.other_member = self.create_member( organization=self.organization, role="member", user=self.other_user ) self.curr_invite = self.create_member( organization=self.organization, user=None, email="member_invite@example.com", role="member", inviter_id=self.curr_user.id, ) self.other_invite = self.create_member( organization=self.organization, user=None, email="other_invite@example.com", role="member", inviter_id=self.other_user.id, ) def test_invalid_id(self) -> None: self.get_error_response(self.organization.slug, "trash", reinvite=1, status_code=404) @patch("sentry.models.OrganizationMember.send_invite_email") def test_reinvite_pending_member(self, mock_send_invite_email: MagicMock) -> None: member_om = self.create_member( organization=self.organization, email="foo@example.com", role="member" ) self.get_success_response(self.organization.slug, member_om.id, reinvite=1) mock_send_invite_email.assert_called_once_with() @patch("sentry.models.OrganizationMember.send_invite_email") def test_member_reinvite_pending_member(self, mock_send_invite_email: MagicMock) -> None: self.login_as(self.curr_user) self.organization.flags.disable_member_invite = True self.organization.save() response = self.get_error_response( self.organization.slug, self.curr_invite.id, reinvite=1, status_code=403 ) assert response.data.get("detail") == "You do not have permission to perform this action." response = self.get_error_response( self.organization.slug, self.other_invite.id, reinvite=1, status_code=403 ) assert response.data.get("detail") == "You do not have permission to perform this action." 
assert not mock_send_invite_email.mock_calls self.organization.flags.disable_member_invite = False self.organization.save() with outbox_runner(): self.get_success_response(self.organization.slug, self.curr_invite.id, reinvite=1) mock_send_invite_email.assert_called_once_with() assert_org_audit_log_exists( organization=self.organization, event=audit_log.get_event_id("MEMBER_REINVITE"), ) mock_send_invite_email.reset_mock() response = self.get_error_response( self.organization.slug, self.other_invite.id, reinvite=1, status_code=403 ) assert response.data.get("detail") == "You cannot modify invitations sent by someone else." assert not mock_send_invite_email.mock_calls @patch("sentry.models.OrganizationMember.send_invite_email") def test_member_can_only_reinvite(self, mock_send_invite_email: MagicMock) -> None: foo = self.create_team(organization=self.organization, name="Team Foo") self.login_as(self.curr_user) self.organization.flags.disable_member_invite = True self.organization.save() response = self.get_error_response( self.organization.slug, self.curr_invite.id, teams=[foo.slug], status_code=403, ) assert response.data.get("detail") == "You do not have permission to perform this action." assert not mock_send_invite_email.mock_calls self.organization.flags.disable_member_invite = False self.organization.save() response = self.get_error_response( self.organization.slug, self.curr_invite.id, teams=[foo.slug], status_code=403, ) assert response.data.get("detail") == "You do not have permission to perform this action." 
assert not mock_send_invite_email.mock_calls @patch("sentry.models.OrganizationMember.send_invite_email") def test_member_cannot_reinvite_non_pending_members( self, mock_send_invite_email: MagicMock ) -> None: self.login_as(self.curr_user) self.organization.flags.disable_member_invite = True self.organization.save() response = self.get_error_response( self.organization.slug, self.other_member.id, reinvite=1, status_code=403 ) assert response.data.get("detail") == "You do not have permission to perform this action." self.organization.flags.disable_member_invite = False self.organization.save() response = self.get_error_response( self.organization.slug, self.other_member.id, reinvite=1, status_code=403 ) assert response.data.get("detail") == "You do not have permission to perform this action." assert not mock_send_invite_email.mock_calls @patch("sentry.models.OrganizationMember.send_invite_email") def test_cannot_reinvite_and_modify_member(self, mock_send_invite_email: MagicMock) -> None: member_om = self.create_member( organization=self.organization, email="foo@example.com", role="member" ) response = self.get_error_response( self.organization.slug, member_om.id, reinvite=1, role="manager", status_code=403 ) assert ( response.data.get("detail") == "You cannot modify member details when resending an invitation. Separate requests are required." 
) assert not mock_send_invite_email.mock_calls @patch("sentry.models.OrganizationMember.send_invite_email") def test_member_details_not_modified_after_reinviting( self, mock_send_invite_email: MagicMock ) -> None: team = self.create_team(organization=self.organization, name="Moo Deng's Team") member_om = self.create_member( organization=self.organization, email="foo@example.com", role="member", teams=[team], ) teams = list(map(lambda team: team.slug, member_om.teams.all())) roles = [t for t in member_om.get_team_roles()] assert member_om.role == "member" assert team.slug in teams assert roles == [ { "team": team.id, "role": None, } ] with outbox_runner(): self.get_success_response(self.organization.slug, member_om.id, reinvite=1) assert_org_audit_log_exists( organization=self.organization, event=audit_log.get_event_id("MEMBER_REINVITE"), ) teams = list(map(lambda team: team.slug, member_om.teams.all())) roles = [t for t in member_om.get_team_roles()] assert member_om.role == "member" assert team.slug in teams assert roles == [ { "team": team.id, "role": None, } ] @patch("sentry.ratelimits.for_organization_member_invite") @patch("sentry.models.OrganizationMember.send_invite_email") def test_rate_limited( self, mock_send_invite_email: MagicMock, mock_rate_limit: MagicMock ) -> None: mock_rate_limit.return_value = True member_om = self.create_member( organization=self.organization, email="foo@example.com", role="member" ) self.get_error_response(self.organization.slug, member_om.id, reinvite=1, status_code=429) assert not mock_send_invite_email.mock_calls @patch("sentry.models.OrganizationMember.send_invite_email") def test_member_cannot_regenerate_pending_invite( self, mock_send_invite_email: MagicMock ) -> None: member_om = self.create_member( organization=self.organization, email="foo@example.com", role="member" ) old_invite = member_om.get_invite_link() member = self.create_user("baz@example.com") self.create_member(organization=self.organization, user=member, 
role="member") self.login_as(member) self.get_error_response( self.organization.slug, member_om.id, reinvite=1, regenerate=1, status_code=403 ) member_om = OrganizationMember.objects.get(id=member_om.id) assert old_invite == member_om.get_invite_link() assert not mock_send_invite_email.mock_calls self.login_as(self.curr_user) self.organization.flags.disable_member_invite = True self.organization.save() response = self.get_error_response( self.organization.slug, self.curr_invite.id, reinvite=1, regenerate=1, status_code=403 ) assert response.data.get("detail") == "You do not have permission to perform this action." self.organization.flags.disable_member_invite = False self.organization.save() response = self.get_error_response( self.organization.slug, self.curr_invite.id, reinvite=1, regenerate=1, status_code=400, ) assert response.data.get("detail") == "You are missing the member:admin scope." @patch("sentry.models.OrganizationMember.send_invite_email") def test_admin_can_regenerate_pending_invite(self, mock_send_invite_email: MagicMock) -> None: member_om = self.create_member( organization=self.organization, email="foo@example.com", role="member" ) old_invite = member_om.get_invite_link() response = self.get_success_response( self.organization.slug, member_om.id, reinvite=1, regenerate=1 ) member_om = OrganizationMember.objects.get(id=member_om.id) assert old_invite != member_om.get_invite_link() mock_send_invite_email.assert_called_once_with() assert "invite_link" not in response.data self.assert_org_member_mapping(org_member=member_om) @patch("sentry.models.OrganizationMember.send_invite_email") def test_reinvite_invite_expired_member(self, mock_send_invite_email: MagicMock) -> None: member = self.create_member( organization=self.organization, email="foo@example.com", role="member", token_expires_at="2018-10-20 00:00:00+00:00", ) self.get_error_response(self.organization.slug, member.id, reinvite=1, status_code=400) assert mock_send_invite_email.called is False 
member = OrganizationMember.objects.get(pk=member.id) assert member.token_expired @patch("sentry.models.OrganizationMember.send_invite_email") def test_regenerate_invite_expired_member(self, mock_send_invite_email: MagicMock) -> None: member = self.create_member( organization=self.organization, email="foo@example.com", role="member", token_expires_at="2018-10-20 00:00:00+00:00", ) self.get_success_response(self.organization.slug, member.id, reinvite=1, regenerate=1) mock_send_invite_email.assert_called_once_with() member = OrganizationMember.objects.get(pk=member.id) assert member.token_expired is False self.assert_org_member_mapping(org_member=member) @patch("sentry.models.OrganizationMember.send_invite_email") def test_cannot_reinvite_unapproved_invite(self, mock_send_invite_email: MagicMock) -> None: member = self.create_member( organization=self.organization, email="foo@example.com", role="member", invite_status=InviteStatus.REQUESTED_TO_JOIN.value, ) self.get_error_response(self.organization.slug, member.id, reinvite=1, status_code=404) @patch("sentry.models.OrganizationMember.send_invite_email") def test_cannot_regenerate_unapproved_invite(self, mock_send_invite_email: MagicMock) -> None: member = self.create_member( organization=self.organization, email="foo@example.com", role="member", invite_status=InviteStatus.REQUESTED_TO_JOIN.value, ) self.get_error_response( self.organization.slug, member.id, reinvite=1, regenerate=1, status_code=404 ) def test_reinvite_sso_link(self) -> None: member = self.create_user("bar@example.com") member_om = self.create_member(organization=self.organization, user=member, role="member") with assume_test_silo_mode(SiloMode.CONTROL): AuthProvider.objects.create( organization_id=self.organization.id, provider="dummy", flags=1 ) with self.tasks(): self.get_success_response(self.organization.slug, member_om.id, reinvite=1) assert len(mail.outbox) == 1 def test_can_update_member_membership(self) -> None: member = 
self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) with outbox_runner(): self.get_success_response(self.organization.slug, member_om.id, role="manager") member_om = OrganizationMember.objects.get(id=member_om.id) assert member_om.role == "manager" self.assert_org_member_mapping(org_member=member_om) def test_cannot_update_own_membership(self) -> None: member_om = OrganizationMember.objects.get( organization=self.organization, user_id=self.user.id ) self.get_error_response( self.organization.slug, member_om.id, role="manager", status_code=400 ) member_om = OrganizationMember.objects.get(user_id=self.user.id) assert member_om.role == "owner" def test_can_update_teams(self) -> None: foo = self.create_team(organization=self.organization, name="Team Foo") bar = self.create_team(organization=self.organization, name="Team Bar") member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) self.get_success_response(self.organization.slug, member_om.id, teams=[foo.slug, bar.slug]) member_teams = OrganizationMemberTeam.objects.filter(organizationmember=member_om) team_ids = list(map(lambda x: x.team_id, member_teams)) assert foo.id in team_ids assert bar.id in team_ids member_om = OrganizationMember.objects.get(id=member_om.id) teams = list(map(lambda team: team.slug, member_om.teams.all())) assert foo.slug in teams assert bar.slug in teams @with_feature("organizations:team-roles") def test_can_update_teams_with_feature_flag(self) -> None: self.test_can_update_teams() def test_can_update_teams_using_teamRoles(self) -> None: foo = self.create_team(organization=self.organization, name="Team Foo") bar = self.create_team(organization=self.organization, name="Team Bar") member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", 
teams=[] ) self.get_success_response( self.organization.slug, member_om.id, teamRoles=[ { "teamSlug": foo.slug, "role": None, }, { "teamSlug": bar.slug, "role": None, }, ], ) member_teams = OrganizationMemberTeam.objects.filter(organizationmember=member_om) team_ids = list(map(lambda x: x.team_id, member_teams)) assert foo.id in team_ids assert bar.id in team_ids def test_cannot_update_with_invalid_team(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) self.get_error_response( self.organization.slug, member_om.id, teams=["invalid"], status_code=400 ) member_om = OrganizationMember.objects.get(id=member_om.id) teams = list(map(lambda team: team.slug, member_om.teams.all())) assert len(teams) == 0 def test_can_update_org_role(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) with outbox_runner(): self.get_success_response(self.organization.slug, member_om.id, role="manager") member_om = OrganizationMember.objects.get( organization=self.organization, user_id=member.id ) assert member_om.role == "manager" self.assert_org_member_mapping(org_member=member_om) @with_feature("organizations:team-roles") def test_can_update_team_role(self) -> None: foo = self.create_team(organization=self.organization, name="Team Foo") member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[foo] ) member_omt = OrganizationMemberTeam.objects.get(organizationmember=member_om, team=foo) assert member_omt.role is None self.get_success_response( self.organization.slug, member_om.id, teamRoles=[ { "teamSlug": foo.slug, "role": "admin", }, ], ) member_omt = OrganizationMemberTeam.objects.get(organizationmember=member_om, team=foo) assert member_omt.role == "admin" self.get_success_response( 
self.organization.slug, member_om.id, teamRoles=[ { "teamSlug": foo.slug, "role": None, }, ], ) member_omt = OrganizationMemberTeam.objects.get(organizationmember=member_om, team=foo) assert member_omt.role is None def test_cannot_update_with_invalid_role(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) self.get_error_response( self.organization.slug, member_om.id, role="invalid", status_code=400 ) member_om = OrganizationMember.objects.get( organization=self.organization, user_id=member.id ) assert member_om.role == "member" @with_feature({"organizations:team-roles": False}) def test_can_update_from_retired_role_without_flag(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="admin", teams=[] ) with outbox_runner(): self.get_success_response(self.organization.slug, member_om.id, role="member") member_om = OrganizationMember.objects.get( organization=self.organization, user_id=member.id ) assert member_om.role == "member" self.assert_org_member_mapping(org_member=member_om) @with_feature("organizations:team-roles") def test_can_update_from_retired_role_with_flag(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="admin", teams=[] ) with outbox_runner(): self.get_success_response(self.organization.slug, member_om.id, role="member") member_om = OrganizationMember.objects.get( organization=self.organization, user_id=member.id ) assert member_om.role == "member" self.assert_org_member_mapping(org_member=member_om) @with_feature({"organizations:team-roles": False}) def test_can_update_to_retired_role_without_flag(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) with 
outbox_runner(): self.get_success_response(self.organization.slug, member_om.id, role="admin") member_om = OrganizationMember.objects.get( organization=self.organization, user_id=member.id ) assert member_om.role == "admin" self.assert_org_member_mapping(org_member=member_om) @with_feature("organizations:team-roles") def test_cannot_update_to_retired_role_with_flag(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) self.get_error_response(self.organization.slug, member_om.id, role="admin", status_code=400) member_om = OrganizationMember.objects.get( organization=self.organization, user_id=member.id ) assert member_om.role == "member" @patch("sentry.models.OrganizationMember.send_sso_link_email") def test_cannot_reinvite_normal_member(self, mock_send_sso_link_email: MagicMock) -> None: member = self.create_user("bar@example.com") member_om = self.create_member(organization=self.organization, user=member, role="member") self.get_error_response(self.organization.slug, member_om.id, reinvite=1, status_code=400) def test_cannot_lower_superior_role(self) -> None: owner = self.create_user("baz@example.com") owner_om = self.create_member( organization=self.organization, user=owner, role="owner", teams=[] ) manager = self.create_user("foo@example.com") self.create_member(organization=self.organization, user=manager, role="manager", teams=[]) self.login_as(manager) self.get_error_response(self.organization.slug, owner_om.id, role="member", status_code=403) owner_om = OrganizationMember.objects.get(organization=self.organization, user_id=owner.id) assert owner_om.role == "owner" def test_with_internal_integration(self) -> None: member = self.create_user("baz@example.com") member_om = self.create_member(organization=self.organization, user=member, role="member") internal_integration = self.create_internal_integration( name="my_app", organization=self.organization, 
scopes=("member:admin",), webhook_url="http://example.com", ) token = self.create_internal_integration_token( user=self.user, internal_integration=internal_integration ) response = self.client.put( reverse(self.endpoint, args=[self.organization.slug, member_om.id]), {"role": "manager"}, HTTP_AUTHORIZATION=f"Bearer {token.token}", ) # The app token has no associated OrganizationMember and therefore no role. # So we can't authorize it to promote to a role less than or equal to its # own. This may be supported in the future. For now, assert that it provides # a graceful authorization failure. assert response.status_code == 400 def test_cannot_update_partnership_member(self) -> None: member = self.create_user("bar@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", flags=OrganizationMember.flags["partnership:restricted"], ) self.get_error_response(self.organization.slug, member_om.id, status_code=403) @patch( "sentry.roles.organization_roles.get", wraps=mock_organization_roles_get_factory(organization_roles.get), ) def test_cannot_add_to_team_when_team_roles_disabled(self, mock_get: MagicMock) -> None: team = self.create_team(organization=self.organization, name="Team Foo") self.member = self.create_user() self.member_om = self.create_member( organization=self.organization, user=self.member, role="member", teams=[] ) owner_user = self.create_user("owner@localhost") self.owner = self.create_member( user=owner_user, organization=self.organization, role="owner" ) self.login_as(user=owner_user) response = self.get_error_response( self.organization.slug, self.member_om.id, teamRoles=[{"teamSlug": team.slug, "role": None}], status_code=400, ) assert ( response.data["detail"] == "The user with a 'member' role cannot have team-level permissions." 
) @patch( "sentry.roles.organization_roles.get", wraps=mock_organization_roles_get_factory(organization_roles.get), ) def test_cannot_demote_team_member_to_role_where_team_roles_disabled( self, mock_get: MagicMock ) -> None: team = self.create_team(organization=self.organization, name="Team Foo") self.manager = self.create_user() self.manager_om = self.create_member( organization=self.organization, user=self.manager, role="manager", teams=[team] ) owner_user = self.create_user("owner@localhost") self.owner = self.create_member( user=owner_user, organization=self.organization, role="owner" ) self.login_as(user=owner_user) response = self.get_error_response( self.organization.slug, self.manager_om.id, orgRole="member", status_code=400 ) assert ( response.data["detail"] == "The user with a 'member' role cannot have team-level permissions." ) @patch( "sentry.roles.organization_roles.get", wraps=mock_organization_roles_get_factory(organization_roles.get), ) def test_can_demote_team_member_to_role_where_team_roles_disabled_with_team_removed( self, mock_get ): team = self.create_team(organization=self.organization, name="Team Foo") self.manager = self.create_user() self.manager_om = self.create_member( organization=self.organization, user=self.manager, role="manager", teams=[team] ) owner_user = self.create_user("owner@localhost") self.owner = self.create_member( user=owner_user, organization=self.organization, role="owner" ) self.login_as(user=owner_user) self.get_success_response( self.organization.slug, self.manager_om.id, orgRole="member", teamRoles=[] ) @patch( "sentry.roles.organization_roles.get", wraps=mock_organization_roles_get_factory(organization_roles.get), ) def test_can_promote_team_member_to_role_where_team_roles_enabled( self, mock_get: MagicMock ) -> None: team = self.create_team(organization=self.organization, name="Team Foo") self.member = self.create_user() self.member_om = self.create_member( organization=self.organization, user=self.member, 
role="member", teams=[] ) owner_user = self.create_user("owner@localhost") self.owner = self.create_member( user=owner_user, organization=self.organization, role="owner" ) self.login_as(user=owner_user) self.get_success_response( self.organization.slug, self.member_om.id, teamRoles=[{"teamSlug": team.slug, "role": None}], orgRole="manager", ) @patch("sentry.quotas.base.Quota.on_role_change") def test_on_role_change_called_when_role_updated(self, mock_on_role_change: MagicMock) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) with outbox_runner(): self.get_success_response(self.organization.slug, member_om.id, role="manager") mock_on_role_change.assert_called_once_with( organization=self.organization, organization_member=member_om, previous_role="member", new_role="manager", ) @patch("sentry.quotas.base.Quota.on_role_change") def test_on_role_change_not_called_when_role_unchanged( self, mock_on_role_change: MagicMock ) -> None: member = self.create_user("baz@example.com") member_om = self.create_member( organization=self.organization, user=member, role="member", teams=[] ) # Update something else but keep role the same self.get_success_response(self.organization.slug, member_om.id, teams=[]) mock_on_role_change.assert_not_called() @patch("sentry.quotas.base.Quota.on_role_change") def test_on_role_change_not_called_when_reinviting( self, mock_on_role_change: MagicMock ) -> None: member_om = self.create_member( organization=self.organization, email="foo@example.com", role="member" ) self.get_success_response(self.organization.slug, member_om.id, reinvite=1) mock_on_role_change.assert_not_called() def test_cannot_edit_placeholder_member(self) -> None: invite = self.create_member_invite(organization=self.organization) placeholder_om = invite.organization_member response = self.get_error_response(self.organization.slug, placeholder_om.id, role="member") assert 
response.data["detail"] == "The requested resource does not exist"
UpdateOrganizationMemberTest
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 6586, "end": 6979 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ( "ENTERPRISE_ORGANIZATIONS", "ENTERPRISE_ORGANIZATIONS_USER_ACCOUNTS", "EVERYWHERE", "SAME_ORGANIZATION", "SAME_ORGANIZATION_USER_ACCOUNTS", "USER_ACCOUNTS", )
EnterpriseAllowPrivateRepositoryForkingPolicyValue
python
getsentry__sentry
tests/sentry/sentry_apps/tasks/test_servicehooks.py
{ "start": 364, "end": 4252 }
class ____(TestCase): def setUp(self) -> None: self.hook = self.create_service_hook(project=self.project, events=("issue.created",)) @patch("sentry.sentry_apps.tasks.service_hooks.safe_urlopen") @responses.activate def test_verify_sentry_hook_signature(self, safe_urlopen: MagicMock) -> None: import hmac from hashlib import sha256 event = self.store_event( data={"timestamp": before_now(minutes=1).isoformat()}, project_id=self.project.id ) process_service_hook( self.hook.id, project_id=self.project.id, group_id=event.group_id, event_id=event.event_id, ) body = json.dumps(get_payload_v0(event)) expected = hmac.new( key=self.hook.secret.encode("utf-8"), msg=body.encode("utf-8"), digestmod=sha256 ).hexdigest() ((_, kwargs),) = safe_urlopen.call_args_list assert expected == kwargs["headers"]["X-ServiceHook-Signature"] @patch("sentry.sentry_apps.tasks.service_hooks.safe_urlopen") @responses.activate def test_event_created_sends_service_hook(self, safe_urlopen: MagicMock) -> None: self.hook.update(events=["event.created", "event.alert"]) event = self.store_event( data={"timestamp": before_now(minutes=1).isoformat()}, project_id=self.project.id ) process_service_hook( self.hook.id, project_id=self.project.id, group_id=event.group_id, event_id=event.event_id, ) ((_, kwargs),) = safe_urlopen.call_args_list data = json.loads(kwargs["data"]) assert kwargs["url"] == self.hook.url assert data == json.loads(json.dumps(get_payload_v0(event))) assert kwargs["headers"].keys() <= { "Content-Type", "X-ServiceHook-Timestamp", "X-ServiceHook-GUID", "X-ServiceHook-Signature", } @patch("sentry.sentry_apps.tasks.service_hooks.safe_urlopen") @responses.activate def test_event_created_sends_service_hook_with_event_id(self, safe_urlopen: MagicMock) -> None: self.hook.update(events=["event.created", "event.alert"]) event = self.store_event( data={"timestamp": before_now(minutes=1).isoformat()}, project_id=self.project.id ) assert event.group process_service_hook( self.hook.id, 
project_id=event.project_id, group_id=event.group.id, event_id=event.event_id, ) ((_, kwargs),) = safe_urlopen.call_args_list data = json.loads(kwargs["data"]) assert kwargs["url"] == self.hook.url assert data == json.loads(json.dumps(get_payload_v0(event))) assert kwargs["headers"].keys() <= { "Content-Type", "X-ServiceHook-Timestamp", "X-ServiceHook-GUID", "X-ServiceHook-Signature", } @responses.activate def test_v0_payload(self) -> None: responses.add(responses.POST, "https://example.com/sentry/webhook") event = self.store_event( data={"timestamp": before_now(minutes=1).isoformat()}, project_id=self.project.id ) assert event.group is not None process_service_hook( self.hook.id, project_id=self.project.id, group_id=event.group_id, event_id=event.event_id, ) body = get_payload_v0(event) assert ( body["group"]["url"] == f"http://testserver/organizations/{self.organization.slug}/issues/{event.group.id}/" ) assert ( body["event"]["url"] == f"http://testserver/organizations/{self.organization.slug}/issues/{event.group.id}/events/{event.event_id}/" )
TestServiceHooks
python
tensorflow__tensorflow
tensorflow/python/platform/resource_loader_test.py
{ "start": 797, "end": 1179 }
class ____(googletest.TestCase): def test_exception(self): with self.assertRaises(IOError): resource_loader.load_resource("/fake/file/path/dne") def test_exists(self): contents = resource_loader.load_resource( "python/platform/resource_loader.py") self.assertIn(b"tensorflow", contents) if __name__ == "__main__": googletest.main()
ResourceLoaderTest
python
tensorflow__tensorflow
tensorflow/python/saved_model/method_name_updater_test.py
{ "start": 2749, "end": 10356 }
class ____(test.TestCase): def setUp(self): super(MethodNameUpdaterTest, self).setUp() self._saved_model_path = tempfile.mkdtemp(prefix=test.get_temp_dir()) def testBasic(self): path = os.path.join( compat.as_bytes(self._saved_model_path), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) file_io.write_string_to_file( path, _SAVED_MODEL_PROTO.SerializeToString(deterministic=True)) updater = method_name_updater.MethodNameUpdater(self._saved_model_path) updater.replace_method_name( signature_key="serving_default", method_name="classify") updater.save() actual = loader.parse_saved_model(self._saved_model_path) self.assertProtoEquals( actual, text_format.Parse( """ saved_model_schema_version: 1 meta_graphs { meta_info_def { tags: "serve" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "classify" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } dim { size: 100 } } } } } } signature_def: { key: "foo" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } meta_graphs { meta_info_def { tags: "serve" tags: "gpu" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "classify" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } } } } } } signature_def: { key: "bar" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } """, saved_model_pb2.SavedModel())) def testTextFormatAndNewExportDir(self): path = os.path.join( compat.as_bytes(self._saved_model_path), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)) file_io.write_string_to_file(path, str(_SAVED_MODEL_PROTO)) updater = 
method_name_updater.MethodNameUpdater(self._saved_model_path) updater.replace_method_name( signature_key="foo", method_name="regress", tags="serve") updater.replace_method_name( signature_key="bar", method_name="classify", tags=["gpu", "serve"]) new_export_dir = tempfile.mkdtemp(prefix=test.get_temp_dir()) updater.save(new_export_dir) self.assertTrue( file_io.file_exists( os.path.join( compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)))) actual = loader.parse_saved_model(new_export_dir) self.assertProtoEquals( actual, text_format.Parse( """ saved_model_schema_version: 1 meta_graphs { meta_info_def { tags: "serve" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } dim { size: 100 } } } } } } signature_def: { key: "foo" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "regress" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } meta_graphs { meta_info_def { tags: "serve" tags: "gpu" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } } } } } } signature_def: { key: "bar" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "classify" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } """, saved_model_pb2.SavedModel())) def testExceptions(self): with self.assertRaises(IOError): updater = method_name_updater.MethodNameUpdater( tempfile.mkdtemp(prefix=test.get_temp_dir())) path = os.path.join( compat.as_bytes(self._saved_model_path), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) file_io.write_string_to_file( path, _SAVED_MODEL_PROTO.SerializeToString(deterministic=True)) 
updater = method_name_updater.MethodNameUpdater(self._saved_model_path) with self.assertRaisesRegex(ValueError, "`signature_key` must be defined"): updater.replace_method_name( signature_key=None, method_name="classify") with self.assertRaisesRegex(ValueError, "`method_name` must be defined"): updater.replace_method_name( signature_key="foobar", method_name="") with self.assertRaisesRegex( ValueError, r"MetaGraphDef associated with tags \['gpu'\] could not be found"): updater.replace_method_name( signature_key="bar", method_name="classify", tags=["gpu"]) with self.assertRaisesRegex( ValueError, r"MetaGraphDef associated with tags \['serve'\] does not " r"have a signature_def with key: 'baz'"): updater.replace_method_name( signature_key="baz", method_name="classify", tags=["serve"]) if __name__ == "__main__": test.main()
MethodNameUpdaterTest
python
weaviate__weaviate-python-client
weaviate/collections/classes/internal.py
{ "start": 4373, "end": 5658 }
class ____(Generic[P, R], Object[P, R]): """A single Weaviate object returned by a query within the `generate` namespace of a collection.""" __generated: Optional[str] generative: Optional[GenerativeSingle] # init required because of nuances of dataclass when defining @property generated and private var __generated def __init__( self, generated: Optional[str], generative: Optional[GenerativeSingle], uuid: uuid_package.UUID, metadata: MetadataReturn, properties: P, references: R, vector: Dict[str, Union[List[float], List[List[float]]]], collection: str, ) -> None: self.__generated = generated self.generative = generative super().__init__( uuid=uuid, metadata=metadata, properties=properties, references=references, vector=vector, collection=collection, ) @property @deprecated( "The generated field is deprecated. Use generative.text instead.", category=None ) # todo: turn into a runtime warning in the future def generated(self) -> Optional[str]: """The single generated text of the object.""" return self.__generated
GenerativeObject
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_pretty.py
{ "start": 9152, "end": 13287 }
class ____(type): def __new__(metacls, name): return type.__new__(metacls, name, (object,), {"name": name}) def __repr__(cls): return f"[CUSTOM REPR FOR CLASS {cls.name}]" ClassWithMeta = MetaClass("ClassWithMeta") def test_metaclass_repr(): output = pretty.pretty(ClassWithMeta) assert output == "[CUSTOM REPR FOR CLASS ClassWithMeta]" def test_unicode_repr(): u = "üniçodé" class C: def __repr__(self): return u c = C() p = pretty.pretty(c) assert p == u p = pretty.pretty([c]) assert p == f"[{u}]" def test_basic_class(): def type_pprint_wrapper(obj, p, cycle): if obj is MyObj: type_pprint_wrapper.called = True return pretty._type_pprint(obj, p, cycle) type_pprint_wrapper.called = False printer = pretty.RepresentationPrinter() printer.type_pprinters[type] = type_pprint_wrapper printer.pretty(MyObj) output = printer.getvalue() assert output == f"{__name__}.MyObj" assert type_pprint_wrapper.called def test_collections_defaultdict(): # Create defaultdicts with cycles a = defaultdict() a.default_factory = a b = defaultdict(list) b["key"] = b # Dictionary order cannot be relied on, test against single keys. 
cases = [ (defaultdict(list), "defaultdict(list, {})"), ( defaultdict(list, {"key": "-" * 50}), "defaultdict(list,\n" " {'key': '-----------------------------------------" "---------'})", ), (a, "defaultdict(defaultdict(...), {})"), (b, "defaultdict(list, {'key': defaultdict(...)})"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected @pytest.mark.skipif(PYPY, reason="slightly different on PyPy3") def test_collections_ordereddict(): # Create OrderedDict with cycle a = OrderedDict() a["key"] = a cases = [ (OrderedDict(), "OrderedDict()"), ( OrderedDict((i, i) for i in range(1000, 1010)), "OrderedDict([(1000, 1000),\n" " (1001, 1001),\n" " (1002, 1002),\n" " (1003, 1003),\n" " (1004, 1004),\n" " (1005, 1005),\n" " (1006, 1006),\n" " (1007, 1007),\n" " (1008, 1008),\n" " (1009, 1009)])", ), (a, "OrderedDict([('key', OrderedDict(...))])"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected def test_collections_deque(): # Create deque with cycle a = deque() a.append(a) cases = [ (deque(), "deque([])"), (deque([1, 2, 3]), "deque([1, 2, 3])"), ( deque(i for i in range(1000, 1020)), "deque([1000,\n" " 1001,\n" " 1002,\n" " 1003,\n" " 1004,\n" " 1005,\n" " 1006,\n" " 1007,\n" " 1008,\n" " 1009,\n" " 1010,\n" " 1011,\n" " 1012,\n" " 1013,\n" " 1014,\n" " 1015,\n" " 1016,\n" " 1017,\n" " 1018,\n" " 1019])", ), (a, "deque([deque(...)])"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected def test_collections_counter(): class MyCounter(Counter): pass cases = [ (Counter(), "Counter()"), (Counter(a=1), "Counter({'a': 1})"), (MyCounter(a=1), "MyCounter({'a': 1})"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected def test_cyclic_list(): x = [] x.append(x) assert pretty.pretty(x) == "[[...]]" def test_cyclic_dequeue(): x = deque() x.append(x) assert pretty.pretty(x) == "deque([deque(...)])"
MetaClass
python
django-haystack__django-haystack
test_haystack/test_app_using_appconfig/apps.py
{ "start": 36, "end": 179 }
class ____(AppConfig): name = "test_haystack.test_app_using_appconfig" verbose_name = "Simple test app using AppConfig"
SimpleTestAppConfig
python
lxml__lxml
src/lxml/html/__init__.py
{ "start": 51786, "end": 53056 }
class ____(list): """ Represents a group of checkboxes (``<input type=checkbox>``) that have the same name. In addition to using this like a list, the ``.value`` attribute returns a set-like object that you can add to or remove from to check and uncheck checkboxes. You can also use ``.value_options`` to get the possible values. """ @property def value(self): """ Return a set-like object that can be modified to check or uncheck individual checkboxes according to their value. """ return CheckboxValues(self) @value.setter def value(self, value): values = self.value values.clear() if not hasattr(value, '__iter__'): raise ValueError( "A CheckboxGroup (name=%r) must be set to a sequence (not %r)" % (self[0].name, value)) values.update(value) @value.deleter def value(self): self.value.clear() @property def value_options(self): """ Returns a list of all the possible values. """ return [el.get('value') for el in self] def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, list.__repr__(self))
CheckboxGroup
python
cherrypy__cherrypy
cherrypy/_cpwsgi.py
{ "start": 8832, "end": 14956 }
class ____(object): """WSGI response iterable for CherryPy applications.""" def __init__(self, environ, start_response, cpapp): """Initialize the WSGI app response.""" self.cpapp = cpapp try: self.environ = environ self.run() r = _cherrypy.serving.response outstatus = r.output_status if not isinstance(outstatus, bytes): raise TypeError('response.output_status is not a byte string.') outheaders = [] for k, v in r.header_list: if not isinstance(k, bytes): tmpl = 'response.header_list key %r is not a byte string.' raise TypeError(tmpl % k) if not isinstance(v, bytes): tmpl = ( 'response.header_list value %r is not a byte string.' ) raise TypeError(tmpl % v) outheaders.append((k, v)) if True: # According to PEP 3333, when using Python 3, the response # status and headers must be bytes masquerading as unicode; # that is, they must be of type "str" but are restricted to # code points in the "latin-1" set. outstatus = outstatus.decode('ISO-8859-1') outheaders = [ (k.decode('ISO-8859-1'), v.decode('ISO-8859-1')) for k, v in outheaders ] self.iter_response = iter(r.body) self.write = start_response(outstatus, outheaders) except BaseException: self.close() raise def __iter__(self): """Make an app response iterator.""" return self def __next__(self): """Iterate over the app response.""" return next(self.iter_response) def close(self): """Close and de-reference the current request and response. (Core) """ streaming = _cherrypy.serving.response.stream self.cpapp.release_serving() # We avoid the expense of examining the iterator to see if it's # closable unless we are streaming the response, as that's the # only situation where we are going to have an iterator which # may not have been exhausted yet. 
if streaming and is_closable_iterator(self.iter_response): iter_close = self.iter_response.close try: iter_close() except Exception: _cherrypy.log(traceback=True, severity=40) def run(self): """Create a Request object using environ.""" env = self.environ.get local = httputil.Host( '', int(env('SERVER_PORT', 80) or -1), env('SERVER_NAME', ''), ) remote = httputil.Host( env('REMOTE_ADDR', ''), int(env('REMOTE_PORT', -1) or -1), env('REMOTE_HOST', ''), ) scheme = env('wsgi.url_scheme') sproto = env('ACTUAL_SERVER_PROTOCOL', 'HTTP/1.1') request, resp = self.cpapp.get_serving(local, remote, scheme, sproto) # LOGON_USER is served by IIS, and is the name of the # user after having been mapped to a local account. # Both IIS and Apache set REMOTE_USER, when possible. request.login = env('LOGON_USER') or env('REMOTE_USER') or None request.multithread = self.environ['wsgi.multithread'] request.multiprocess = self.environ['wsgi.multiprocess'] request.wsgi_environ = self.environ request.prev = env('cherrypy.previous_request', None) meth = self.environ['REQUEST_METHOD'] path = httputil.urljoin( self.environ.get('SCRIPT_NAME', ''), self.environ.get('PATH_INFO', ''), ) qs = self.environ.get('QUERY_STRING', '') path, qs = self.recode_path_qs(path, qs) or (path, qs) rproto = self.environ.get('SERVER_PROTOCOL') headers = self.translate_headers(self.environ) rfile = self.environ['wsgi.input'] request.run(meth, path, qs, rproto, headers, rfile) headerNames = { 'HTTP_CGI_AUTHORIZATION': 'Authorization', 'CONTENT_LENGTH': 'Content-Length', 'CONTENT_TYPE': 'Content-Type', 'REMOTE_HOST': 'Remote-Host', 'REMOTE_ADDR': 'Remote-Addr', } def recode_path_qs(self, path, qs): """Recode app response path query string.""" # This isn't perfect; if the given PATH_INFO is in the # wrong encoding, it may fail to match the appropriate config # section URI. But meh. 
old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1') new_enc = self.cpapp.find_config( self.environ.get('PATH_INFO', ''), 'request.uri_encoding', 'utf-8', ) if new_enc.lower() == old_enc.lower(): return # Even though the path and qs are unicode, the WSGI server # is required by PEP 3333 to coerce them to ISO-8859-1 # masquerading as unicode. So we have to encode back to # bytes and then decode again using the "correct" encoding. try: return ( path.encode(old_enc).decode(new_enc), qs.encode(old_enc).decode(new_enc), ) except (UnicodeEncodeError, UnicodeDecodeError): # Just pass them through without transcoding and hope. pass def translate_headers(self, environ): """Translate CGI-environ header names to HTTP header names.""" for cgiName in environ: # We assume all incoming header keys are uppercase already. if cgiName in self.headerNames: yield self.headerNames[cgiName], environ[cgiName] elif cgiName[:5] == 'HTTP_': # Hackish attempt at recovering original header names. translatedHeader = cgiName[5:].replace('_', '-') yield translatedHeader, environ[cgiName]
AppResponse
python
openai__openai-python
src/openai/_module_client.py
{ "start": 1877, "end": 2004 }
class ____(LazyProxy["Images"]): @override def __load__(self) -> Images: return _load_client().images
ImagesProxy
python
Unity-Technologies__ml-agents
ml-agents-envs/mlagents_envs/environment.py
{ "start": 1773, "end": 23093 }
class ____(BaseEnv): # Communication protocol version. # When connecting to C#, this must be compatible with Academy.k_ApiVersion. # We follow semantic versioning on the communication version, so existing # functionality will work as long the major versions match. # This should be changed whenever a change is made to the communication protocol. # Revision history: # * 1.0.0 - initial version # * 1.1.0 - support concatenated PNGs for compressed observations. # * 1.2.0 - support compression mapping for stacked compressed observations. # * 1.3.0 - support action spaces with both continuous and discrete actions. # * 1.4.0 - support training analytics sent from python trainer to the editor. # * 1.5.0 - support variable length observation training and multi-agent groups. API_VERSION = "1.5.0" # Default port that the editor listens on. If an environment executable # isn't specified, this port will be used. DEFAULT_EDITOR_PORT = 5004 # Default base port for environments. Each environment will be offset from this # by it's worker_id. BASE_ENVIRONMENT_PORT = 5005 # Command line argument used to pass the port to the executable environment. _PORT_COMMAND_LINE_ARG = "--mlagents-port" @staticmethod def _raise_version_exception(unity_com_ver: str) -> None: raise UnityEnvironmentException( f"The communication API version is not compatible between Unity and python. 
" f"Python API: {UnityEnvironment.API_VERSION}, Unity API: {unity_com_ver}.\n " f"Please find the versions that work best together from our release page.\n" "https://github.com/Unity-Technologies/ml-agents/releases" ) @staticmethod def _check_communication_compatibility( unity_com_ver: str, python_api_version: str, unity_package_version: str ) -> bool: unity_communicator_version = StrictVersion(unity_com_ver) api_version = StrictVersion(python_api_version) if unity_communicator_version.version[0] == 0: if ( unity_communicator_version.version[0] != api_version.version[0] or unity_communicator_version.version[1] != api_version.version[1] ): # Minor beta versions differ. return False elif unity_communicator_version.version[0] != api_version.version[0]: # Major versions mismatch. return False else: # Major versions match, so either: # 1) The versions are identical, in which case there's no compatibility issues # 2) The Unity version is newer, in which case we'll warn or fail on the Unity side if trying to use # unsupported features # 3) The trainer version is newer, in which case new trainer features might be available but unused by C# # In any of the cases, there's no reason to warn about mismatch here. 
logger.info( f"Connected to Unity environment with package version {unity_package_version} " f"and communication version {unity_com_ver}" ) return True @staticmethod def _get_capabilities_proto() -> UnityRLCapabilitiesProto: capabilities = UnityRLCapabilitiesProto() capabilities.baseRLCapabilities = True capabilities.concatenatedPngObservations = True capabilities.compressedChannelMapping = True capabilities.hybridActions = True capabilities.trainingAnalytics = True capabilities.variableLengthObservation = True capabilities.multiAgentGroups = True return capabilities @staticmethod def _warn_csharp_base_capabilities( caps: UnityRLCapabilitiesProto, unity_package_ver: str, python_package_ver: str ) -> None: if not caps.baseRLCapabilities: logger.warning( "WARNING: The Unity process is not running with the expected base Reinforcement Learning" " capabilities. Please be sure upgrade the Unity Package to a version that is compatible with this " "python package.\n" f"Python package version: {python_package_ver}, C# package version: {unity_package_ver}" f"Please find the versions that work best together from our release page.\n" "https://github.com/Unity-Technologies/ml-agents/releases" ) def __init__( self, file_name: Optional[str] = None, worker_id: int = 0, base_port: Optional[int] = None, seed: int = 0, no_graphics: bool = False, no_graphics_monitor: bool = False, timeout_wait: int = 60, additional_args: Optional[List[str]] = None, side_channels: Optional[List[SideChannel]] = None, log_folder: Optional[str] = None, num_areas: int = 1, ): """ Starts a new unity environment and establishes a connection with the environment. Notice: Currently communication between Unity and Python takes place over an open socket without authentication. Ensure that the network where training takes place is secure. :string file_name: Name of Unity environment binary. :int base_port: Baseline port number to connect to Unity environment over. worker_id increments over this. 
If no environment is specified (i.e. file_name is None), the DEFAULT_EDITOR_PORT will be used. :int worker_id: Offset from base_port. Used for training multiple environments simultaneously. :bool no_graphics: Whether to run the Unity simulator in no-graphics mode :bool no_graphics_monitor: Whether to run the main worker in graphics mode, with the remaining in no-graphics mode :int timeout_wait: Time (in seconds) to wait for connection from environment. :list args: Addition Unity command line arguments :list side_channels: Additional side channel for no-rl communication with Unity :str log_folder: Optional folder to write the Unity Player log file into. Requires absolute path. """ atexit.register(self._close) self._additional_args = additional_args or [] self._no_graphics = no_graphics or no_graphics_monitor and worker_id != 0 # If base port is not specified, use BASE_ENVIRONMENT_PORT if we have # an environment, otherwise DEFAULT_EDITOR_PORT if base_port is None: base_port = ( self.BASE_ENVIRONMENT_PORT if file_name else self.DEFAULT_EDITOR_PORT ) self._port = base_port + worker_id self._buffer_size = 12000 # If true, this means the environment was successfully loaded self._loaded = False # The process that is started. 
If None, no process was started self._process: Optional[subprocess.Popen] = None self._timeout_wait: int = timeout_wait self._communicator = self._get_communicator(worker_id, base_port, timeout_wait) self._worker_id = worker_id if side_channels is None: side_channels = [] default_training_side_channel: Optional[ DefaultTrainingAnalyticsSideChannel ] = None if DefaultTrainingAnalyticsSideChannel.CHANNEL_ID not in [ _.channel_id for _ in side_channels ]: default_training_side_channel = DefaultTrainingAnalyticsSideChannel() side_channels.append(default_training_side_channel) self._side_channel_manager = SideChannelManager(side_channels) self._log_folder = log_folder self.academy_capabilities: UnityRLCapabilitiesProto = None # type: ignore # If the environment name is None, a new environment will not be launched # and the communicator will directly try to connect to an existing unity environment. # If the worker-id is not 0 and the environment name is None, an error is thrown if file_name is None and worker_id != 0: raise UnityEnvironmentException( "If the environment name is None, " "the worker-id must be 0 in order to connect with the Editor." ) if file_name is not None: try: self._process = env_utils.launch_executable( file_name, self._executable_args() ) except UnityEnvironmentException: self._close(0) raise else: logger.info( f"Listening on port {self._port}. " f"Start training by pressing the Play button in the Unity Editor." 
) self._loaded = True rl_init_parameters_in = UnityRLInitializationInputProto( seed=seed, communication_version=self.API_VERSION, package_version=mlagents_envs.__version__, capabilities=UnityEnvironment._get_capabilities_proto(), num_areas=num_areas, ) try: aca_output = self._send_academy_parameters(rl_init_parameters_in) aca_params = aca_output.rl_initialization_output except UnityTimeOutException: self._close(0) raise if not UnityEnvironment._check_communication_compatibility( aca_params.communication_version, UnityEnvironment.API_VERSION, aca_params.package_version, ): self._close(0) UnityEnvironment._raise_version_exception(aca_params.communication_version) UnityEnvironment._warn_csharp_base_capabilities( aca_params.capabilities, aca_params.package_version, UnityEnvironment.API_VERSION, ) self._env_state: Dict[str, Tuple[DecisionSteps, TerminalSteps]] = {} self._env_specs: Dict[str, BehaviorSpec] = {} self._env_actions: Dict[str, ActionTuple] = {} self._is_first_message = True self._update_behavior_specs(aca_output) self.academy_capabilities = aca_params.capabilities if default_training_side_channel is not None: default_training_side_channel.environment_initialized() @staticmethod def _get_communicator(worker_id, base_port, timeout_wait): return RpcCommunicator(worker_id, base_port, timeout_wait) def _executable_args(self) -> List[str]: args: List[str] = [] if self._no_graphics: args += ["-nographics", "-batchmode"] args += [UnityEnvironment._PORT_COMMAND_LINE_ARG, str(self._port)] # If the logfile arg isn't already set in the env args, # try to set it to an output directory logfile_set = "-logfile" in (arg.lower() for arg in self._additional_args) if self._log_folder and not logfile_set: log_file_path = os.path.join( self._log_folder, f"Player-{self._worker_id}.log" ) args += ["-logFile", log_file_path] # Add in arguments passed explicitly by the user. 
        # NOTE(review): tail of a method whose `def` line lies above this
        # excerpt — appends any extra launch arguments and returns them.
        args += self._additional_args
        return args

    def _update_behavior_specs(self, output: UnityOutputProto) -> None:
        """Create/refresh a BehaviorSpec for each brain in the init output."""
        init_output = output.rl_initialization_output
        for brain_param in init_output.brain_parameters:
            # Each BrainParameter in the rl_initialization_output should have at least one AgentInfo
            # Get that agent, because we need some of its observations.
            agent_infos = output.rl_output.agentInfos[brain_param.brain_name]
            if agent_infos.value:
                agent = agent_infos.value[0]
                new_spec = behavior_spec_from_proto(brain_param, agent)
                self._env_specs[brain_param.brain_name] = new_spec
                logger.info(f"Connected new brain: {brain_param.brain_name}")

    def _update_state(self, output: UnityRLOutputProto) -> None:
        """
        Collects experience information from all external brains in environment at current step.
        """
        for brain_name in self._env_specs.keys():
            if brain_name in output.agentInfos:
                agent_info_list = output.agentInfos[brain_name].value
                self._env_state[brain_name] = steps_from_proto(
                    agent_info_list, self._env_specs[brain_name]
                )
            else:
                # No agents reported for this behavior: store empty step batches.
                self._env_state[brain_name] = (
                    DecisionSteps.empty(self._env_specs[brain_name]),
                    TerminalSteps.empty(self._env_specs[brain_name]),
                )
        self._side_channel_manager.process_side_channel_message(output.side_channel)

    def reset(self) -> None:
        """Reset the simulation and refresh specs/state from the response."""
        if self._loaded:
            outputs = self._communicator.exchange(
                self._generate_reset_input(), self._poll_process
            )
            if outputs is None:
                raise UnityCommunicatorStoppedException("Communicator has exited.")
            self._update_behavior_specs(outputs)
            rl_output = outputs.rl_output
            self._update_state(rl_output)
            # The first exchange after load must be a reset; mark it done.
            self._is_first_message = False
            self._env_actions.clear()
        else:
            raise UnityEnvironmentException("No Unity environment is loaded.")

    @timed
    def step(self) -> None:
        """Advance the simulation one step, sending any queued actions."""
        if self._is_first_message:
            # A reset must precede the very first step (both return None).
            return self.reset()
        if not self._loaded:
            raise UnityEnvironmentException("No Unity environment is loaded.")
        # fill the blanks for missing actions
        for group_name in self._env_specs:
            if group_name not in self._env_actions:
                n_agents = 0
                if group_name in self._env_state:
                    # _env_state[name] is (DecisionSteps, TerminalSteps).
                    n_agents = len(self._env_state[group_name][0])
                self._env_actions[group_name] = self._env_specs[
                    group_name
                ].action_spec.empty_action(n_agents)
        step_input = self._generate_step_input(self._env_actions)
        with hierarchical_timer("communicator.exchange"):
            outputs = self._communicator.exchange(step_input, self._poll_process)
        if outputs is None:
            raise UnityCommunicatorStoppedException("Communicator has exited.")
        self._update_behavior_specs(outputs)
        rl_output = outputs.rl_output
        self._update_state(rl_output)
        self._env_actions.clear()

    @property
    def behavior_specs(self) -> MappingType[str, BehaviorSpec]:
        """Read-only mapping of behavior name -> BehaviorSpec."""
        return BehaviorMapping(self._env_specs)

    def _assert_behavior_exists(self, behavior_name: str) -> None:
        # Raise a descriptive error for unknown behavior names.
        if behavior_name not in self._env_specs:
            raise UnityActionException(
                f"The group {behavior_name} does not correspond to an existing "
                f"agent group in the environment"
            )

    def set_actions(self, behavior_name: BehaviorName, action: ActionTuple) -> None:
        """Queue actions for every agent of a behavior for the next step."""
        self._assert_behavior_exists(behavior_name)
        if behavior_name not in self._env_state:
            # No state received yet for this behavior; nothing to act on.
            return
        action_spec = self._env_specs[behavior_name].action_spec
        num_agents = len(self._env_state[behavior_name][0])
        action = action_spec._validate_action(action, num_agents, behavior_name)
        self._env_actions[behavior_name] = action

    def set_action_for_agent(
        self, behavior_name: BehaviorName, agent_id: AgentId, action: ActionTuple
    ) -> None:
        """Queue an action for one agent, padding the rest with empty actions."""
        self._assert_behavior_exists(behavior_name)
        if behavior_name not in self._env_state:
            return
        action_spec = self._env_specs[behavior_name].action_spec
        action = action_spec._validate_action(action, 1, behavior_name)
        if behavior_name not in self._env_actions:
            # Lazily allocate a batch of empty actions to write into.
            num_agents = len(self._env_state[behavior_name][0])
            self._env_actions[behavior_name] = action_spec.empty_action(num_agents)
        try:
            # Locate this agent's row in the current DecisionSteps.
            index = np.where(self._env_state[behavior_name][0].agent_id == agent_id)[0][
                0
            ]
        except IndexError as ie:
            raise IndexError(
                "agent_id {} is did not request a decision at the previous step".format(
                    agent_id
                )
            ) from ie
        if action_spec.continuous_size > 0:
            self._env_actions[behavior_name].continuous[index] = action.continuous[0, :]
        if action_spec.discrete_size > 0:
            self._env_actions[behavior_name].discrete[index] = action.discrete[0, :]

    def get_steps(
        self, behavior_name: BehaviorName
    ) -> Tuple[DecisionSteps, TerminalSteps]:
        """Return the (DecisionSteps, TerminalSteps) pair for a behavior."""
        self._assert_behavior_exists(behavior_name)
        return self._env_state[behavior_name]

    def _poll_process(self) -> None:
        """
        Check the status of the subprocess. If it has exited, raise a UnityEnvironmentException
        :return: None
        """
        if not self._process:
            return
        poll_res = self._process.poll()
        if poll_res is not None:
            exc_msg = self._returncode_to_env_message(self._process.returncode)
            raise UnityEnvironmentException(exc_msg)

    def close(self):
        """
        Sends a shutdown signal to the unity environment, and closes the socket connection.
        """
        if self._loaded:
            self._close()
        else:
            raise UnityEnvironmentException("No Unity environment is loaded.")

    def _close(self, timeout: Optional[int] = None) -> None:
        """
        Close the communicator and environment subprocess (if necessary).
        :int timeout: [Optional] Number of seconds to wait for the environment to shut down
            before force-killing it. Defaults to `self.timeout_wait`.
        """
        if timeout is None:
            timeout = self._timeout_wait
        self._loaded = False
        self._communicator.close()
        if self._process is not None:
            # Wait a bit for the process to shutdown, but kill it if it takes too long
            try:
                self._process.wait(timeout=timeout)
                logger.debug(self._returncode_to_env_message(self._process.returncode))
            except subprocess.TimeoutExpired:
                logger.warning("Environment timed out shutting down. Killing...")
                self._process.kill()
            # Set to None so we don't try to close multiple times.
            self._process = None

    @timed
    def _generate_step_input(
        self, vector_action: Dict[str, ActionTuple]
    ) -> UnityInputProto:
        """Serialize the queued per-behavior actions into a STEP input proto."""
        rl_in = UnityRLInputProto()
        for b in vector_action:
            n_agents = len(self._env_state[b][0])
            if n_agents == 0:
                continue
            for i in range(n_agents):
                action = AgentActionProto()
                if vector_action[b].continuous is not None:
                    # vector_actions_deprecated mirrors the typed fields —
                    # presumably for older protocol consumers (name says so).
                    action.vector_actions_deprecated.extend(
                        vector_action[b].continuous[i]
                    )
                    action.continuous_actions.extend(vector_action[b].continuous[i])
                if vector_action[b].discrete is not None:
                    action.vector_actions_deprecated.extend(
                        vector_action[b].discrete[i]
                    )
                    action.discrete_actions.extend(vector_action[b].discrete[i])
                rl_in.agent_actions[b].value.extend([action])
        rl_in.command = STEP
        rl_in.side_channel = bytes(
            self._side_channel_manager.generate_side_channel_messages()
        )
        return self._wrap_unity_input(rl_in)

    def _generate_reset_input(self) -> UnityInputProto:
        """Build a RESET input proto carrying pending side-channel messages."""
        rl_in = UnityRLInputProto()
        rl_in.command = RESET
        rl_in.side_channel = bytes(
            self._side_channel_manager.generate_side_channel_messages()
        )
        return self._wrap_unity_input(rl_in)

    def _send_academy_parameters(
        self, init_parameters: UnityRLInitializationInputProto
    ) -> UnityOutputProto:
        """Send initialization parameters and return Unity's first response."""
        inputs = UnityInputProto()
        inputs.rl_initialization_input.CopyFrom(init_parameters)
        return self._communicator.initialize(inputs, self._poll_process)

    @staticmethod
    def _wrap_unity_input(rl_input: UnityRLInputProto) -> UnityInputProto:
        # Wrap an RL input message in the top-level input envelope.
        result = UnityInputProto()
        result.rl_input.CopyFrom(rl_input)
        return result

    @staticmethod
    def _returncode_to_signal_name(returncode: int) -> Optional[str]:
        """
        Try to convert return codes into their corresponding signal name.
        E.g. returncode_to_signal_name(-2) -> "SIGINT"
        """
        try:
            # A negative value -N indicates that the child was terminated by signal N (POSIX only).
            s = signal.Signals(-returncode)
            return s.name
        except Exception:
            # Should generally be a ValueError, but catch everything just in case.
            return None

    @staticmethod
    def _returncode_to_env_message(returncode: int) -> str:
        """Format a human-readable shutdown message for a process return code."""
        signal_name = UnityEnvironment._returncode_to_signal_name(returncode)
        signal_name = f" ({signal_name})" if signal_name else ""
        return f"Environment shut down with return code {returncode}{signal_name}."
UnityEnvironment
python
explosion__spaCy
spacy/schemas.py
{ "start": 17198, "end": 17287 }
class ____(BaseModel):
    """Empty pretraining schema: a model with no fields, so any key is rejected."""

    class Config:
        # Pydantic config: forbid keys not declared on the model.
        extra = "forbid"
ConfigSchemaPretrainEmpty
python
scipy__scipy
scipy/optimize/tests/test_cobyla.py
{ "start": 228, "end": 5691 }
class ____:
    """Tests for the COBYLA optimizer via fmin_cobyla and minimize."""

    def setup_method(self):
        # The algorithm is very fragile on 32 bit, so unfortunately we need to start
        # very near the solution in order for the test to pass.
        self.x0 = [np.sqrt(25 - (2.0/3)**2), 2.0/3 + 1e-4]
        self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
        self.opts = {'disp': 0, 'rhobeg': 1, 'tol': 1e-6, 'maxiter': 100}

    def fun(self, x):
        # Objective: x0^2 + |x1|^3.
        return x[0]**2 + abs(x[1])**3

    def con1(self, x):
        # Inequality constraint: points on/outside the circle of radius 5.
        return x[0]**2 + x[1]**2 - 25

    def con2(self, x):
        # Opposite of con1; together they confine x to the circle boundary.
        return -self.con1(x)

    def test_simple(self):
        # use disp=True as smoke test for gh-8118
        x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
                        rhoend=1e-5, maxfun=100, disp=1)
        assert_allclose(x, self.solution, atol=1e-4)

    def test_minimize_simple(self):
        """Old (x-argument) and new (OptimizeResult) callback syntaxes agree."""
        class Callback:
            def __init__(self):
                self.n_calls = 0
                self.last_x = None

            def __call__(self, x):
                self.n_calls += 1
                self.last_x = x

        class CallbackNewSyntax:
            def __init__(self):
                self.n_calls = 0

            def __call__(self, intermediate_result):
                assert isinstance(intermediate_result, OptimizeResult)
                self.n_calls += 1

        callback = Callback()
        callback_new_syntax = CallbackNewSyntax()

        # Minimize with method='COBYLA'
        cons = (NonlinearConstraint(self.con1, 0, np.inf),
                {'type': 'ineq', 'fun': self.con2})
        sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
                       callback=callback, options=self.opts)
        sol_new = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
                           callback=callback_new_syntax, options=self.opts)
        assert_allclose(sol.x, self.solution, atol=1e-4)
        assert sol.success, sol.message
        assert sol.maxcv < 1e-5, sol
        assert sol.nfev < 70, sol
        assert sol.fun < self.fun(self.solution) + 1e-3, sol
        assert_array_almost_equal(
            sol.x,
            callback.last_x,
            decimal=5,
            err_msg="Last design vector sent to the callback is not equal to"
                    " returned value.",
        )
        assert sol_new.success, sol_new.message
        # Both callback syntaxes must produce identical runs.
        assert sol.fun == sol_new.fun
        assert sol.maxcv == sol_new.maxcv
        assert sol.nfev == sol_new.nfev
        assert callback.n_calls == callback_new_syntax.n_calls, \
            "Callback is not called the same amount of times for old and new syntax."

    def test_minimize_constraint_violation(self):
        # We set up conflicting constraints so that the algorithm will be
        # guaranteed to end up with maxcv > 0.
        cons = ({'type': 'ineq', 'fun': lambda x: 4 - x},
                {'type': 'ineq', 'fun': lambda x: x - 5})
        # catol above the violation -> reported as success.
        sol = minimize(lambda x: x, [0], method='cobyla', constraints=cons,
                       options={'catol': 0.6})
        assert sol.maxcv > 0.1
        assert sol.success
        # catol below the violation -> reported as failure.
        sol = minimize(lambda x: x, [0], method='cobyla', constraints=cons,
                       options={'catol': 0.4})
        assert sol.maxcv > 0.1
        assert not sol.success

    def test_f_target(self):
        """Optimization stops early once the objective reaches f_target."""
        f_target = 250
        sol = minimize(lambda x: x**2, [500], method='cobyla',
                       options={'f_target': f_target})
        assert sol.status == 1
        assert sol.success
        assert sol.fun <= f_target

    def test_minimize_linear_constraints(self):
        """COBYLA accepts a LinearConstraint object directly."""
        constraints = LinearConstraint([1.0, 1.0], 1.0, 1.0)
        sol = minimize(
            self.fun, self.x0, method='cobyla', constraints=constraints,
            options=self.opts,
        )
        # Analytic solution of the objective on the line x0 + x1 == 1.
        solution = [(4 - np.sqrt(7)) / 3, (np.sqrt(7) - 1) / 3]
        assert_allclose(sol.x, solution, atol=1e-4)
        assert sol.success, sol.message
        assert sol.maxcv < 1e-8, sol
        assert sol.nfev <= 100, sol
        assert sol.fun < self.fun(solution) + 1e-3, sol


def test_vector_constraints():
    # test that fmin_cobyla and minimize can take a combination
    # of constraints, some returning a number and others an array
    def fun(x):
        return (x[0] - 1)**2 + (x[1] - 2.5)**2

    def fmin(x):
        return fun(x) - 1

    def cons1(x):
        # Three linear inequality constraints evaluated as an array.
        a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
        return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] + a[i, 2]
                         for i in range(len(a))])

    def cons2(x):
        return x  # identity, acts as bounds x > 0

    x0 = np.array([2, 0])
    cons_list = [fun, cons1, cons2]

    xsol = [1.4, 1.7]
    fsol = 0.8

    # testing fmin_cobyla
    sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5)
    assert_allclose(sol, xsol, atol=1e-4)

    sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5)
    assert_allclose(fun(sol), 1, atol=1e-4)

    # testing minimize
    constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
    assert_allclose(sol.x, xsol, atol=1e-4)
    assert sol.success, sol.message
    assert_allclose(sol.fun, fsol, atol=1e-4)

    constraints = {'type': 'ineq', 'fun': fmin}
    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
    assert_allclose(sol.fun, 1, atol=1e-4)
TestCobyla
python
walkccc__LeetCode
solutions/1318. Minimum Flips to Make a OR b Equal to c/1318.py
{ "start": 0, "end": 296 }
class ____: def minFlips(self, a: int, b: int, c: int) -> int: MAX_BIT = 30 ans = 0 for i in range(MAX_BIT): if c >> i & 1: ans += (a >> i & 1) == 0 and (b >> i & 1) == 0 else: # (c >> i & 1) == 0 ans += (a >> i & 1) + (b >> i & 1) return ans
Solution
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/ext/automap.py
{ "start": 30703, "end": 31864 }
class ____(Protocol):
    """Callable signature for customizing collection-relationship names.

    Implementations receive the declarative base, the two mapped classes and
    the foreign-key constraint under inspection, and return the attribute
    name to use for the collection reference.
    """

    def __call__(
        self,
        base: Type[Any],
        local_cls: Type[Any],
        referred_cls: Type[Any],
        constraint: ForeignKeyConstraint,
    ) -> str: ...


def name_for_collection_relationship(
    base: Type[Any],
    local_cls: Type[Any],
    referred_cls: Type[Any],
    constraint: ForeignKeyConstraint,
) -> str:
    """Return the attribute name that should be used to refer from one
    class to another, for a collection reference.

    The default implementation is::

        return referred_cls.__name__.lower() + "_collection"

    Alternate implementations can be specified using the
    :paramref:`.AutomapBase.prepare.name_for_collection_relationship`
    parameter.

    :param base: the :class:`.AutomapBase` class doing the prepare.

    :param local_cls: the class to be mapped on the local side.

    :param referred_cls: the class to be mapped on the referring side.

    :param constraint: the :class:`_schema.ForeignKeyConstraint` that is being
     inspected to produce this relationship.

    """
    # Lowercased target class name plus the "_collection" suffix.
    referred_name = referred_cls.__name__.lower()
    return f"{referred_name}_collection"
NameForCollectionRelationshipType
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-oracleai/llama_index/embeddings/oracleai/base.py
{ "start": 643, "end": 6511 }
class ____(BaseEmbedding):
    """Compute text embeddings inside Oracle Database via DBMS_VECTOR_CHAIN.

    Sends text through ``dbms_vector_chain.utl_to_embeddings`` over an
    existing ``oracledb`` connection; ``params`` is forwarded as the JSON
    parameter document (e.g. selecting the loaded ONNX model).

    Fixes over the original implementation:
      * ``cursor`` could be referenced while unbound in the ``except`` blocks
        (NameError masking the real error when ``conn.cursor()`` or input
        validation raised) — cursors are now closed via ``try/finally``.
      * ``class_name``'s first parameter was misleadingly named ``self``
        despite the ``@classmethod`` decorator.
    """

    _conn: Any = PrivateAttr()  # live oracledb Connection
    _params: Dict[str, Any] = PrivateAttr()  # JSON params for utl_to_embeddings
    _proxy: Optional[str] = PrivateAttr()  # optional HTTP proxy for utl_http

    def __init__(
        self,
        conn: Connection,
        params: Dict[str, Any],
        proxy: Optional[str] = None,
        **kwargs: Any,
    ):
        """Store the connection, embedding parameters and optional proxy."""
        super().__init__(**kwargs)
        self._conn = conn
        self._proxy = proxy
        self._params = params

    @classmethod
    def class_name(cls) -> str:
        """Return the serialization name of this embedding class."""
        return "OracleEmbeddings"

    @staticmethod
    def load_onnx_model(conn: Connection, dir: str, onnx_file: str, model_name: str):
        """
        Load an ONNX model to Oracle Database.

        Args:
            conn: Oracle Connection,
            dir: Oracle Directory,
            onnx_file: ONNX file name,
            model_name: Name of the model.

        Note: user needs to have create procedure,
              create mining model, create any directory privilege.
        """
        # Validate before opening a cursor so the cleanup path never touches
        # an unbound cursor variable.
        if conn is None or dir is None or onnx_file is None or model_name is None:
            raise Exception("Invalid input")

        cursor = conn.cursor()
        try:
            # Drop any existing model of the same name, then load the ONNX
            # file configured for embedding output.
            cursor.execute(
                """
                begin
                    dbms_data_mining.drop_model(model_name => :model, force => true);
                    SYS.DBMS_VECTOR.load_onnx_model(:path, :filename, :model,
                        json('{"function" : "embedding", "embeddingOutput" : "embedding" , "input": {"input": ["DATA"]}}'));
                end;""",
                path=dir,
                filename=onnx_file,
                model=model_name,
            )
        except Exception as ex:
            print(f"An exception occurred :: {ex}")
            raise
        finally:
            # Close exactly once, on success and failure alike.
            cursor.close()

    def _get_embedding(self, text: str) -> List[float]:
        """Embed a single string.

        Returns ``None`` for ``None`` input and ``[]`` when the database
        returns no row; otherwise the embedding vector as a list of floats.
        """
        try:
            import oracledb
        except ImportError as e:
            raise ImportError(
                "Unable to import oracledb, please install with "
                "`pip install -U oracledb`."
            ) from e

        if text is None:
            return None

        # Fetch LOB columns as str/bytes rather than LOB locators.
        oracledb.defaults.fetch_lobs = False

        cursor = self._conn.cursor()
        try:
            if self._proxy:
                cursor.execute(
                    "begin utl_http.set_proxy(:proxy); end;", proxy=self._proxy
                )
            cursor.execute(
                "select t.* from dbms_vector_chain.utl_to_embeddings(:content, json(:params)) t",
                content=text,
                params=json.dumps(self._params),
            )
            row = cursor.fetchone()
            if row is None:
                return []
            rdata = json.loads(row[0])
            # dereference string as array
            return json.loads(rdata["embed_vector"])
        except Exception as ex:
            print(f"An exception occurred :: {ex}")
            raise
        finally:
            cursor.close()

    def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
        """
        Compute doc embeddings using an OracleEmbeddings.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each input text.
        """
        try:
            import oracledb
        except ImportError as e:
            raise ImportError(
                "Unable to import oracledb, please install with "
                "`pip install -U oracledb`."
            ) from e

        if texts is None:
            return None

        # returns strings or bytes instead of a locator
        oracledb.defaults.fetch_lobs = False

        embeddings: List[List[float]] = []
        cursor = self._conn.cursor()
        try:
            if self._proxy:
                cursor.execute(
                    "begin utl_http.set_proxy(:proxy); end;", proxy=self._proxy
                )

            # Package the inputs as a SYS.VECTOR_ARRAY_T of JSON chunk docs.
            chunks = [
                json.dumps({"chunk_id": i, "chunk_data": text})
                for i, text in enumerate(texts, start=1)
            ]
            vector_array_type = self._conn.gettype("SYS.VECTOR_ARRAY_T")
            inputs = vector_array_type.newobject(chunks)

            cursor.execute(
                "select t.* "
                + "from dbms_vector_chain.utl_to_embeddings(:content, "
                + "json(:params)) t",
                content=inputs,
                params=json.dumps(self._params),
            )
            for row in cursor:
                if row is None:
                    embeddings.append([])
                else:
                    rdata = json.loads(row[0])
                    # dereference string as array
                    embeddings.append(json.loads(rdata["embed_vector"]))
            return embeddings
        except Exception as ex:
            print(f"An exception occurred :: {ex}")
            raise
        finally:
            cursor.close()

    def _get_query_embedding(self, query: str) -> List[float]:
        """Embed a query string."""
        return self._get_embedding(query)

    async def _aget_query_embedding(self, query: str) -> List[float]:
        # Async variants delegate to the synchronous implementations.
        return self._get_query_embedding(query)

    def _get_text_embedding(self, text: str) -> List[float]:
        """Embed a document string."""
        return self._get_embedding(text)

    async def _aget_text_embedding(self, text: str) -> List[float]:
        return self._get_text_embedding(text)

    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of document strings."""
        return self._get_embeddings(texts)

    async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        return self._get_text_embeddings(texts)
OracleEmbeddings