language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
openai__openai-python
|
src/openai/resources/beta/assistants.py
|
{
"start": 1300,
"end": 23090
}
|
class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> AssistantsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AssistantsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AssistantsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AssistantsWithStreamingResponse(self)
def create(
self,
*,
model: Union[str, ChatModel],
description: Optional[str] | Omit = omit,
instructions: Optional[str] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
name: Optional[str] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_resources: Optional[assistant_create_params.ToolResources] | Omit = omit,
tools: Iterable[AssistantToolParam] | Omit = omit,
top_p: Optional[float] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Assistant:
"""
Create an assistant with a model and instructions.
Args:
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
description: The description of the assistant. The maximum length is 512 characters.
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the assistant. The maximum length is 256 characters.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: Specifies the format that the model must output. Compatible with
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
message the model generates is valid JSON.
**Important:** when using JSON mode, you **must** also instruct the model to
produce JSON yourself via a system or user message. Without this, the model may
generate an unending stream of whitespace until the generation reaches the token
limit, resulting in a long-running and seemingly "stuck" request. Also note that
the message content may be partially cut off if `finish_reason="length"`, which
indicates the generation exceeded `max_tokens` or the conversation exceeded the
max context length.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
tool_resources: A set of resources that are used by the assistant's tools. The resources are
specific to the type of tool. For example, the `code_interpreter` tool requires
a list of file IDs, while the `file_search` tool requires a list of vector store
IDs.
tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
assistant. Tools can be of types `code_interpreter`, `file_search`, or
`function`.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
"/assistants",
body=maybe_transform(
{
"model": model,
"description": description,
"instructions": instructions,
"metadata": metadata,
"name": name,
"reasoning_effort": reasoning_effort,
"response_format": response_format,
"temperature": temperature,
"tool_resources": tool_resources,
"tools": tools,
"top_p": top_p,
},
assistant_create_params.AssistantCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Assistant,
)
def retrieve(
self,
assistant_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Assistant:
"""
Retrieves an assistant.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/assistants/{assistant_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Assistant,
)
def update(
self,
assistant_id: str,
*,
description: Optional[str] | Omit = omit,
instructions: Optional[str] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
model: Union[
str,
Literal[
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5-2025-08-07",
"gpt-5-mini-2025-08-07",
"gpt-5-nano-2025-08-07",
"gpt-4.1",
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14",
"o3-mini",
"o3-mini-2025-01-31",
"o1",
"o1-2024-12-17",
"gpt-4o",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
]
| Omit = omit,
name: Optional[str] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_resources: Optional[assistant_update_params.ToolResources] | Omit = omit,
tools: Iterable[AssistantToolParam] | Omit = omit,
top_p: Optional[float] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Assistant:
"""Modifies an assistant.
Args:
description: The description of the assistant.
The maximum length is 512 characters.
instructions: The system instructions that the assistant uses. The maximum length is 256,000
characters.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
model: ID of the model to use. You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
name: The name of the assistant. The maximum length is 256 characters.
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: Specifies the format that the model must output. Compatible with
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
message the model generates is valid JSON.
**Important:** when using JSON mode, you **must** also instruct the model to
produce JSON yourself via a system or user message. Without this, the model may
generate an unending stream of whitespace until the generation reaches the token
limit, resulting in a long-running and seemingly "stuck" request. Also note that
the message content may be partially cut off if `finish_reason="length"`, which
indicates the generation exceeded `max_tokens` or the conversation exceeded the
max context length.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic.
tool_resources: A set of resources that are used by the assistant's tools. The resources are
specific to the type of tool. For example, the `code_interpreter` tool requires
a list of file IDs, while the `file_search` tool requires a list of vector store
IDs.
tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per
assistant. Tools can be of types `code_interpreter`, `file_search`, or
`function`.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or temperature but not both.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/assistants/{assistant_id}",
body=maybe_transform(
{
"description": description,
"instructions": instructions,
"metadata": metadata,
"model": model,
"name": name,
"reasoning_effort": reasoning_effort,
"response_format": response_format,
"temperature": temperature,
"tool_resources": tool_resources,
"tools": tools,
"top_p": top_p,
},
assistant_update_params.AssistantUpdateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Assistant,
)
def list(
self,
*,
after: str | Omit = omit,
before: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[Assistant]:
"""Returns a list of assistants.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
"/assistants",
page=SyncCursorPage[Assistant],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"limit": limit,
"order": order,
},
assistant_list_params.AssistantListParams,
),
),
model=Assistant,
)
def delete(
self,
assistant_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AssistantDeleted:
"""
Delete an assistant.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not assistant_id:
raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
f"/assistants/{assistant_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=AssistantDeleted,
)
|
Assistants
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/compute_log_manager.py
|
{
"start": 2882,
"end": 3891
}
|
class ____(
NamedTuple(
"_CapturedLogMetadata",
[
("stdout_location", Optional[str]),
("stderr_location", Optional[str]),
("stdout_download_url", Optional[str]),
("stderr_download_url", Optional[str]),
],
)
):
"""Object representing metadata info for the captured log data.
It can contain:
- a display string for the location of the log data,
- a URL for direct download of the captured log data.
"""
def __new__(
cls,
stdout_location: Optional[str] = None,
stderr_location: Optional[str] = None,
stdout_download_url: Optional[str] = None,
stderr_download_url: Optional[str] = None,
):
return super().__new__(
cls,
stdout_location=stdout_location,
stderr_location=stderr_location,
stdout_download_url=stdout_download_url,
stderr_download_url=stderr_download_url,
)
|
CapturedLogMetadata
|
python
|
django-crispy-forms__django-crispy-forms
|
crispy_forms/layout.py
|
{
"start": 27145,
"end": 28923
}
|
class ____(Div):
"""
Layout object. It wraps fields in a ``<div>`` and the template adds the
appropriate class to render the contents in a column. e.g. ``col-md`` when
using the Bootstrap4 template pack.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
css_class : str, optional
CSS classes to be applied to the ``<div>``. By default None.
Parameters
----------
*fields : str, LayoutObject
Any number of fields as positional arguments to be rendered within
the ``<div>``.
css_id : str, optional
A DOM id for the layout object which will be added to the ``<div>`` if
provided. By default None.
css_class : str, optional
Additional CSS classes to be applied in addition to those declared by
the class itself. If using the Bootstrap4 template pack the default
``col-md`` is removed if this string contains another ``col-`` class.
By default None.
template : str, optional
Overrides the default template, if provided. By default None.
**kwargs : dict, optional
Additional attributes are passed to ``flatatt`` and converted into
key="value", pairs. These attributes are added to the ``<div>``.
Examples
--------
In your ``Layout`` you can::
Column('form_field_1', 'form_field_2', css_id='col-example')
It is also possible to nest Layout Objects within a Row::
Div(
Column(
Field('form_field', css_class='field-class'),
css_class='col-sm,
),
Column('form_field_2', css_class='col-sm'),
)
"""
template = "%s/layout/column.html"
|
Column
|
python
|
Textualize__textual
|
src/textual/color.py
|
{
"start": 3803,
"end": 20771
}
|
class ____(NamedTuple):
"""A class to represent a color.
Colors are stored as three values representing the degree of red, green, and blue in a color, and a
fourth "alpha" value which defines where the color lies on a gradient of opaque to transparent.
Example:
```python
>>> from textual.color import Color
>>> color = Color.parse("red")
>>> color
Color(255, 0, 0)
>>> color.darken(0.5)
Color(98, 0, 0)
>>> color + Color.parse("green")
Color(0, 128, 0)
>>> color_with_alpha = Color(100, 50, 25, 0.5)
>>> color_with_alpha
Color(100, 50, 25, a=0.5)
>>> color + color_with_alpha
Color(177, 25, 12)
```
"""
r: int
"""Red component in range 0 to 255."""
g: int
"""Green component in range 0 to 255."""
b: int
"""Blue component in range 0 to 255."""
a: float = 1.0
"""Alpha (opacity) component in range 0 to 1."""
ansi: int | None = None
"""ANSI color index. `-1` means default color. `None` if not an ANSI color."""
auto: bool = False
"""Is the color automatic? (automatic colors may be white or black, to provide maximum contrast)"""
@classmethod
def automatic(cls, alpha_percentage: float = 100.0) -> Color:
"""Create an automatic color."""
return cls(0, 0, 0, alpha_percentage / 100.0, auto=True)
@classmethod
@lru_cache(maxsize=1024)
def from_rich_color(
cls, rich_color: RichColor | None, theme: TerminalTheme | None = None
) -> Color:
"""Create a new color from Rich's Color class.
Args:
rich_color: An instance of [Rich color][rich.color.Color].
theme: Optional Rich [terminal theme][rich.terminal_theme.TerminalTheme].
Returns:
A new Color instance.
"""
if rich_color is None:
return TRANSPARENT
r, g, b = rich_color.get_truecolor(theme)
return cls(
r, g, b, ansi=rich_color.number if rich_color.is_system_defined else None
)
@classmethod
def from_hsl(cls, h: float, s: float, l: float) -> Color:
"""Create a color from HSL components.
Args:
h: Hue.
s: Saturation.
l: Lightness.
Returns:
A new color.
"""
r, g, b = hls_to_rgb(h, l, s)
return cls(int(r * 255 + 0.5), int(g * 255 + 0.5), int(b * 255 + 0.5))
@classmethod
def from_hsv(cls, h: float, s: float, v: float) -> Color:
"""Create a color from HSV components.
Args:
h: Hue.
s: Saturation.
v: Value.
Returns:
A new color.
"""
r, g, b = hsv_to_rgb(h, s, v)
return cls(int(r * 255 + 0.5), int(g * 255 + 0.5), int(b * 255 + 0.5))
@property
def inverse(self) -> Color:
"""The inverse of this color.
Returns:
Inverse color.
"""
r, g, b, a, _, _ = self
return Color(255 - r, 255 - g, 255 - b, a)
@property
def is_transparent(self) -> bool:
"""Is the color transparent (i.e. has 0 alpha)?"""
return self.a == 0 and self.ansi is None
@property
def clamped(self) -> Color:
"""A clamped color (this color with all values in expected range)."""
r, g, b, a, ansi, auto = self
_clamp = clamp
color = Color(
_clamp(r, 0, 255),
_clamp(g, 0, 255),
_clamp(b, 0, 255),
_clamp(a, 0.0, 1.0),
ansi,
auto,
)
return color
@property
@lru_cache(1024)
def rich_color(self) -> RichColor:
"""This color encoded in Rich's Color class.
Returns:
A color object as used by Rich.
"""
r, g, b, a, ansi, _ = self
if ansi is not None:
return RichColor.parse("default") if ansi < 0 else RichColor.from_ansi(ansi)
return RichColor(
f"#{r:02x}{g:02x}{b:02x}", _TRUECOLOR, None, ColorTriplet(r, g, b)
)
@property
def normalized(self) -> tuple[float, float, float]:
"""A tuple of the color components normalized to between 0 and 1.
Returns:
Normalized components.
"""
r, g, b, _a, _, _ = self
return (r / 255, g / 255, b / 255)
@property
def rgb(self) -> tuple[int, int, int]:
"""The red, green, and blue color components as a tuple of ints."""
r, g, b, _, _, _ = self
return (r, g, b)
@property
def hsl(self) -> HSL:
"""This color in HSL format.
HSL color is an alternative way of representing a color, which can be used in certain color calculations.
Returns:
Color encoded in HSL format.
"""
r, g, b = self.normalized
h, l, s = rgb_to_hls(r, g, b)
return HSL(h, s, l)
@property
def hsv(self) -> HSV:
"""This color in HSV format.
HSV color is an alternative way of representing a color, which can be used in certain color calculations.
Returns:
Color encoded in HSV format.
"""
r, g, b = self.normalized
h, s, v = rgb_to_hsv(r, g, b)
return HSV(h, s, v)
@property
def brightness(self) -> float:
"""The human perceptual brightness.
A value of 1 is returned for pure white, and 0 for pure black.
Other colors lie on a gradient between the two extremes.
"""
r, g, b = self.normalized
brightness = (299 * r + 587 * g + 114 * b) / 1000
return brightness
@property
def hex(self) -> str:
"""The color in CSS hex form, with 6 digits for RGB, and 8 digits for RGBA.
For example, `"#46B3DE"` for an RGB color, or `"#3342457F"` for a color with alpha.
"""
r, g, b, a, ansi, _ = self.clamped
if ansi is not None:
return "ansi_default" if ansi == -1 else f"ansi_{ANSI_COLORS[ansi]}"
return (
f"#{r:02X}{g:02X}{b:02X}"
if a == 1
else f"#{r:02X}{g:02X}{b:02X}{int(a*255):02X}"
)
@property
def hex6(self) -> str:
"""The color in CSS hex form, with 6 digits for RGB. Alpha is ignored.
For example, `"#46B3DE"`.
"""
r, g, b, _a, _, _ = self.clamped
return f"#{r:02X}{g:02X}{b:02X}"
@property
def css(self) -> str:
"""The color in CSS RGB or RGBA form.
For example, `"rgb(10,20,30)"` for an RGB color, or `"rgb(50,70,80,0.5)"` for an RGBA color.
"""
r, g, b, a, ansi, auto = self
if auto:
alpha_percentage = clamp(a, 0.0, 1.0) * 100.0
if alpha_percentage == 100:
return "auto"
if not alpha_percentage % 1:
return f"auto {int(alpha_percentage)}%"
return f"auto {alpha_percentage:.1f}%"
if ansi is not None:
return "ansi_default" if ansi == -1 else f"ansi_{ANSI_COLORS[ansi]}"
return f"rgb({r},{g},{b})" if a == 1 else f"rgba({r},{g},{b},{a})"
@property
def monochrome(self) -> Color:
"""A monochrome version of this color.
Returns:
The monochrome (black and white) version of this color.
"""
r, g, b, a, _, _ = self
gray = round(r * 0.2126 + g * 0.7152 + b * 0.0722)
return Color(gray, gray, gray, a)
def __rich_repr__(self) -> rich.repr.Result:
r, g, b, a, ansi, auto = self
yield r
yield g
yield b
yield "a", a, 1.0
yield "ansi", ansi, None
yield "auto", auto, False
def with_alpha(self, alpha: float) -> Color:
"""Create a new color with the given alpha.
Args:
alpha: New value for alpha.
Returns:
A new color.
"""
r, g, b, _, _, _ = self
return Color(r, g, b, alpha)
def multiply_alpha(self, alpha: float) -> Color:
"""Create a new color, multiplying the alpha by a constant.
Args:
alpha: A value to multiple the alpha by (expected to be in the range 0 to 1).
Returns:
A new color.
"""
if self.ansi is not None:
return self
r, g, b, a, _ansi, auto = self
return Color(r, g, b, a * alpha, auto=auto)
@lru_cache(maxsize=1024)
def blend(
self, destination: Color, factor: float, alpha: float | None = None
) -> Color:
"""Generate a new color between two colors.
This method calculates a new color on a gradient.
The position on the gradient is given by `factor`, which is a float between 0 and 1, where 0 is the original color, and 1 is the `destination` color.
A value of `gradient` between the two extremes produces a color somewhere between the two end points.
Args:
destination: Another color.
factor: A blend factor, 0 -> 1.
alpha: New alpha for result.
Returns:
A new color.
"""
if destination.auto:
destination = self.get_contrast_text(destination.a)
if destination.ansi is not None:
return destination
if factor <= 0:
return self
elif factor >= 1:
return destination
r1, g1, b1, a1, _, _ = self
r2, g2, b2, a2, _, _ = destination
if alpha is None:
new_alpha = a1 + (a2 - a1) * factor
else:
new_alpha = alpha
return Color(
int(r1 + (r2 - r1) * factor),
int(g1 + (g2 - g1) * factor),
int(b1 + (b2 - b1) * factor),
new_alpha,
)
@lru_cache(maxsize=1024)
def tint(self, color: Color) -> Color:
"""Apply a tint to a color.
Similar to blend, but combines color and alpha.
Args:
color: A color with alpha component.
Returns:
New color
"""
r1, g1, b1, a1, ansi1, _ = self
if ansi1 is not None:
return self
r2, g2, b2, a2, ansi2, _ = color
if ansi2 is not None:
return self
return Color(
int(r1 + (r2 - r1) * a2),
int(g1 + (g2 - g1) * a2),
int(b1 + (b2 - b1) * a2),
a1,
)
def __add__(self, other: object) -> Color:
if isinstance(other, Color):
return self.blend(other, other.a, 1.0)
elif other is None:
return self
return NotImplemented
def __radd__(self, other: object) -> Color:
if isinstance(other, Color):
return self.blend(other, other.a, 1.0)
elif other is None:
return self
return NotImplemented
@classmethod
@lru_cache(maxsize=1024 * 4)
def parse(cls, color_text: str | Color) -> Color:
"""Parse a string containing a named color or CSS-style color.
Colors may be parsed from the following formats:
- Text beginning with a `#` is parsed as a hexadecimal color code,
where R, G, B, and A must be hexadecimal digits (0-9A-F):
- `#RGB`
- `#RGBA`
- `#RRGGBB`
- `#RRGGBBAA`
- Alternatively, RGB colors can also be specified in the format
that follows, where R, G, and B must be numbers between 0 and 255
and A must be a value between 0 and 1:
- `rgb(R,G,B)`
- `rgb(R,G,B,A)`
- The HSL model can also be used, with a syntax similar to the above,
if H is a value between 0 and 360, S and L are percentages, and A
is a value between 0 and 1:
- `hsl(H,S,L)`
- `hsla(H,S,L,A)`
Any other formats will raise a `ColorParseError`.
Args:
color_text: Text with a valid color format. Color objects will
be returned unmodified.
Raises:
ColorParseError: If the color is not encoded correctly.
Returns:
Instance encoding the color specified by the argument.
"""
if isinstance(color_text, Color):
return color_text
if color_text == "ansi_default":
return cls(0, 0, 0, ansi=-1)
if color_text.startswith("ansi_"):
try:
ansi = ANSI_COLORS.index(color_text[5:])
except ValueError:
pass
else:
return cls(*COLOR_NAME_TO_RGB.get(color_text), ansi=ansi)
color_from_name = COLOR_NAME_TO_RGB.get(color_text)
if color_from_name is not None:
return cls(*color_from_name)
color_match = RE_COLOR.match(color_text)
if color_match is None:
error_message = f"failed to parse {color_text!r} as a color"
suggested_color = None
if not color_text.startswith(("#", "rgb", "hsl")):
# Seems like we tried to use a color name: let's try to find one that is close enough:
suggested_color = get_suggestion(
color_text, list(COLOR_NAME_TO_RGB.keys())
)
if suggested_color:
error_message += f"; did you mean '{suggested_color}'?"
raise ColorParseError(error_message, suggested_color)
(
rgb_hex_triple,
rgb_hex_quad,
rgb_hex,
rgba_hex,
rgb,
rgba,
hsl,
hsla,
) = color_match.groups()
if rgb_hex_triple is not None:
r, g, b = rgb_hex_triple # type: ignore[misc]
color = cls(int(f"{r}{r}", 16), int(f"{g}{g}", 16), int(f"{b}{b}", 16))
elif rgb_hex_quad is not None:
r, g, b, a = rgb_hex_quad # type: ignore[misc]
color = cls(
int(f"{r}{r}", 16),
int(f"{g}{g}", 16),
int(f"{b}{b}", 16),
int(f"{a}{a}", 16) / 255.0,
)
elif rgb_hex is not None:
r, g, b = [int(pair, 16) for pair in _split_pairs3(rgb_hex)]
color = cls(r, g, b, 1.0)
elif rgba_hex is not None:
r, g, b, a = [int(pair, 16) for pair in _split_pairs4(rgba_hex)]
color = cls(r, g, b, a / 255.0)
elif rgb is not None:
r, g, b = [clamp(int(float(value)), 0, 255) for value in rgb.split(",")]
color = cls(r, g, b, 1.0)
elif rgba is not None:
float_r, float_g, float_b, float_a = [
float(value) for value in rgba.split(",")
]
color = cls(
clamp(int(float_r), 0, 255),
clamp(int(float_g), 0, 255),
clamp(int(float_b), 0, 255),
clamp(float_a, 0.0, 1.0),
)
elif hsl is not None:
h, s, l = hsl.split(",")
h = float(h) % 360 / 360
s = percentage_string_to_float(s)
l = percentage_string_to_float(l)
color = Color.from_hsl(h, s, l)
elif hsla is not None:
h, s, l, a = hsla.split(",")
h = float(h) % 360 / 360
s = percentage_string_to_float(s)
l = percentage_string_to_float(l)
a = clamp(float(a), 0.0, 1.0)
color = Color.from_hsl(h, s, l).with_alpha(a)
else: # pragma: no-cover
raise AssertionError( # pragma: no-cover
"Can't get here if RE_COLOR matches"
)
return color
@lru_cache(maxsize=1024)
def darken(self, amount: float, alpha: float | None = None) -> Color:
"""Darken the color by a given amount.
Args:
amount: Value between 0-1 to reduce luminance by.
alpha: Alpha component for new color or None to copy alpha.
Returns:
New color.
"""
l, a, b = rgb_to_lab(self)
l -= amount * 100
return lab_to_rgb(Lab(l, a, b), self.a if alpha is None else alpha).clamped
def lighten(self, amount: float, alpha: float | None = None) -> Color:
"""Lighten the color by a given amount.
Args:
amount: Value between 0-1 to increase luminance by.
alpha: Alpha component for new color or None to copy alpha.
Returns:
New color.
"""
return self.darken(-amount, alpha)
@lru_cache(maxsize=1024)
def get_contrast_text(self, alpha: float = 0.95) -> Color:
"""Get a light or dark color that best contrasts this color, for use with text.
Args:
alpha: An alpha value to apply to the result.
Returns:
A new color, either an off-white or off-black.
"""
return (WHITE if self.brightness < 0.5 else BLACK).with_alpha(alpha)
|
Color
|
python
|
ray-project__ray
|
rllib/models/tests/test_catalog.py
|
{
"start": 789,
"end": 897
}
|
class ____(Preprocessor):
def _init_shape(self, obs_space, options):
return [1]
|
CustomPreprocessor
|
python
|
spyder-ide__spyder
|
spyder/widgets/collapsible.py
|
{
"start": 471,
"end": 4895
}
|
class ____(QCollapsible):
"""Collapsible widget to hide and show child widgets."""
def __init__(self, parent=None, title=""):
super().__init__(title=title, parent=parent)
# Align widget to the left to text before or after it (don't know why
# this is necessary).
self.layout().setContentsMargins(5, 0, 0, 0)
# Remove spacing between toggle button and contents area
self.layout().setSpacing(0)
# Set icons
self.setCollapsedIcon(ima.icon("collapsed"))
self.setExpandedIcon(ima.icon("expanded"))
# To change the style only of these widgets
self._toggle_btn.setObjectName("collapsible-toggle")
self.content().setObjectName("collapsible-content")
# Add padding to the inside content
self.content().layout().setContentsMargins(
*((AppStyle.InnerContentPadding,) * 4)
)
# Set stylesheet
self._css = self._generate_stylesheet()
self.setStyleSheet(self._css.toString())
# Signals
self.toggled.connect(self._on_toggled)
# Set our properties for the toggle button
self._set_toggle_btn_properties()
def set_content_bottom_margin(self, bottom_margin):
"""Set bottom margin of the content area to `bottom_margin`."""
margins = self.content().layout().contentsMargins()
margins.setBottom(bottom_margin)
self.content().layout().setContentsMargins(margins)
def set_content_right_margin(self, right_margin):
"""Set right margin of the content area to `right_margin`."""
margins = self.content().layout().contentsMargins()
margins.setRight(right_margin)
self.content().layout().setContentsMargins(margins)
def _generate_stylesheet(self):
"""Generate base stylesheet for this widget."""
css = qstylizer.style.StyleSheet()
# --- Style for the header button
css["QPushButton#collapsible-toggle"].setValues(
# Increase padding (the default one is too small).
padding=f"{2 * AppStyle.MarginSize}px",
# Make it a bit different from a default QPushButton to not drag
# the same amount of attention to it.
backgroundColor=SpyderPalette.COLOR_BACKGROUND_3
)
# Make hover color match the change of background color above
css["QPushButton#collapsible-toggle:hover"].setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_4,
)
# --- Style for the contents area
css["QWidget#collapsible-content"].setValues(
# Remove top border to make it appear attached to the header button
borderTop="0px",
# Add border to the other edges
border=f'1px solid {SpyderPalette.COLOR_BACKGROUND_4}',
# Add border radius to the bottom to make it match the style of our
# other widgets.
borderBottomLeftRadius=f'{SpyderPalette.SIZE_BORDER_RADIUS}',
borderBottomRightRadius=f'{SpyderPalette.SIZE_BORDER_RADIUS}',
)
return css
def _on_toggled(self, state):
"""Adjustments when the button is toggled."""
if state:
# Remove bottom rounded borders from the header when the widget is
# expanded.
self._css["QPushButton#collapsible-toggle"].setValues(
borderBottomLeftRadius='0px',
borderBottomRightRadius='0px',
)
else:
# Restore bottom rounded borders to the header when the widget is
# collapsed.
self._css["QPushButton#collapsible-toggle"].setValues(
borderBottomLeftRadius=f'{SpyderPalette.SIZE_BORDER_RADIUS}',
borderBottomRightRadius=f'{SpyderPalette.SIZE_BORDER_RADIUS}',
)
self.setStyleSheet(self._css.toString())
def _set_toggle_btn_properties(self):
"""Set properties for the toogle button."""
def enter_event(event):
self.setCursor(Qt.PointingHandCursor)
super(QPushButton, self._toggle_btn).enterEvent(event)
def leave_event(event):
self.setCursor(Qt.ArrowCursor)
super(QPushButton, self._toggle_btn).leaveEvent(event)
self.toggleButton().enterEvent = enter_event
self.toggleButton().leaveEvent = leave_event
|
CollapsibleWidget
|
python
|
pytorch__pytorch
|
test/test_fake_tensor.py
|
{
"start": 70703,
"end": 94599
}
|
class ____(TestCase):
def test_shape_env_settings(self):
"""
Validation that any boolean settings in ShapeEnv are present in the
ShapeEnvSettings. We hope to ensure that any new settings that might
affect FakeTensor dispatch are included in the cache key calculation.
If this test fails, consider updating ShapeEnvSettings or change this
test to omit checking for the new field.
"""
init_sig = inspect.signature(ShapeEnv._init)
args = [
name
for name, param in init_sig.parameters.items()
if type(param.default) is bool
]
settings = [f.name for f in dataclasses.fields(ShapeEnvSettings)]
for arg in args:
self.assertTrue(arg in settings)
def _test_cache_key(self, fm, x, y, z):
"""
Helper for all test_cache_key_* tests below. Assert that the
cache keys for inputs x and y are the same, but z is different.
"""
func = aten.add.Tensor
state = _CacheKeyState()
key_x = fm._cache_key(state, func, [x], {})
key_y = fm._cache_key(state, func, [y], {})
key_z = fm._cache_key(state, func, [z], {})
self.assertEqual(key_x, key_y)
self.assertNotEqual(key_x, key_z)
def test_cache_key_dtype(self):
with FakeTensorMode() as fm:
x = torch.randn(4, 3, dtype=torch.float16)
y = torch.randn(4, 3, dtype=torch.float16)
z = x.to(dtype=torch.float32)
self._test_cache_key(fm, x, y, z)
def test_cache_key_shape(self):
with FakeTensorMode() as fm:
x = torch.randn(4, 3)
y = torch.randn(4, 3)
z = torch.randn(4, 2)
self._test_cache_key(fm, x, y, z)
def test_cache_key_stride(self):
with FakeTensorMode() as fm:
x = torch.randn(4, 2)
y = torch.randn(4, 2)
z = x.as_strided((4, 2), (1, 2))
self._test_cache_key(fm, x, y, z)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cache_key_device(self):
with FakeTensorMode() as fm:
x = torch.randn(4, 3)
y = torch.randn(4, 3)
z = x.to(device="cuda")
self._test_cache_key(fm, x, y, z)
def test_cache_key_memory_format(self):
with FakeTensorMode() as fm:
x = torch.randn(1, 2, 3, 4)
y = torch.randn(1, 2, 3, 4)
z = x.to(memory_format=torch.channels_last)
self._test_cache_key(fm, x, y, z)
def test_cache_key_storage_offset(self):
with FakeTensorMode() as fm:
x = torch.randn(3)[1:]
y = torch.randn(3)[1:]
z = torch.randn(2)
self._test_cache_key(fm, x, y, z)
def test_cache_key_requires_grad(self):
with FakeTensorMode() as fm:
x = torch.randn(4, 3)
y = torch.randn(4, 3)
z = torch.randn(4, 3, requires_grad=True)
self._test_cache_key(fm, x, y, z)
def test_cache_key_is_conj(self):
with FakeTensorMode() as fm:
x = torch.randn(4, 3, dtype=torch.complex64)
y = torch.randn(4, 3, dtype=torch.complex64)
z = torch.randn(4, 3, dtype=torch.complex64)
torch._C._set_conj(z, not z.is_conj())
self._test_cache_key(fm, x, y, z)
def test_cache_key_is_neg(self):
with FakeTensorMode() as fm:
x = torch.randn(4, 3, dtype=torch.complex64)
y = torch.randn(4, 3, dtype=torch.complex64)
z = torch.randn(4, 3, dtype=torch.complex64)
torch._C._set_neg(z, not z.is_neg())
self._test_cache_key(fm, x, y, z)
def test_cache_key_is_inference(self):
with torch.inference_mode(True):
t = torch.randn(4, 3)
with FakeTensorMode() as fm:
x = torch.randn(4, 3)
y = torch.randn(4, 3)
z = fm.from_tensor(t)
self._test_cache_key(fm, x, y, z)
def test_cache_key_constants(self):
with FakeTensorMode() as fm:
# Python hashes 1.0 to the same value as 1. Make sure the
# cache key calculation differentiates them.
self._test_cache_key(fm, 1.0, 1.0, 1)
self._test_cache_key(fm, 0.0, 0.0, 0)
def test_empty_list(self):
with FakeTensorMode() as fm:
func = aten.any.dims
state = _CacheKeyState()
x = torch.ones((2, 3))
key_x = fm._cache_key(state, func, [x, []], {})
key_y = fm._cache_key(state, func, [x], {})
self.assertNotEqual(key_x, key_y)
def assertHitsMisses(self, hits, misses):
"""
Helper to assert on the number of recorded hits and misses.
"""
info = FakeTensorMode.cache_info()
self.assertEqual(info.hits, hits)
self.assertEqual(info.misses, misses)
def assertBypasses(self, reason, count):
"""
Helper to assert on the number of recorded bypasses.
"""
info = FakeTensorMode.cache_info()
if count > 0:
self.assertIn(reason, info.bypasses)
self.assertEqual(info.bypasses[reason], count)
else:
self.assertNotIn(reason, info.bypasses)
def test_cache_hit(self):
"""
Test that cache hit/miss counters are updated correctly.
"""
with FakeTensorMode():
x = torch.randn(4, 3)
y = torch.randn(4, 3)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
res1 = x + y
self.assertHitsMisses(0, 1)
res2 = x + y
self.assertHitsMisses(1, 1)
self.assertEqual(
extract_tensor_metadata(res1),
extract_tensor_metadata(res2),
)
def test_cache_bypass(self):
"""
Test that cache bypass counters are updated correctly.
"""
with FakeTensorMode():
x = torch.randn(1, 2)
FakeTensorMode.cache_clear()
self.assertBypasses("inplace view", 0)
x.unsqueeze_(0)
self.assertBypasses("inplace view", 1)
def test_cache_default_dtype(self):
"""
Test that the default dtype is respected when serving cached results.
"""
with FakeTensorMode():
x = torch.tensor([1, 2], dtype=torch.int32)
torch.set_default_dtype(torch.float32)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
y = x + 1.0
self.assertEqual(y.dtype, torch.float32)
self.assertHitsMisses(0, 1)
torch.set_default_dtype(torch.float16)
y = x + 1.0
self.assertEqual(y.dtype, torch.float16)
self.assertHitsMisses(0, 2)
torch.set_default_dtype(torch.float32)
y = x + 1.0
self.assertEqual(y.dtype, torch.float32)
self.assertHitsMisses(1, 2)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_cache_default_device(self):
"""
Test that the default device is respected when serving cached results.
"""
with FakeTensorMode():
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
torch.set_default_device("cpu")
x = torch.tensor([1, 2])
y = x + 1.0
self.assertEqual(y.device.type, "cpu")
self.assertHitsMisses(0, 1)
torch.set_default_device("cuda")
x = torch.tensor([1, 2])
y = x + 1.0
self.assertEqual(y.device.type, "cuda")
self.assertHitsMisses(0, 2)
torch.set_default_device("cpu")
x = torch.tensor([1, 2])
y = x + 1.0
self.assertEqual(y.device.type, "cpu")
self.assertHitsMisses(1, 2)
def test_cache_inplace_op(self):
"""
Test that inplace ops served from the cache correctly reference the
input parameter.
"""
with FakeTensorMode():
x = torch.randn(1, 2)
y = torch.randn(1, 2)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
z = x.add_(y)
self.assertHitsMisses(0, 1)
self.assertEqual(id(x), id(z))
w = x.add_(y)
self.assertHitsMisses(1, 1)
self.assertEqual(id(x), id(w))
def test_cache_view_op(self):
"""
Test that view ops are handled correctly when served from the cache.
"""
with FakeTensorMode():
x1 = torch.ones(2, requires_grad=True).clone()
x2 = torch.ones(2, requires_grad=True).clone()
y2 = x2.view(-1)
# Test operating on a non-view tensor, then the same operation
# on a view tensor. Assert that the view property is set correctly.
z1 = x1.mul_(2)
self.assertFalse(z1._is_view())
z2 = y2.mul_(2)
self.assertTrue(z2._is_view())
# Now the other way around: first operate on a view tensor, then
# the same operation on a non-view tensor.
z2 = y2.mul_(2)
self.assertTrue(z2._is_view())
z1 = x1.mul_(2)
self.assertFalse(z1._is_view())
def test_cache_dispatch_key_set(self):
"""
Test that operations that change the dispatch key set bypass caching.
"""
with FakeTensorMode():
FakeTensorMode.cache_clear()
self.assertBypasses("dispatch_key_set mismatch", 0)
x = torch._efficientzerotensor(3)
self.assertTrue(x._is_zerotensor())
self.assertBypasses("dispatch_key_set mismatch", 1)
y = torch._efficientzerotensor(3)
self.assertTrue(y._is_zerotensor())
self.assertBypasses("dispatch_key_set mismatch", 2)
def test_fft_hfft2_issue145522(self):
with FakeTensorMode():
s0 = 5
s1 = 6
s2 = 7
s3 = 3
s4 = 10
s5 = 2
x = torch.randn(s0, s1, s2)
out = torch.randn(s0, s3, s4)
kwargs = {
"s": (s3, s4),
"dim": (1, s5),
"norm": "ortho",
}
r = torch._C._fft.fft_hfft2(x, **kwargs, out=out)
self.assertEqual(r.shape, out.shape)
def test_inference_mode(self):
"""
Test that caching handles inference mode correctly.
"""
with FakeTensorMode():
x = torch.randn(4, 3)
y = torch.randn(4, 3)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
# Expect a miss when the inference mode is different
res1 = x + y
with torch.inference_mode():
res2 = x + y
self.assertHitsMisses(0, 2)
self.assertFalse(res1.is_inference())
self.assertTrue(res2.is_inference())
# Second tries should see hits
res3 = x + y
self.assertHitsMisses(1, 2)
self.assertFalse(res3.is_inference())
self.assertEqual(
extract_tensor_metadata(res1),
extract_tensor_metadata(res3),
)
with torch.inference_mode():
res4 = x + y
self.assertHitsMisses(2, 2)
self.assertTrue(res4.is_inference())
self.assertEqual(
extract_tensor_metadata(res2),
extract_tensor_metadata(res4),
)
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_wrapper_tensor_subclass_different_device(self):
class DifferentDeviceTensor(torch.Tensor):
@staticmethod
def __new__(cls, a):
kwargs = {}
kwargs["strides"] = a.stride()
kwargs["storage_offset"] = a.storage_offset()
kwargs["device"] = torch.device("cpu")
kwargs["layout"] = a.layout
kwargs["requires_grad"] = a.requires_grad
kwargs["dtype"] = a.dtype
out = torch.Tensor._make_wrapper_subclass(cls, a.size(), **kwargs)
return out
def __init__(self, a):
self.inner_tensor = a
def __repr__(self):
return f"DifferentDeviceTensor({repr(self.inner_tensor)})"
def __tensor_flatten__(self):
return ["inner_tensor"], None
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert meta is None
return DifferentDeviceTensor(inner_tensors["inner_tensor"])
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
if kwargs is None:
kwargs = {}
args = pytree.tree_map_only(
DifferentDeviceTensor, lambda x: x.inner_tensor, args
)
kwargs = pytree.tree_map_only(
DifferentDeviceTensor, lambda x: x.inner_tensor, kwargs
)
# Returns unwrapped tensor
return func(*args, **kwargs)
a = torch.ones(2, 2, 768, device="cuda")
wrapped_a = DifferentDeviceTensor(a)
# Outer Tensor is on cpu, inner is on cuda
self.assertTrue(wrapped_a.is_cpu)
self.assertFalse(wrapped_a.inner_tensor.is_cpu)
with FakeTensorMode() as fake_mode:
fake_wrapped_a = fake_mode.from_tensor(wrapped_a)
self.assertTrue(fake_wrapped_a.is_cpu)
assert isinstance(fake_wrapped_a, DifferentDeviceTensor)
self.assertFalse(fake_wrapped_a.inner_tensor.is_cpu)
def test__upsample_bilinear2d_aa_backward_dynamic_shapes(self):
def f(x):
return torch.nn.functional.interpolate(
x,
size=[256, 256],
mode="bilinear",
align_corners=False,
antialias=True,
)
shape_env = ShapeEnv()
fake_m = FakeTensorMode(shape_env=shape_env)
x = fake_m.from_tensor(
torch.randn(1, 3, 2005, 1920, requires_grad=True),
symbolic_context=StatelessSymbolicContext(
dynamic_sizes=[
DimDynamic.STATIC,
DimDynamic.STATIC,
DimDynamic.DYNAMIC,
DimDynamic.DYNAMIC,
],
constraint_sizes=[None, None, None, None],
),
)
with fake_m, enable_python_dispatcher():
out = f(x)
out.sum().backward()
self.assertEqual(x.shape, x.grad.shape)
def test_from_buffer(self):
with FakeTensorMode():
obj = [1, 2]
f = io.BytesIO()
pickle.Pickler(f).dump(obj)
storage = torch.UntypedStorage.from_buffer(f.getvalue(), dtype=torch.uint8)
t = torch.ByteTensor(storage)
self.assertTrue(isinstance(t, FakeTensor))
self.assertEqual(t.device, torch.device("cpu"))
def test_meta_tensor_to_fake_cpu(self):
x = torch.randn(4, 4, device="meta")
with FakeTensorMode(allow_non_fake_inputs=True):
x_cpu = x.to(device="cpu")
self.assertTrue(isinstance(x_cpu, FakeTensor))
self.assertEqual(x_cpu.device, torch.device("cpu"))
def test_cache_tuple_outputs(self):
"""
Test to check that ops with tuple outputs work.
"""
with FakeTensorMode():
x = torch.randn(6, 4)
y = torch.randn(6, 4)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
ref = torch.split(x, 2)
self.assertHitsMisses(0, 1)
res = torch.split(y, 2)
self.assertHitsMisses(1, 1)
self.assertEqual(len(ref), len(res))
for a, b in zip(ref, res):
self.assertEqual(
extract_tensor_metadata(a),
extract_tensor_metadata(b),
)
def test_cache_aten_index(self):
with FakeTensorMode():
x = torch.randn(4, 4, 4)
idx_tensor1 = torch.tensor([0, 2, 3])
idx_tensor2 = torch.tensor([0, 1, 2])
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
ref = torch.ops.aten.index(x, [None, idx_tensor1, idx_tensor2])
self.assertHitsMisses(0, 3)
res = torch.ops.aten.index(x, [None, idx_tensor1, idx_tensor2])
self.assertHitsMisses(1, 3)
self.assertEqual(extract_tensor_metadata(ref), extract_tensor_metadata(res))
with FakeTensorMode():
x = torch.randn(4, 4, 4)
idx_tensor1 = torch.tensor([True, True, False, True])
self.assertRaises(
DynamicOutputShapeException,
lambda: torch.ops.aten.index(x, [None, idx_tensor1]),
)
idx_tensor1 = torch.tensor([1, -2, 3, -4], dtype=torch.int8)
self.assertRaises(
DynamicOutputShapeException,
lambda: torch.ops.aten.index(x, [None, idx_tensor1]),
)
@skipIfWindows(
msg="weird bug - cache may not be cleared after https://github.com/pytorch/pytorch/pull/154283"
)
@skipIfTorchDynamo("cache hit/miss changes with invoke_subgraph caching")
def test_invoke_subgraph(self):
"""
Tests invoke subgraph
"""
invoke_subgraph = torch._higher_order_ops.invoke_subgraph
def run():
def fn(x, y):
return (x + y * 2,)
# Ensure there is no caching for non-Fx graph module inputs
with FakeTensorMode():
x = torch.randn(6, 4)
y = torch.randn(6, 4)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
ref = invoke_subgraph(fn, "subgraph", x, y)
self.assertHitsMisses(0, 2)
self.assertBypasses("function argument", 1)
res = invoke_subgraph(fn, "subgraph", x, y)
# The hits are from the ops inside fn
self.assertHitsMisses(2, 2)
self.assertBypasses("function argument", 2)
res = invoke_subgraph(fn, "subgraph", x, y)
# The hits are from the ops inside fn
self.assertHitsMisses(4, 2)
self.assertBypasses("function argument", 3)
# Get the mod as if its going through torch.compile
backend = torch._dynamo.testing.AotEagerAndRecordGraphs()
x = torch.randn(6, 4)
y = torch.randn(6, 4)
torch.compile(fn, backend=backend, fullgraph=True)(x, y)
self.assertEqual(len(backend.fw_graphs), 1)
mod = backend.fw_graphs[0]
# Ensure that we see hits every time
with FakeTensorMode():
x = torch.randn(6, 4)
y = torch.randn(6, 4)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
ref = invoke_subgraph(mod, "subgraph", x, y)
self.assertHitsMisses(0, 3)
res = invoke_subgraph(mod, "subgraph", x, y)
# The hits are from re-running the subgraph
self.assertHitsMisses(1, 3)
res = invoke_subgraph(mod, "subgraph", x, y)
# The hits are from re-running the subgraph
self.assertHitsMisses(2, 3)
self.assertEqual(len(ref), len(res))
self.assertEqual(len(ref), len(res))
for a, b in zip(ref, res):
self.assertEqual(
extract_tensor_metadata(a),
extract_tensor_metadata(b),
)
self.assertTrue(count_invoke_subgraph_keys() > 0)
def count_invoke_subgraph_keys():
invoke_subgraph_keys = 0
for cache_key in FakeTensorMode.cache:
if isinstance(cache_key.key[0], torch._ops.HigherOrderOperator):
invoke_subgraph_keys += 1
return invoke_subgraph_keys
# Check that the graph gc clears the cache
run()
torch.compiler.reset()
gc.collect()
self.assertTrue(count_invoke_subgraph_keys() == 0)
@skipIfTorchDynamo("cache hit/miss changes with invoke_subgraph caching")
def test_invoke_subgraph_cacheable_inplace(self):
invoke_subgraph = torch._higher_order_ops.invoke_subgraph
def fn(x, y):
# aten ops are used so that eager backend graph is suitable for fake
# tensor testing
cos = torch.ops.aten.cos.default(x)
# inplace-view - this should cause the whole invoke_subgraph to not
# being able to cache
t = torch.ops.aten.t_.default(cos)
mul = torch.ops.aten.mul.Tensor(t, y)
return (mul,)
# Get the mod as if its going through torch.compile
backend = torch._dynamo.testing.AotEagerAndRecordGraphs()
x = torch.randn(4, 4)
y = torch.randn(4, 4)
torch.compile(fn, backend=backend, fullgraph=True)(x, y)
self.assertEqual(len(backend.graphs), 1)
mod = backend.graphs[0]
# Ensure that invoke_subgraph result is still cached
with FakeTensorMode():
x = torch.randn(4, 4)
y = torch.randn(4, 4)
FakeTensorMode.cache_clear()
self.assertHitsMisses(0, 0)
ref = invoke_subgraph(mod, "subgraph", x, y)
self.assertHitsMisses(0, 3)
res = invoke_subgraph(mod, "subgraph", x, y)
# The hits are from the ops inside fn and not the subgraph
self.assertHitsMisses(1, 3)
res = invoke_subgraph(mod, "subgraph", x, y)
# The hits are from the ops inside fn and not the subgraph
self.assertHitsMisses(2, 3)
self.assertEqual(len(ref), len(res))
self.assertEqual(len(ref), len(res))
for a, b in zip(ref, res):
self.assertEqual(
extract_tensor_metadata(a),
extract_tensor_metadata(b),
)
@skipIfTorchDynamo("cache hit/miss changes with invoke_subgraph caching")
def test_unbacked_output(self):
# The point of this test is to have an op which has no symbols as input
# but a symbol as an output and make sure that we skip caching it.
class LengthsGather(torch.nn.Module):
def forward(
self,
input: torch.Tensor,
lengths: torch.Tensor,
indices: torch.Tensor,
offsets: torch.Tensor,
) -> torch.Tensor:
bias = torch.gather(offsets, 0, indices)
lengths_selected = torch.gather(lengths, 0, indices)
index = torch.repeat_interleave(bias, lengths_selected, dim=0)
return index
input = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
lengths = torch.tensor([0, 2, 3, 1, 4])
indices = torch.tensor([2, 3, 4, 6, 7, 8, 9])
offsets = torch.cumsum(lengths, 0)
ep = torch.export.export(
LengthsGather(), (input, lengths, indices, offsets), strict=False
)
FakeTensorMode.cache_clear()
ep.run_decompositions({})
self.assertBypasses("unrepresented symbol in output", 2)
|
FakeTensorDispatchCache
|
python
|
pytorch__pytorch
|
torch/backends/cudnn/rnn.py
|
{
"start": 1047,
"end": 2304
}
|
class ____:
def __init__(self, inner):
self.inner = inner
def get(self):
return self.inner
def __getstate__(self):
# Note: can't return {}, because python2 won't call __setstate__
# if the value evaluates to False
return "<unserializable>"
def __setstate__(self, state):
self.inner = None
def init_dropout_state(dropout, train, dropout_seed, dropout_state):
dropout_desc_name = "desc_" + str(torch.cuda.current_device())
dropout_p = dropout if train else 0
if (dropout_desc_name not in dropout_state) or (
dropout_state[dropout_desc_name].get() is None
):
if dropout_p == 0:
dropout_state[dropout_desc_name] = Unserializable(None)
else:
dropout_state[dropout_desc_name] = Unserializable(
torch._cudnn_init_dropout_state( # type: ignore[call-arg]
dropout_p,
train,
dropout_seed,
# pyrefly: ignore [unexpected-keyword]
self_ty=torch.uint8,
device=torch.device("cuda"),
)
)
dropout_ts = dropout_state[dropout_desc_name].get()
return dropout_ts
|
Unserializable
|
python
|
pytest-dev__pytest
|
src/_pytest/fixtures.py
|
{
"start": 59028,
"end": 79621
}
|
class ____:
"""pytest fixture definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- config-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
def __init__(self, session: Session) -> None:
self.session = session
self.config: Config = session.config
# Maps a fixture name (argname) to all of the FixtureDefs in the test
# suite/plugins defined with this name. Populated by parsefactories().
# TODO: The order of the FixtureDefs list of each arg is significant,
# explain.
self._arg2fixturedefs: Final[dict[str, list[FixtureDef[Any]]]] = {}
self._holderobjseen: Final[set[object]] = set()
# A mapping from a nodeid to a list of autouse fixtures it defines.
self._nodeid_autousenames: Final[dict[str, list[str]]] = {
"": self.config.getini("usefixtures"),
}
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(
self,
node: nodes.Item,
func: Callable[..., object] | None,
cls: type | None,
) -> FuncFixtureInfo:
"""Calculate the :class:`FuncFixtureInfo` for an item.
If ``func`` is None, or if the item sets an attribute
``nofuncargs = True``, then ``func`` is not examined at all.
:param node:
The item requesting the fixtures.
:param func:
The item's function.
:param cls:
If the function is a method, the method's class.
"""
if func is not None and not getattr(node, "nofuncargs", False):
argnames = getfuncargnames(func, name=node.name, cls=cls)
else:
argnames = ()
usefixturesnames = self._getusefixturesnames(node)
autousenames = self._getautousenames(node)
initialnames = deduplicate_names(autousenames, usefixturesnames, argnames)
direct_parametrize_args = _get_direct_parametrize_args(node)
names_closure, arg2fixturedefs = self.getfixtureclosure(
parentnode=node,
initialnames=initialnames,
ignore_args=direct_parametrize_args,
)
return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin: _PluggyPlugin, plugin_name: str) -> None:
# Fixtures defined in conftest plugins are only visible to within the
# conftest's directory. This is unlike fixtures in non-conftest plugins
# which have global visibility. So for conftests, construct the base
# nodeid from the plugin name (which is the conftest path).
if plugin_name and plugin_name.endswith("conftest.py"):
# Note: we explicitly do *not* use `plugin.__file__` here -- The
# difference is that plugin_name has the correct capitalization on
# case-insensitive systems (Windows) and other normalization issues
# (issue #11816).
conftestpath = absolutepath(plugin_name)
try:
nodeid = str(conftestpath.parent.relative_to(self.config.rootpath))
except ValueError:
nodeid = ""
if nodeid == ".":
nodeid = ""
if os.sep != nodes.SEP:
nodeid = nodeid.replace(os.sep, nodes.SEP)
else:
nodeid = None
self.parsefactories(plugin, nodeid)
def _getautousenames(self, node: nodes.Node) -> Iterator[str]:
"""Return the names of autouse fixtures applicable to node."""
for parentnode in node.listchain():
basenames = self._nodeid_autousenames.get(parentnode.nodeid)
if basenames:
yield from basenames
def _getusefixturesnames(self, node: nodes.Item) -> Iterator[str]:
"""Return the names of usefixtures fixtures applicable to node."""
for marker_node, mark in node.iter_markers_with_node(name="usefixtures"):
if not mark.args:
marker_node.warn(
PytestWarning(
f"usefixtures() in {node.nodeid} without arguments has no effect"
)
)
yield from mark.args
def getfixtureclosure(
self,
parentnode: nodes.Node,
initialnames: tuple[str, ...],
ignore_args: AbstractSet[str],
) -> tuple[list[str], dict[str, Sequence[FixtureDef[Any]]]]:
# Collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive).
fixturenames_closure = list(initialnames)
arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] = {}
# Track the index for each fixture name in the simulated stack.
# Needed for handling override chains correctly, similar to _get_active_fixturedef.
# Using negative indices: -1 is the most specific (last), -2 is second to last, etc.
current_indices: dict[str, int] = {}
def process_argname(argname: str) -> None:
# Optimization: already processed this argname.
if current_indices.get(argname) == -1:
return
if argname not in fixturenames_closure:
fixturenames_closure.append(argname)
if argname in ignore_args:
return
fixturedefs = arg2fixturedefs.get(argname)
if not fixturedefs:
fixturedefs = self.getfixturedefs(argname, parentnode)
if not fixturedefs:
# Fixture not defined or not visible (will error during runtest).
return
arg2fixturedefs[argname] = fixturedefs
index = current_indices.get(argname, -1)
if -index > len(fixturedefs):
# Exhausted the override chain (will error during runtest).
return
fixturedef = fixturedefs[index]
current_indices[argname] = index - 1
for dep in fixturedef.argnames:
process_argname(dep)
current_indices[argname] = index
for name in initialnames:
process_argname(name)
def sort_by_scope(arg_name: str) -> Scope:
try:
fixturedefs = arg2fixturedefs[arg_name]
except KeyError:
return Scope.Function
else:
return fixturedefs[-1]._scope
fixturenames_closure.sort(key=sort_by_scope, reverse=True)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc: Metafunc) -> None:
"""Generate new tests based on parametrized fixtures used by the given metafunc"""
for argname in metafunc.fixturenames:
# Get the FixtureDefs for the argname.
fixture_defs = metafunc._arg2fixturedefs.get(argname, ())
# In the common case we only look at the fixture def with the
# closest scope (last in the list). But if the fixture overrides
# another fixture, while requesting the super fixture, keep going
# in case the super fixture is parametrized (#1953).
for fixturedef in reversed(fixture_defs):
# Fixture is parametrized, apply it and stop.
if fixturedef.params is not None:
metafunc.parametrize(
argname,
fixturedef.params,
indirect=True,
scope=fixturedef.scope,
ids=fixturedef.ids,
)
break
# Not requesting the overridden super fixture, stop.
#
# TODO: Handle the case where the super-fixture is transitively
# requested (see #7737 and the xfail'd test
# test_override_parametrized_fixture_via_transitive_fixture).
if argname not in fixturedef.argnames:
break
# Try next super fixture, if any.
def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> None:
# Separate parametrized setups.
items[:] = reorder_items(items)
def _register_fixture(
self,
*,
name: str,
func: _FixtureFunc[object],
nodeid: str | None,
scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] = "function",
params: Sequence[object] | None = None,
ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None,
autouse: bool = False,
) -> None:
"""Register a fixture
:param name:
The fixture's name.
:param func:
The fixture's implementation function.
:param nodeid:
The visibility of the fixture. The fixture will be available to the
node with this nodeid and its children in the collection tree.
None means that the fixture is visible to the entire collection tree,
e.g. a fixture defined for general use in a plugin.
:param scope:
The fixture's scope.
:param params:
The fixture's parametrization params.
:param ids:
The fixture's IDs.
:param autouse:
Whether this is an autouse fixture.
"""
fixture_def = FixtureDef(
config=self.config,
baseid=nodeid,
argname=name,
func=func,
scope=scope,
params=params,
ids=ids,
_ispytest=True,
_autouse=autouse,
)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if autouse:
self._nodeid_autousenames.setdefault(nodeid or "", []).append(name)
    # Overload: collection-node form -- visibility is taken from the node itself.
    @overload
    def parsefactories(
        self,
        node_or_obj: nodes.Node,
    ) -> None:
        raise NotImplementedError()

    # Overload: plain-object form (e.g. a plugin) -- visibility must be given.
    @overload
    def parsefactories(
        self,
        node_or_obj: object,
        nodeid: str | None,
    ) -> None:
        raise NotImplementedError()

    def parsefactories(
        self,
        node_or_obj: nodes.Node | object,
        nodeid: str | NotSetType | None = NOTSET,
    ) -> None:
        """Collect fixtures from a collection node or object.

        Found fixtures are parsed into `FixtureDef`s and saved.

        If `node_or_object` is a collection node (with an underlying Python
        object), the node's object is traversed and the node's nodeid is used to
        determine the fixtures' visibility. `nodeid` must not be specified in
        this case.

        If `node_or_object` is an object (e.g. a plugin), the object is
        traversed and the given `nodeid` is used to determine the fixtures'
        visibility. `nodeid` must be specified in this case; None and "" mean
        total visibility.
        """
        # Resolve the object to scan and the visibility nodeid from whichever
        # calling form was used (see the overloads above).
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            assert isinstance(node_or_obj, nodes.Node)
            holderobj = cast(object, node_or_obj.obj)  # type: ignore[attr-defined]
            assert isinstance(node_or_obj.nodeid, str)
            nodeid = node_or_obj.nodeid
        # Each holder object is scanned at most once.
        if holderobj in self._holderobjseen:
            return
        # Avoid accessing `@property` (and other descriptors) when iterating fixtures.
        if not safe_isclass(holderobj) and not isinstance(holderobj, types.ModuleType):
            holderobj_tp: object = type(holderobj)
        else:
            holderobj_tp = holderobj
        self._holderobjseen.add(holderobj)
        for name in dir(holderobj):
            # The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getattr() ignores such exceptions.
            obj_ub = safe_getattr(holderobj_tp, name, None)
            if type(obj_ub) is FixtureFunctionDefinition:
                marker = obj_ub._fixture_function_marker
                # A `name=` given to @pytest.fixture wins over the attribute name.
                if marker.name:
                    fixture_name = marker.name
                else:
                    fixture_name = name

                # OK we know it is a fixture -- now safe to look up on the _instance_.
                try:
                    obj = getattr(holderobj, name)
                # if the fixture is named in the decorator we cannot find it in the module
                except AttributeError:
                    obj = obj_ub

                func = obj._get_wrapped_function()

                self._register_fixture(
                    name=fixture_name,
                    nodeid=nodeid,
                    func=func,
                    scope=marker.scope,
                    params=marker.params,
                    ids=marker.ids,
                    autouse=marker.autouse,
                )
def getfixturedefs(
self, argname: str, node: nodes.Node
) -> Sequence[FixtureDef[Any]] | None:
"""Get FixtureDefs for a fixture name which are applicable
to a given node.
Returns None if there are no fixtures at all defined with the given
name. (This is different from the case in which there are fixtures
with the given name, but none applicable to the node. In this case,
an empty result is returned).
:param argname: Name of the fixture to search for.
:param node: The requesting Node.
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
return tuple(self._matchfactories(fixturedefs, node))
def _matchfactories(
self, fixturedefs: Iterable[FixtureDef[Any]], node: nodes.Node
) -> Iterator[FixtureDef[Any]]:
parentnodeids = {n.nodeid for n in node.iter_parents()}
for fixturedef in fixturedefs:
if fixturedef.baseid in parentnodeids:
yield fixturedef
def show_fixtures_per_test(config: Config) -> int | ExitCode:
    """Run a pytest session that reports, for every collected test, the
    fixtures it uses (``--fixtures-per-test``)."""
    from _pytest.main import wrap_session

    return wrap_session(config, _show_fixtures_per_test)
_PYTEST_DIR = Path(_pytest.__file__).parent


def _pretty_fixture_path(invocation_dir: Path, func) -> str:
    """Return a short display path for *func*'s definition location.

    Locations inside pytest's own package are abbreviated with a
    ``.../_pytest`` prefix; everything else is shown relative to the
    invocation directory.
    """
    location = Path(getlocation(func, invocation_dir))
    try:
        inside_pytest = location.relative_to(_PYTEST_DIR)
    except ValueError:
        # Not pytest-internal: show it relative to where pytest was invoked.
        return bestrelpath(invocation_dir, location)
    return str(Path("...", "_pytest") / inside_pytest)
def _show_fixtures_per_test(config: Config, session: Session) -> None:
    # Session body for --fixtures-per-test: collect all items, then print
    # for each item the fixtures it uses (name, location, first docstring
    # paragraph; the full docstring with -v).
    import _pytest.config

    session.perform_collect()
    invocation_dir = config.invocation_params.dir
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.get_verbosity()

    def get_best_relpath(func) -> str:
        # Shortest human-readable path to func's definition, relative to the
        # invocation directory.
        loc = getlocation(func, invocation_dir)
        return bestrelpath(invocation_dir, Path(loc))

    def write_fixture(fixture_def: FixtureDef[object]) -> None:
        # Print one fixture's name, pretty path and docstring. Fixtures whose
        # name starts with "_" are hidden unless verbosity is raised.
        argname = fixture_def.argname
        if verbose <= 0 and argname.startswith("_"):
            return
        prettypath = _pretty_fixture_path(invocation_dir, fixture_def.func)
        tw.write(f"{argname}", green=True)
        tw.write(f" -- {prettypath}", yellow=True)
        tw.write("\n")
        fixture_doc = inspect.getdoc(fixture_def.func)
        if fixture_doc:
            # Without -v, only the first docstring paragraph is shown.
            write_docstring(
                tw,
                fixture_doc.split("\n\n", maxsplit=1)[0]
                if verbose <= 0
                else fixture_doc,
            )
        else:
            tw.line("    no docstring available", red=True)

    def write_item(item: nodes.Item) -> None:
        # Not all items have _fixtureinfo attribute.
        info: FuncFixtureInfo | None = getattr(item, "_fixtureinfo", None)
        if info is None or not info.name2fixturedefs:
            # This test item does not use any fixtures.
            return
        tw.line()
        tw.sep("-", f"fixtures used by {item.name}")
        # TODO: Fix this type ignore.
        tw.sep("-", f"({get_best_relpath(item.function)})")  # type: ignore[attr-defined]
        # dict key not used in loop but needed for sorting.
        for _, fixturedefs in sorted(info.name2fixturedefs.items()):
            assert fixturedefs is not None
            if not fixturedefs:
                continue
            # Last item is expected to be the one used by the test item.
            write_fixture(fixturedefs[-1])

    for session_item in session.items:
        write_item(session_item)
def showfixtures(config: Config) -> int | ExitCode:
    """Run a pytest session that lists every available fixture
    (``--fixtures``)."""
    from _pytest.main import wrap_session

    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config: Config, session: Session) -> None:
    # Session body for --fixtures: collect, then list every registered
    # fixture grouped by defining module, sorted so that more narrowly
    # scoped (longer baseid) definitions come later.
    import _pytest.config

    session.perform_collect()
    invocation_dir = config.invocation_params.dir
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.get_verbosity()

    fm = session._fixturemanager

    available = []
    # (argname, location) pairs already emitted, to drop duplicates.
    seen: set[tuple[str, str]] = set()

    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        for fixturedef in fixturedefs:
            loc = getlocation(fixturedef.func, invocation_dir)
            if (fixturedef.argname, loc) in seen:
                continue
            seen.add((fixturedef.argname, loc))
            # Tuple layout doubles as the sort key: baseid length first,
            # then module, pretty path and fixture name.
            available.append(
                (
                    len(fixturedef.baseid),
                    fixturedef.func.__module__,
                    _pretty_fixture_path(invocation_dir, fixturedef.func),
                    fixturedef.argname,
                    fixturedef,
                )
            )

    available.sort()
    currentmodule = None
    for baseid, module, prettypath, argname, fixturedef in available:
        if currentmodule != module:
            # New module group: print a separator (pytest-internal modules
            # get no header of their own).
            if not module.startswith("_pytest."):
                tw.line()
                tw.sep("-", f"fixtures defined from {module}")
                currentmodule = module
        # Fixtures named with a leading "_" are hidden unless -v is given.
        if verbose <= 0 and argname.startswith("_"):
            continue
        tw.write(f"{argname}", green=True)
        if fixturedef.scope != "function":
            tw.write(f" [{fixturedef.scope} scope]", cyan=True)
        tw.write(f" -- {prettypath}", yellow=True)
        tw.write("\n")
        doc = inspect.getdoc(fixturedef.func)
        if doc:
            # Without -v, only the first docstring paragraph is shown.
            write_docstring(
                tw, doc.split("\n\n", maxsplit=1)[0] if verbose <= 0 else doc
            )
        else:
            tw.line("    no docstring available", red=True)
    tw.line()
def write_docstring(tw: TerminalWriter, doc: str, indent: str = "    ") -> None:
    """Write *doc* to the terminal writer one line at a time, each line
    prefixed with *indent*."""
    for text in doc.split("\n"):
        tw.line(f"{indent}{text}")
|
FixtureManager
|
python
|
pytorch__pytorch
|
torch/_inductor/utils.py
|
{
"start": 4240,
"end": 4644
}
|
class ____(sympy.Function):
"""Symbolically round up to the nearest multiple of ALIGN_BYTES"""
nargs = (1,)
is_integer = True
@classmethod
def eval(cls, value: sympy.Expr) -> Optional[sympy.Expr]:
if isinstance(value, (int, sympy.Integer)):
return _align(int(value))
if _is_aligned(value):
return value
@dataclasses.dataclass(frozen=True)
|
align
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments_differ.py
|
{
"start": 5186,
"end": 5263
}
|
class ____:
def func(self, user_input: FooT1) -> None:
pass
|
ParentT3
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/constrainedTypeVar8.py
|
{
"start": 127,
"end": 560
}
|
class ____:
def __init__(self, x: Any) -> None:
pass
def f(self) -> None:
pass
T = TypeVar("T", str, int, A)
def factory(desired_type: type[T]) -> T:
return desired_type(1)
factory(str)
reveal_type(factory(str), expected_text="str")
factory(int)
reveal_type(factory(int), expected_text="int")
factory(A).f()
reveal_type(factory(A), expected_text="A")
# This should generate an error
factory(float)
|
A
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_lifecycle.py
|
{
"start": 383,
"end": 5331
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'post_start': 'V1LifecycleHandler',
'pre_stop': 'V1LifecycleHandler',
'stop_signal': 'str'
}
attribute_map = {
'post_start': 'postStart',
'pre_stop': 'preStop',
'stop_signal': 'stopSignal'
}
def __init__(self, post_start=None, pre_stop=None, stop_signal=None, local_vars_configuration=None): # noqa: E501
"""V1Lifecycle - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._post_start = None
self._pre_stop = None
self._stop_signal = None
self.discriminator = None
if post_start is not None:
self.post_start = post_start
if pre_stop is not None:
self.pre_stop = pre_stop
if stop_signal is not None:
self.stop_signal = stop_signal
@property
def post_start(self):
"""Gets the post_start of this V1Lifecycle. # noqa: E501
:return: The post_start of this V1Lifecycle. # noqa: E501
:rtype: V1LifecycleHandler
"""
return self._post_start
@post_start.setter
def post_start(self, post_start):
"""Sets the post_start of this V1Lifecycle.
:param post_start: The post_start of this V1Lifecycle. # noqa: E501
:type: V1LifecycleHandler
"""
self._post_start = post_start
@property
def pre_stop(self):
"""Gets the pre_stop of this V1Lifecycle. # noqa: E501
:return: The pre_stop of this V1Lifecycle. # noqa: E501
:rtype: V1LifecycleHandler
"""
return self._pre_stop
@pre_stop.setter
def pre_stop(self, pre_stop):
"""Sets the pre_stop of this V1Lifecycle.
:param pre_stop: The pre_stop of this V1Lifecycle. # noqa: E501
:type: V1LifecycleHandler
"""
self._pre_stop = pre_stop
@property
def stop_signal(self):
"""Gets the stop_signal of this V1Lifecycle. # noqa: E501
StopSignal defines which signal will be sent to a container when it is being stopped. If not specified, the default is defined by the container runtime in use. StopSignal can only be set for Pods with a non-empty .spec.os.name # noqa: E501
:return: The stop_signal of this V1Lifecycle. # noqa: E501
:rtype: str
"""
return self._stop_signal
@stop_signal.setter
def stop_signal(self, stop_signal):
"""Sets the stop_signal of this V1Lifecycle.
StopSignal defines which signal will be sent to a container when it is being stopped. If not specified, the default is defined by the container runtime in use. StopSignal can only be set for Pods with a non-empty .spec.os.name # noqa: E501
:param stop_signal: The stop_signal of this V1Lifecycle. # noqa: E501
:type: str
"""
self._stop_signal = stop_signal
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Lifecycle):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Lifecycle):
return True
return self.to_dict() != other.to_dict()
|
V1Lifecycle
|
python
|
jackfrued__Python-100-Days
|
公开课/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part01/example05.py
|
{
"start": 63,
"end": 327
}
|
class ____(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write('<h1>goodbye, world</h1>'.encode())
server = HTTPServer(('', 8000), RequestHandler)
server.serve_forever()
|
RequestHandler
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclass6.py
|
{
"start": 664,
"end": 827
}
|
class ____(ParentA):
prop_2: str = "bye"
test = ChildA(prop_2="test", prop_4="hi")
assert test.prop_1 == "test"
assert test.prop_2 == "test"
@dataclass
|
ChildA
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 361910,
"end": 376165
}
|
class ____(VegaLiteSchema):
r"""
FacetFieldDef schema wrapper.
Parameters
----------
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
header : dict, :class:`Header`, None
An object defining properties of a facet's header.
sort : dict, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`EncodingSortField`, Sequence[dict, :class:`DateTime`], Literal['ascending', 'descending'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {"$ref": "#/definitions/FacetFieldDef"}
def __init__(
self,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
header: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| SortOrder_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
header=header,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
|
FacetFieldDef
|
python
|
bokeh__bokeh
|
tests/support/plugins/managed_server_loop.py
|
{
"start": 1554,
"end": 2637
}
|
class ____(Protocol):
def __call__(self, application: Application, port: int | None = None, **server_kwargs: Any) -> ContextManager[Server]: ...
@pytest.fixture
def ManagedServerLoop(unused_tcp_port: int) -> MSL:
@contextmanager
def msl(application: Application, port: int | None = None, **server_kwargs: Any) -> Iterator[Server]:
if port is None:
port = unused_tcp_port
server = Server(application, port=port, **server_kwargs)
server.start()
yield server
server.unlisten()
server.stop()
return msl
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
MSL
|
python
|
django__django
|
tests/template_tests/filter_tests/test_truncatechars.py
|
{
"start": 68,
"end": 1074
}
|
class ____(SimpleTestCase):
@setup({"truncatechars01": "{{ a|truncatechars:3 }}"})
def test_truncatechars01(self):
output = self.engine.render_to_string(
"truncatechars01", {"a": "Testing, testing"}
)
self.assertEqual(output, "Te…")
@setup({"truncatechars02": "{{ a|truncatechars:7 }}"})
def test_truncatechars02(self):
output = self.engine.render_to_string("truncatechars02", {"a": "Testing"})
self.assertEqual(output, "Testing")
@setup({"truncatechars03": "{{ a|truncatechars:'e' }}"})
def test_fail_silently_incorrect_arg(self):
output = self.engine.render_to_string(
"truncatechars03", {"a": "Testing, testing"}
)
self.assertEqual(output, "Testing, testing")
@setup({"truncatechars04": "{{ a|truncatechars:3 }}"})
def test_truncatechars04(self):
output = self.engine.render_to_string("truncatechars04", {"a": "abc"})
self.assertEqual(output, "abc")
|
TruncatecharsTests
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config_named_vectors.py
|
{
"start": 3119,
"end": 64617
}
|
class ____:
@staticmethod
def none(
name: str, *, vector_index_config: Optional[_VectorIndexConfigCreate] = None
) -> _NamedVectorConfigCreate:
"""Create a named vector using no vectorizer. You will need to provide the vectors yourself.
Args:
name: The name of the named vector.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_VectorizerConfigCreate(vectorizer=Vectorizers.NONE),
vector_index_config=vector_index_config,
)
@staticmethod
def custom(
name: str,
*,
module_name: str,
module_config: Optional[Dict[str, Any]] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _NamedVectorConfigCreate:
"""Create a named vector using no vectorizer. You will need to provide the vectors yourself.
Args:
name: The name of the named vector.
module_name: The name of the custom module to use.
module_config: The configuration of the custom module to use.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_VectorizerCustomConfig(
vectorizer=_EnumLikeStr(module_name), module_config=module_config
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2colbert_jinaai(
name: str,
*,
dimensions: Optional[int] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2colbert_jinaai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/colbert)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
dimensions: Number of dimensions. Applicable to v3 OpenAI models only. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vector_index_config=vector_index_config,
vectorizer=_Text2ColbertJinaAIConfig(
model=model,
dimensions=dimensions,
vectorizeClassName=vectorize_collection_name,
),
)
@staticmethod
def text2vec_cohere(
name: str,
*,
base_url: Optional[AnyHttpUrl] = None,
model: Optional[Union[CohereModel, str]] = None,
truncate: Optional[CohereTruncation] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_cohere` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `CohereModel` type or if `truncate` is not a valid value from the `CohereTruncation` type.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecCohereConfig(
baseURL=base_url,
model=model,
dimensions=None,
truncate=truncate,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def multi2vec_cohere(
name: str,
*,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model: Optional[Union[CohereMultimodalModel, str]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
truncate: Optional[CohereTruncation] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `multi2vec_cohere` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/cohere/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the named vector.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
truncate: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `CohereMultimodalModel` type or if `truncate` is not a valid value from the `CohereTruncation` type.
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Multi2VecCohereConfig(
baseURL=base_url,
model=model,
dimensions=None,
truncate=truncate,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_contextionary(
name: str,
*,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_contextionary` model.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-contextionary)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecContextionaryConfig(
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_databricks(
name: str,
*,
endpoint: str,
instruction: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec-databricks` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/databricks/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
endpoint: The endpoint to use.
instruction: The instruction strategy to use. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecDatabricksConfig(
endpoint=endpoint,
instruction=instruction,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_mistral(
name: str,
*,
base_url: Optional[AnyHttpUrl] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec-mistral` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/mistral/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecMistralConfig(
baseURL=base_url,
model=model,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_ollama(
name: str,
*,
api_endpoint: Optional[str] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec-ollama` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/ollama/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
api_endpoint: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
Docker users may need to specify an alias, such as `http://host.docker.internal:11434` so that the container can access the host machine.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecOllamaConfig(
apiEndpoint=api_endpoint,
model=model,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_openai(
name: str,
*,
base_url: Optional[AnyHttpUrl] = None,
dimensions: Optional[int] = None,
model: Optional[Union[OpenAIModel, str]] = None,
model_version: Optional[str] = None,
type_: Optional[OpenAIType] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_openai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/openai/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
model_version: The model version to use. Defaults to `None`, which uses the server-defined default.
type_: The type of model to use. Defaults to `None`, which uses the server-defined default.
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: Number of dimensions. Applicable to v3 OpenAI models only. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If `type_` is not a valid value from the `OpenAIType` type.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecOpenAIConfig(
baseURL=base_url,
model=model,
modelVersion=model_version,
type_=type_,
vectorizeClassName=vectorize_collection_name,
dimensions=dimensions,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_aws(
name: str,
region: str,
*,
endpoint: Optional[str] = None,
model: Optional[Union[AWSModel, str]] = None,
service: Union[AWSService, str] = "bedrock",
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_aws` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/aws/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
region: The AWS region to run the model from, REQUIRED.
endpoint: The endpoint to use. Defaults to `None`, which uses the server-defined default.
model: The model to use.
service: The AWS service to use. Defaults to `bedrock`.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecAWSConfig(
model=model,
endpoint=endpoint,
region=region,
service=service,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def img2vec_neural(
name: str,
image_fields: List[str],
*,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _NamedVectorConfigCreate:
"""Create a `Img2VecNeuralConfig` object for use when vectorizing using the `img2vec-neural` model.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/img2vec-neural)
for detailed usage.
Args:
name: The name of the named vector.
image_fields: The image fields to use. This is a required field and must match the property fields of the collection that are defined as `DataType.BLOB`.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If `image_fields` is not a `list`.
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Img2VecNeuralConfig(imageFields=image_fields),
vector_index_config=vector_index_config,
)
@staticmethod
def multi2vec_clip(
name: str,
*,
inference_url: Optional[str] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `multi2vec_clip` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/transformers/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the named vector.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
inference_url: The inference url to use where API requests should go. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Multi2VecClipConfig(
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
inferenceUrl=inference_url,
),
vector_index_config=vector_index_config,
)
@staticmethod
@docstring_deprecated(
deprecated_in="4.9.0",
details="""
This method is deprecated and will be removed in Q2 '25. Please use :meth:`~weaviate.collections.classes.config._NamedVectors.multi2vec_google` instead.
""",
)
@typing_deprecated(
"This method is deprecated and will be removed in Q2 '25. Please use `multi2vec_google` instead."
)
def multi2vec_palm(
name: str,
*,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
location: str,
project_id: str,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
dimensions: Optional[int] = None,
video_interval_seconds: Optional[int] = None,
model_id: Optional[str] = None,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `multi2vec_palm` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the named vector.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
location: Where the model runs. REQUIRED.
project_id: The project ID to use, REQUIRED.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
video_fields: The video fields to use in vectorization.
dimensions: The number of dimensions to use. Defaults to `None`, which uses the server-defined default.
video_interval_seconds: Length of a video interval. Defaults to `None`, which uses the server-defined default.
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
"""
_Warnings.palm_to_google_m2v()
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Multi2VecGoogleConfig(
projectId=project_id,
location=location,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
videoFields=_map_multi2vec_fields(video_fields),
dimensions=dimensions,
modelId=model_id,
videoIntervalSeconds=video_interval_seconds,
),
vector_index_config=vector_index_config,
)
@staticmethod
def multi2vec_google(
name: str,
*,
location: str,
project_id: str,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
dimensions: Optional[int] = None,
video_interval_seconds: Optional[int] = None,
model_id: Optional[str] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `multi2vec_google` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the named vector.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
location: Where the model runs. REQUIRED.
project_id: The project ID to use, REQUIRED.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
video_fields: The video fields to use in vectorization.
dimensions: The number of dimensions to use. Defaults to `None`, which uses the server-defined default.
video_interval_seconds: Length of a video interval. Defaults to `None`, which uses the server-defined default.
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Multi2VecGoogleConfig(
projectId=project_id,
location=location,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
videoFields=_map_multi2vec_fields(video_fields),
dimensions=dimensions,
modelId=model_id,
videoIntervalSeconds=video_interval_seconds,
),
vector_index_config=vector_index_config,
)
@staticmethod
def multi2vec_bind(
name: str,
*,
audio_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
depth_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
imu_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
thermal_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
video_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `multi2vec_bind` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/imagebind/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the named vector.
audio_fields: The audio fields to use in vectorization.
depth_fields: The depth fields to use in vectorization.
image_fields: The image fields to use in vectorization.
imu_fields: The IMU fields to use in vectorization.
text_fields: The text fields to use in vectorization.
thermal_fields: The thermal fields to use in vectorization.
video_fields: The video fields to use in vectorization.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Multi2VecBindConfig(
audioFields=_map_multi2vec_fields(audio_fields),
depthFields=_map_multi2vec_fields(depth_fields),
imageFields=_map_multi2vec_fields(image_fields),
IMUFields=_map_multi2vec_fields(imu_fields),
textFields=_map_multi2vec_fields(text_fields),
thermalFields=_map_multi2vec_fields(thermal_fields),
videoFields=_map_multi2vec_fields(video_fields),
),
vector_index_config=vector_index_config,
)
    @staticmethod
    def multi2vec_voyageai(
        name: str,
        *,
        base_url: Optional[AnyHttpUrl] = None,
        model: Optional[Union[VoyageMultimodalModel, str]] = None,
        truncation: Optional[bool] = None,
        output_encoding: Optional[str] = None,
        image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
        text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
        vector_index_config: Optional[_VectorIndexConfigCreate] = None,
        vectorize_collection_name: bool = True,
    ) -> _NamedVectorConfigCreate:
        """Create a named vector using the `multi2vec_voyageai` model.
        See the [documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings-multimodal)
        for detailed usage.
        Args:
            name: The name of the named vector.
            vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
            vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
            model: The model to use. Defaults to `None`, which uses the server-defined default.
            truncation: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
            output_encoding: The output encoding to request. NOTE(review): accepted but currently not forwarded to the vectorizer config — confirm whether this is intentional.
            base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
            image_fields: The image fields to use in vectorization.
            text_fields: The text fields to use in vectorization.
        Raises:
            pydantic.ValidationError: If `model` is not a valid value from the `VoyageaiMultimodalModel` type.
        """
        # NOTE(review): `output_encoding` and `vectorize_collection_name` are accepted by
        # this method but never passed into `_Multi2VecVoyageaiConfig` below — verify
        # against the config class whether they should be forwarded or removed.
        return _NamedVectorConfigCreate(
            name=name,
            vectorizer=_Multi2VecVoyageaiConfig(
                baseURL=base_url,
                model=model,
                truncation=truncation,
                imageFields=_map_multi2vec_fields(image_fields),
                textFields=_map_multi2vec_fields(text_fields),
            ),
            vector_index_config=vector_index_config,
        )
    @staticmethod
    def multi2vec_nvidia(
        name: str,
        *,
        base_url: Optional[AnyHttpUrl] = None,
        model: Optional[str] = None,
        truncation: Optional[bool] = None,
        output_encoding: Optional[str] = None,
        image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
        text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
        vector_index_config: Optional[_VectorIndexConfigCreate] = None,
        vectorize_collection_name: bool = True,
    ) -> _NamedVectorConfigCreate:
        """Create a named vector using the `multi2vec_nvidia` model.
        See the [documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings-multimodal)
        for detailed usage.
        Args:
            name: The name of the named vector.
            vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
            vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
            model: The model to use. Defaults to `None`, which uses the server-defined default.
            truncation: The truncation strategy to use. Defaults to `None`, which uses the server-defined default.
            output_encoding: The output encoding to request. NOTE(review): accepted but currently not forwarded to the vectorizer config — confirm whether this is intentional.
            base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
            image_fields: The image fields to use in vectorization.
            text_fields: The text fields to use in vectorization.
        Raises:
            pydantic.ValidationError: If `model` is not a valid value from the `NvidiaMultimodalModel` type.
        """
        # NOTE(review): `output_encoding` and `vectorize_collection_name` are accepted by
        # this method but never passed into `_Multi2VecNvidiaConfig` below — verify
        # against the config class whether they should be forwarded or removed.
        return _NamedVectorConfigCreate(
            name=name,
            vectorizer=_Multi2VecNvidiaConfig(
                baseURL=base_url,
                model=model,
                truncation=truncation,
                imageFields=_map_multi2vec_fields(image_fields),
                textFields=_map_multi2vec_fields(text_fields),
            ),
            vector_index_config=vector_index_config,
        )
@staticmethod
def ref2vec_centroid(
name: str,
reference_properties: List[str],
*,
method: Literal["mean"] = "mean",
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `ref2vec_centroid` model.
See the [documentation](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/text2vec-gpt4all)
for detailed usage.
Args:
name: The name of the named vector.
reference_properties: The reference properties to use in vectorization, REQUIRED.
method: The method to use. Defaults to `mean`.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Ref2VecCentroidConfig(
referenceProperties=reference_properties,
method=method,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_azure_openai(
name: str,
resource_name: str,
deployment_id: str,
*,
base_url: Optional[AnyHttpUrl] = None,
dimensions: Optional[int] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_azure_openai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/openai-azure/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
resource_name: The resource name to use, REQUIRED.
deployment_id: The deployment ID to use, REQUIRED.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: The dimensionality of the vectors. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecAzureOpenAIConfig(
baseURL=base_url,
dimensions=dimensions,
model=model,
resourceName=resource_name,
deploymentId=deployment_id,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_gpt4all(
name: str,
*,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_gpt4all` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/gpt4all/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecGPT4AllConfig(
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_huggingface(
name: str,
*,
model: Optional[str] = None,
passage_model: Optional[str] = None,
query_model: Optional[str] = None,
endpoint_url: Optional[AnyHttpUrl] = None,
wait_for_model: Optional[bool] = None,
use_gpu: Optional[bool] = None,
use_cache: Optional[bool] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_huggingface` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/huggingface/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
passage_model: The passage model to use. Defaults to `None`, which uses the server-defined default.
query_model: The query model to use. Defaults to `None`, which uses the server-defined default.
endpoint_url: The endpoint URL to use. Defaults to `None`, which uses the server-defined default.
wait_for_model: Whether to wait for the model to be loaded. Defaults to `None`, which uses the server-defined default.
use_gpu: Whether to use the GPU. Defaults to `None`, which uses the server-defined default.
use_cache: Whether to use the cache. Defaults to `None`, which uses the server-defined default.
Raises:
pydantic.ValidationError: If the arguments passed to the function are invalid.
It is important to note that some of these variables are mutually exclusive.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/huggingface/embeddings#vectorizer-parameters) for more details.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecHuggingFaceConfig(
model=model,
passageModel=passage_model,
queryModel=query_model,
endpointURL=endpoint_url,
waitForModel=wait_for_model,
useGPU=use_gpu,
useCache=use_cache,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
@docstring_deprecated(
deprecated_in="4.9.0",
details="""
This method is deprecated and will be removed in Q2 '25. Please use :meth:`~weaviate.collections.classes.config._NamedVectors.text2vec_google` instead.
""",
)
@typing_deprecated(
"This method is deprecated and will be removed in Q2 '25. Please use `text2vec_google` instead."
)
def text2vec_palm(
name: str,
project_id: str,
*,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
api_endpoint: Optional[str] = None,
model_id: Optional[str] = None,
title_property: Optional[str] = None,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_palm` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
project_id: The project ID to use, REQUIRED.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
api_endpoint: The API endpoint to use without a leading scheme such as `http://`. Defaults to `None`, which uses the server-defined default
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
_Warnings.palm_to_google_t2v()
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecGoogleConfig(
projectId=project_id,
apiEndpoint=api_endpoint,
dimensions=None,
modelId=model_id,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_google(
name: str,
project_id: str,
*,
api_endpoint: Optional[str] = None,
model_id: Optional[str] = None,
title_property: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_palm` model.
See the [documentation]https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
project_id: The project ID to use, REQUIRED.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
api_endpoint: The API endpoint to use without a leading scheme such as `http://`. Defaults to `None`, which uses the server-defined default
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecGoogleConfig(
projectId=project_id,
apiEndpoint=api_endpoint,
dimensions=None,
modelId=model_id,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_google_aistudio(
name: str,
*,
model_id: Optional[str] = None,
title_property: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_palm` model.
See the [documentation]https://weaviate.io/developers/weaviate/model-providers/google/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model_id: The model ID to use. Defaults to `None`, which uses the server-defined default.
title_property: The Weaviate property name for the `gecko-002` or `gecko-003` model to use as the title.
Raises:
pydantic.ValidationError: If `api_endpoint` is not a valid URL.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecGoogleConfig(
projectId=None,
apiEndpoint="generativelanguage.googleapis.com",
dimensions=None,
modelId=model_id,
vectorizeClassName=vectorize_collection_name,
titleProperty=title_property,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_transformers(
name: str,
*,
dimensions: Optional[int] = None,
pooling_strategy: Literal["masked_mean", "cls"] = "masked_mean",
inference_url: Optional[str] = None,
passage_inference_url: Optional[str] = None,
query_inference_url: Optional[str] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec_transformers` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/transformers/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
pooling_strategy: The pooling strategy to use. Defaults to `masked_mean`.
inference_url: The inferenceUrl to use where API requests should go. You can use either this OR passage/query_inference_url. Defaults to `None`, which uses the server-defined default.
passage_inference_url: The inferenceUrl to use where passage API requests should go. You can use either this and query_inference_url OR inference_url. Defaults to `None`, which uses the server-defined default.
query_inference_url: The inferenceUrl to use where query API requests should go. You can use either this and passage_inference_url OR inference_url. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecTransformersConfig(
dimensions=dimensions,
poolingStrategy=pooling_strategy,
vectorizeClassName=vectorize_collection_name,
inferenceUrl=inference_url,
passageInferenceUrl=passage_inference_url,
queryInferenceUrl=query_inference_url,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_jinaai(
name: str,
*,
base_url: Optional[str] = None,
dimensions: Optional[int] = None,
model: Optional[Union[JinaModel, str]] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec-jinaai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings) for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
base_url: The base URL to send the vectorization requests to. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecJinaConfig(
baseURL=base_url,
dimensions=dimensions,
model=model,
vectorizeClassName=vectorize_collection_name,
),
vector_index_config=vector_index_config,
)
@staticmethod
def multi2vec_jinaai(
name: str,
*,
base_url: Optional[AnyHttpUrl] = None,
model: Optional[Union[JinaMultimodalModel, str]] = None,
dimensions: Optional[int] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `multi2vec_jinaai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the named vector.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
dimensions: The number of dimensions for the generated embeddings (only available for some models). Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
text_fields: The text fields to use in vectorization.
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `JinaMultimodalModel` type.
"""
return _NamedVectorConfigCreate(
name=name,
vectorizer=_Multi2VecJinaConfig(
baseURL=base_url,
model=model,
dimensions=dimensions,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_voyageai(
name: str,
*,
model: Optional[Union[VoyageModel, str]] = None,
base_url: Optional[str] = None,
truncate: Optional[bool] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec-jinaai` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
See the
[documentation](https://weaviate.io/developers/weaviate/model-providers/voyageai/embeddings#available-models) for more details.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
truncate: Whether to truncate the input texts to fit within the context length. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecVoyageConfig(
model=model,
vectorizeClassName=vectorize_collection_name,
baseURL=base_url,
truncate=truncate,
dimensions=None,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_weaviate(
name: str,
*,
model: Optional[Union[WeaviateModel, str]] = None,
base_url: Optional[str] = None,
dimensions: Optional[int] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecWeaviateConfig(
model=model,
vectorizeClassName=vectorize_collection_name,
baseURL=base_url,
dimensions=dimensions,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_nvidia(
name: str,
*,
model: Optional[str] = None,
base_url: Optional[str] = None,
truncate: Optional[bool] = None,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec-nvidia` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
model: The model to use. Defaults to `None`, which uses the server-defined default.
See the
[documentation](https://weaviate.io/developers/weaviate/model-providers/nvidia/embeddings#available-models) for more details.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
truncate: Whether to truncate the input texts to fit within the context length. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecNvidiaConfig(
model=model,
vectorizeClassName=vectorize_collection_name,
baseURL=base_url,
truncate=truncate,
),
vector_index_config=vector_index_config,
)
@staticmethod
def text2vec_model2vec(
name: str,
*,
source_properties: Optional[List[str]] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
inference_url: Optional[str] = None,
) -> _NamedVectorConfigCreate:
"""Create a named vector using the `text2vec-model2vec` model.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/model2vec/embeddings)
for detailed usage.
Args:
name: The name of the named vector.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
vector_index_config: The configuration for Weaviate's vector index. Use wvc.config.Configure.VectorIndex to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
inference_url: The inference url to use where API requests should go. Defaults to `None`, which uses the server-defined default.
"""
return _NamedVectorConfigCreate(
name=name,
source_properties=source_properties,
vectorizer=_Text2VecModel2VecConfig(
vectorizeClassName=vectorize_collection_name,
inferenceUrl=inference_url,
),
vector_index_config=vector_index_config,
)
|
_NamedVectors
|
python
|
encode__httpx
|
httpx/_transports/asgi.py
|
{
"start": 1132,
"end": 1352
}
|
class ____(AsyncByteStream):
def __init__(self, body: list[bytes]) -> None:
self._body = body
async def __aiter__(self) -> typing.AsyncIterator[bytes]:
yield b"".join(self._body)
|
ASGIResponseStream
|
python
|
Pylons__pyramid
|
docs/tutorials/wiki2/src/tests/tests/test_views.py
|
{
"start": 3523,
"end": 4944
}
|
class ____:
def _callFUT(self, request):
from tutorial.views.default import edit_page
return edit_page(request)
def _makeContext(self, page):
from tutorial.routes import PageResource
return PageResource(page)
def _addRoutes(self, config):
config.add_route('edit_page', '/{pagename}/edit_page')
config.add_route('view_page', '/{pagename}')
def test_get(self, dummy_config, dummy_request, dbsession):
user = makeUser('foo', 'editor')
page = makePage('abc', 'hello', user)
dbsession.add_all([page, user])
self._addRoutes(dummy_config)
dummy_request.context = self._makeContext(page)
info = self._callFUT(dummy_request)
assert info['pagename'] == 'abc'
assert info['save_url'] == 'http://example.com/abc/edit_page'
def test_submit_works(self, dummy_config, dummy_request, dbsession):
user = makeUser('foo', 'editor')
page = makePage('abc', 'hello', user)
dbsession.add_all([page, user])
self._addRoutes(dummy_config)
dummy_request.method = 'POST'
dummy_request.POST['body'] = 'Hello yo!'
setUser(dummy_config, user)
dummy_request.context = self._makeContext(page)
response = self._callFUT(dummy_request)
assert response.location == 'http://example.com/abc'
assert page.data == 'Hello yo!'
|
Test_edit_page
|
python
|
getsentry__sentry
|
tests/sentry/notifications/api/endpoints/test_user_notification_settings_options.py
|
{
"start": 363,
"end": 500
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-user-notification-options"
@control_silo_test
|
UserNotificationSettingsOptionsBaseTest
|
python
|
getsentry__sentry
|
src/sentry/preprod/analytics.py
|
{
"start": 1103,
"end": 1306
}
|
class ____(analytics.Event):
organization_id: int
project_id: int
user_id: int | None = None
@analytics.eventclass("preprod_artifact.api.admin_rerun_analysis")
|
PreprodArtifactApiListBuildsEvent
|
python
|
huggingface__transformers
|
tests/models/vivit/test_image_processing_vivit.py
|
{
"start": 3004,
"end": 10210
}
|
class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = VivitImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = VivitImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "size"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def test_rescale(self):
# ViVit optionally rescales between -1 and 1 instead of the usual 0 and 1
image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32)
image_processor = self.image_processing_class(**self.image_processor_dict)
rescaled_image = image_processor.rescale(image, scale=1 / 127.5)
expected_image = (image * (1 / 127.5)).astype(np.float32) - 1
self.assertTrue(np.allclose(rescaled_image, expected_image))
rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False)
expected_image = (image / 255.0).astype(np.float32)
self.assertTrue(np.allclose(rescaled_image, expected_image))
def test_call_pil(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PIL videos
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], Image.Image)
# Test not batched input
encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_numpy(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], np.ndarray)
# Test not batched input
encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
def test_call_numpy_4_channels(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
self.image_processor_tester.num_channels = 4
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], np.ndarray)
# Test not batched input
encoded_videos = image_processing(
video_inputs[0],
return_tensors="pt",
image_mean=(0.0, 0.0, 0.0, 0.0),
image_std=(1.0, 1.0, 1.0, 1.0),
input_data_format="channels_first",
).pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(
video_inputs,
return_tensors="pt",
image_mean=(0.0, 0.0, 0.0, 0.0),
image_std=(1.0, 1.0, 1.0, 1.0),
input_data_format="channels_first",
).pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
self.image_processor_tester.num_channels = 3
def test_call_pytorch(self):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True)
for video in video_inputs:
self.assertIsInstance(video, list)
self.assertIsInstance(video[0], torch.Tensor)
# Test not batched input
encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))
# Test batched
encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
self.assertEqual(
tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
)
|
VivitImageProcessingTest
|
python
|
ApeWorX__ape
|
src/ape_ethereum/transactions.py
|
{
"start": 1252,
"end": 1591
}
|
class ____(IntEnum):
"""
An ``Enum`` class representing the status of a transaction.
"""
FAILING = 0
"""The transaction has failed or is in the process of failing."""
NO_ERROR = 1
"""
The transaction is successful and is confirmed or is in the process
of getting confirmed.
"""
|
TransactionStatusEnum
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-burgers-with-no-waste-of-ingredients.py
|
{
"start": 29,
"end": 742
}
|
class ____(object):
def numOfBurgers(self, tomatoSlices, cheeseSlices):
"""
:type tomatoSlices: int
:type cheeseSlices: int
:rtype: List[int]
"""
# let the number of jumbo burger be x, the number of small burger be y:
# 4x + 2y = t
# x + y = c
# =>
# x = t/2-c
# y = 2c-t/2
# since x, y are natural numbers
# => t/2 is integer, t/2-c >= 0, 2c-t/2 >= 0
# => t%2 == 0, 2c <= t <= 4c
return [tomatoSlices//2-cheeseSlices, 2*cheeseSlices - tomatoSlices//2] \
if tomatoSlices%2 == 0 and 2*cheeseSlices <= tomatoSlices <= 4*cheeseSlices \
else []
|
Solution
|
python
|
joke2k__faker
|
faker/providers/job/pt_BR/__init__.py
|
{
"start": 130,
"end": 20212
}
|
class ____(BaseProvider):
jobs = [
"Acompanhante",
"Açougueiro",
"Acupunturista",
"Adestrador de animais",
"Administrador",
"Administrador de banco de dados DBA",
"Administrador de redes",
"Administrador público",
"Advogado",
"Aeromoça",
"Aeronauta",
"Aeroviário",
"Afiador de ferramentas",
"Agente comunitário de saúde",
"Agente de combate à endemias",
"Agente de defesa sanitária",
"Agente de defesa sanitária animal",
"Agente de viagens",
"Agente funerário",
"Agente penitenciário",
"Agricultor",
"Agrimensor",
"Agrônomo",
"Ajudante de produção",
"Alergologista",
"Alfaiate",
"Almirante",
"Almoxarife",
"Alpinista",
"Ambientalista",
"Ambulante",
"Amolador de ferramentas",
"Analista de sistemas",
"Anestesiologista",
"Angiologista",
"Antropólogo",
"Apicultor",
"Apontador de mão-de-obra",
"Apresentador",
"Árbitro e mediador",
"Argumentista",
"Armador",
"Armeiro",
"Arqueólogo",
"Arquiteto",
"Arquivista",
"Arranjador musical",
"Arrumadeira",
"Artesão",
"Artista de circo",
"Artista plástico",
"Artista/Técnico em espetáculos de diversões",
"Ascensorista",
"Assessor de imprensa",
"Assessor parlamentar",
"Assistente administrativo",
"Assistente de câmera",
"Assistente de direção",
"Assistente de produção",
"Assistente social",
"Astrofísico",
"Astrólogo",
"Astronauta",
"Astrônomo",
"Atendente",
"Atleta de arremesso de peso",
"Atleta de canoagem",
"Atleta de nado sincronizado",
"Atleta de tiro com arco",
"Ator",
"Atuário",
"Auditor",
"Auxiliar administrativo",
"Auxiliar de reprografia",
"Auxiliar de serviços gerais",
"Avalista",
"Aviador",
"Babá",
"Babysitter",
"Bailarina",
"Baixista",
"Balconista",
"Bancário",
"Barbeiro",
"Barman",
"Bartender",
"Baterista",
"Bedel",
"Berçarista",
"Bibliotecário",
"Biblioteconomista",
"Biólogo",
"Biomédico",
"Bioquímico",
"Biotecnólogo",
"Bóia-fria",
"Bombeiro",
"Borracheiro",
"Boticário",
"Boxeador",
"Brigadeiro",
"Broker/Corretor da bolsa de valores",
"Cabeleireiro",
"Cabo",
"Caça-talentos/Olheiro",
"Cadeirinha",
"Cadista",
"Caixa",
"Caldeireiro",
"Cambista",
"Camelô",
"Cameraman",
"Caminhoneiro",
"Cancerologista ou Oncologista",
"Cantor",
"Capataz",
"Capelão",
"Capitão",
"Capoeirista",
"Cardiologista",
"Carnavalesco",
"Carpinteiro",
"Cartazeiro",
"Carteiro",
"Cartógrafo",
"Cartunista",
"Catador de carangueijos",
"Catador de material reciclável",
"Cenógrafo",
"Cenotécnico",
"Ceramista",
"Cerimonialista",
"Chapeiro",
"Chargista",
"Chaveiro",
"Chefe de cozinha",
"Ciclista",
"Cientista",
"Cientista da informação e documentação",
"Cientista de alimentos",
"Cientista político",
"Cientista social",
"Cineasta",
"Cinegrafista",
"Cinematográfo",
"Cirurgião bucal",
"Cirurgião dentista",
"Clap loader",
"Clarinetista",
"Classificador contábil",
"Clínico geral",
"Co-piloto",
"Coach",
"Cobaia Médica",
"Cobrador de ônibus",
"Cobrador de pedágio",
"Coloproctologista",
"Comandante",
"Comerciante",
"Comissário de bordo",
"Compositor",
"Comprador",
"Confeiteiro",
"Conferente de carga e descarga",
"Conferente de expedição",
"Conferente de recebimento",
"Construtor",
"Consultor",
"Consultor de moda",
"Consultor de radiestesia",
"Contábil",
"Contabilista",
"Contador",
"Contatólogo",
"Continuista",
"Contra regra",
"Contramestre em transporte marítimo",
"Controlador de vôo",
"Controller",
"Coordenador",
"Copeiro",
"Coreógrafo",
"Coronel",
"Corredor de atletismo",
"Corregedor de justiça",
"Corretor da bolsa de valores",
"Corretor de imóveis",
"Corretor de seguros",
"Cortador de cana-de-açucar",
"Costureira",
"Coveiro/Sepultador",
"Cozinheira",
"Crítico",
"Cumim",
"Dançarino",
"Datilógrafo",
"Dedetizador",
"Defensor Público",
"Degustador",
"Delegado",
"Dentista",
"Deputado",
"Dermatologista",
"Desembargador de justiça",
"Desenhista",
"Designer de interiores",
"Designer de jóia",
"Designer de moda",
"Designer de produto ou desenhista industrial",
"Designer gráfico",
"Despachante",
"Diagramador",
"Dialoguista",
"Diarista",
"Digitador",
"Diplomata",
"Diretor de cinema",
"Diretor de fotografia",
"Diretor de produção",
"DJ",
"Dogueiro",
"Dublador",
"Dublê",
"Ecólogo",
"Economista",
"Economista doméstico",
"Editor",
"Editor de mesa de corte",
"Educador",
"Educador integrado à saúde pública",
"Eletricista",
"Eletricista de automóveis",
"Embaixador",
"Embalador",
"Embalsamador",
"Empacotador",
"Empregado doméstico",
"Empresário",
"Encanador",
"Encarregado de manutenção predial",
"Endocrinologista",
"Endodontista",
"Enfermeiro",
"Engenheiro acústico",
"Engenheiro aeronáutico",
"Engenheiro agrícola",
"Engenheiro agrimensor",
"Engenheiro agrônomo",
"Engenheiro ambiental",
"Engenheiro cartográfico",
"Engenheiro civil",
"Engenheiro de alimentos",
"Engenheiro de aquicultura",
"Engenheiro de computação",
"Engenheiro de controle e automação",
"Engenheiro de energia",
"Engenheiro de ergonomia",
"Engenheiro de horticultura",
"Engenheiro de iluminação",
"Engenheiro de manufatura",
"Engenheiro de materiais",
"Engenheiro de minas",
"Engenheiro de petróleo",
"Engenheiro de processos",
"Engenheiro de produção agroindustrial",
"Engenheiro de produto ou produção",
"Engenheiro de projetos",
"Engenheiro de segurança do trabalho",
"Engenheiro de som",
"Engenheiro de supply chain ou logística",
"Engenheiro de telecomunicações",
"Engenheiro de transportes",
"Engenheiro elétrico",
"Engenheiro físico",
"Engenheiro florestal",
"Engenheiro industrial",
"Engenheiro mecânico",
"Engenheiro mecatrônico",
"Engenheiro metalúrgico",
"Engenheiro naval",
"Engenheiro petroquímico",
"Engenheiro químico",
"Engenheiro sanitarista",
"Engenheiro têxtil",
"Engraxate",
"Enólogo",
"Entalhador",
"Epidemiólogo",
"Escoteiro",
"Escritor",
"Escriturário",
"Escrivão",
"Escultor",
"Esgrimista",
"Especialista em agronegócios",
"Espeleologista",
"Estampador de tecidos",
"Estatístico",
"Esteticista",
"Estilista",
"Estivador",
"Estofador",
"Estoquista",
"Farmacêutico",
"Faturista",
"Faxineiro",
"Feirante",
"Ferramenteiro",
"Ferreiro",
"Ferroviário",
"Figurante",
"Figurinista",
"Filósofo",
"Fiscal",
"Físico",
"Físico nuclear",
"Fisiculturista",
"Fisioterapeuta",
"Flanelinha",
"Flautista",
"Florista",
"Fonoaudiólogo",
"Forneiro",
"Fotógrafo",
"Frentista",
"Fresador",
"Fundidor",
"Fundidor de placa de gesso",
"Funileiro",
"Gagsman",
"Gandula",
"Garçom",
"Gari",
"Garimpeiro",
"Gastroenterologista",
"Gastrônomo",
"General",
"Geofísico",
"Geógrafo",
"Geólogo",
"Geradorista",
"Gerente de banco",
"Gerente de inovações ou novos negócios",
"Gerente de riscos em seguros",
"Gerente de vendas",
"Geriatra",
"Gestor ambiental",
"Gestor de qualidade",
"Gestor de recursos humanos",
"Gestor de tecnologia da informação",
"Gestor público",
"Ginasta artística",
"Ginasta rítmica",
"Ginecologista",
"Gourmet",
"Governador",
"Governanta",
"Grafologista",
"Gravurista",
"Guarda ou policial rodoviário",
"Guarda roupeiro",
"Guardador de veículos",
"Guia turistico",
"Guincheiro",
"Guitarrista",
"Harpista",
"Headhunter",
"Hematologista",
"Historiador",
"Homeopata",
"Hostess",
"Ilustrador",
"Implantodontista",
"Impressor",
"Imunologista",
"Infectologista",
"Inspetor",
"Instalador de linha telefônica",
"Instalador de painéis",
"Instrumentador cirúrgico",
"Instrumentista musical",
"Instrutor",
"Intérprete",
"Intérprete de Bíblias",
"Intérprete e tradutor de língua de sinais",
"Investigador de Polícia",
"Investigador particular",
"Jangadeiro",
"Jardineiro",
"Jogador de badminton",
"Jogador de basquete",
"Jogador de bocha",
"Jogador de boliche",
"Jogador de futebol",
"Jogador de golfe",
"Jogador de handebol",
"Jogador de hóquei",
"Jogador de tênis de mesa",
"Jogador de vôlei",
"Jóquei",
"Jornaleiro",
"Jornalista",
"Judoca",
"Juiz de direito",
"Juiz de futebol",
"Juiz ou árbitro de futebol",
"Karateca",
"Kite-surfer",
"Laboratorista",
"Lactarista hospitalar",
"Lamboteiro",
"Lancheiro",
"Lanterneiro",
"Lapidário",
"Lavador",
"Lavador de veículos",
"Leão de chácara",
"Leiloeiro",
"Leiteiro",
"Lenhador",
"Letrista",
"Levantador de peso",
"Líder comunitário",
"Limpador de vidros",
"Lixeiro/Coletor de lixo",
"Locutor",
"Lubrificador de máquinas",
"Lutador de jiu-jitsu",
"Lutador de karatê",
"Lutador de kung fu",
"Lutador de luta livre",
"Lutador de taekwondo",
"Luthier",
"Mãe social",
"Maestro",
"Mágico",
"Maitre",
"Major",
"Manicure",
"Manobrista",
"Maquiador",
"Maquinista",
"Marcador de luz",
"Marceneiro",
"Marechal",
"Marinheiro",
"Marketeiro",
"Massagista",
"Massoterapeuta",
"Matemático",
"Mecânico",
"Mecânico de vôo",
"Mecanógrafo",
"Médico",
"Médico cirurgião",
"Médico geneticista",
"Médico legista",
"Médico nuclear",
"Meeiro",
"Mensageiro",
"Meredeira",
"Mergulhador",
"Mestre cervejeiro",
"Mestre-de-obras",
"Metalúrgico",
"Meteorologista",
"Microfonista",
"Militar da Aeronáutica",
"Militar da Marinha",
"Militar do Exército",
"Ministro",
"Modelista",
"Modelo",
"Moldador",
"Moldureiro",
"Moleiro",
"Montador",
"Montador de negativos",
"Motofrete",
"Motorista",
"Mototaxista",
"Museólogo",
"Músico",
"Musicoterapeuta",
"Nadador",
"Naturólogo",
"Navegador",
"Necromaquiador",
"Nefrologista",
"Neonatologista",
"Neurocirurgião",
"Neurologista",
"Notário",
"Numerólogo",
"Nutricionista",
"Nutrologista",
"Obstetra",
"Oceanógrafo",
"Oculista",
"Odontologista estético",
"Odontologista legal",
"Odontologista preventivo e social",
"Odontopediatra",
"Office-boy",
"Oficial de justiça",
"Oftalmologista",
"Ombudsman",
"Operador de bombas",
"Operador de telemarketing",
"Operador de vídeo",
"Optometrista",
"Orçamentista",
"Orientador educacional",
"Ortesista",
"Ortodontista",
"Ortopedista",
"Ortoptista",
"Otorrinolaringologista",
"Ourives",
"Paginador",
"Paisagista",
"Panfleteiro",
"Panificador/Padeiro",
"Paparazzo",
"Papiloscopista",
"Pára-quedista",
"Paramédico",
"Parteira tradicional",
"Passador",
"Pastilheiro",
"Patinador",
"Patologista",
"Patologista oral",
"Peão de rodeiro",
"Pecuarista",
"Pedagogo",
"Pediatra",
"Pedicure",
"Pedreiro",
"Peixeiro",
"Penhorista",
"Percursionista",
"Perfumista",
"Perfusionista",
"Perito criminal",
"Perito judicial",
"Personal stylist",
"Personal trainer",
"Pescador",
"Pesquisador",
"Petroleiro",
"Pianista",
"Piloto automobilístico",
"Piloto de avião",
"Pintor",
"Pizzaiolo",
"Plastimodelista",
"Pneumologista",
"Podólogo",
"Policial civil",
"Policial federal",
"Policial militar",
"Polidor de produção",
"Político",
"Porteiro",
"Portuário",
"Prático",
"Prefeito",
"Prensista",
"Preparador de máquinas",
"Presidente da República",
"Procurador de justiça",
"Produtor de audio visual",
"Produtor de eventos",
"Produtor de multimídia",
"Produtor editorial",
"Produtor fonográfico",
"Produtor musical",
"Professor",
"Profissional de áudio",
"Profissional de cinema",
"Profissional de comércio exterior",
"Profissional de educação física",
"Profissional de efeitos especiais",
"Profissional de hotelaria",
"Profissional de informática",
"Profissional de linguística",
"Profissional de logística",
"Profissional de manutenção industrial",
"Profissional de marketing",
"Profissional de rádio e tv",
"Profissional de reciclagem",
"Profissional de recursos humanos",
"Profissional de relacionamento com investidores RI",
"Profissional de relações internacionais",
"Profissional de relações públicas",
"Profissional de tecnologia de laticínios",
"Programador",
"Projetista mecânico",
"Promotor de eventos",
"Promotor de vendas",
"Promotor público/de justiça",
"Protesista",
"Protético dentário",
"Psicólogo",
"Psicomotricista",
"Psicopedagogo",
"Psiquiatra",
"Publicitário",
"Quadrinista",
"Químico",
"Químico farmacêutico",
"Quiropraxista",
"Quitandeiro",
"Radialista",
"Radialista programador",
"Radiologista",
"Radiooperador de vôo",
"Radioterapêutico",
"Rebarbador de metal",
"Recepcionista",
"Recreador",
"Redator",
"Regente",
"Relações públicas",
"Remador",
"Repórter",
"Repositor",
"Representante comercial",
"Restaurador",
"Retificador",
"Reumatologista",
"Revendedor",
"Revisor",
"Roteirista",
"Sacoleira",
"Salgadeira",
"Salva-vidas",
"Sapateiro",
"Sargento",
"Saxofonista",
"Secretária",
"Seguidor de compras",
"Segurança particular",
"Selecionador de pessoal",
"Senador",
"Separador",
"Seringueiro",
"Serralheiro",
"Servente-de-obras",
"Serventuário",
"Sexólogo",
"Síndico",
"Skatista",
"Sociólogo",
"Soldado",
"Soldador",
"Somelier",
"Sonoplasta",
"Subprefeito",
"Supervisor",
"Surfista",
"Sushiman",
"Tabelião",
"Taifeiro",
"Tapeceiro",
"Tatuador",
"Taxidermista/Embalsamador",
"Taxista",
"Tecelão",
"Técnico de gesso",
"Técnico de som",
"Técnico em agropecuária",
"Técnico em arquivo",
"Técnico em aviação",
"Técnico em desporto",
"Técnico em documentação",
"Técnico em edificações",
"Técnico em hardware",
"Técnico em higiene dentária",
"Técnico em óptica",
"Técnico em radiologia",
"Técnico em rede",
"Técnico em segurança do trabalho",
"Técnico em taquigrafia",
"Técnico em tratamento de água",
"Técnico tributarista",
"Tecnólogo em automação industrial",
"Tecnólogo em Ciências das plantas medicinais",
"Tecnólogo em desenvolvimento social",
"Tecnólogo em esporte e lazer",
"Tecnólogo em geoprocessamento",
"Tecnólogo em irrigação e drenagem",
"Tecnólogo em jogos digitais",
"Tecnólogo em navegação fluvial",
"Tecnólogo em negócios imobiliários",
"Tecnólogo em papel e celulose",
"Tecnólogo em processos químicos",
"Tecnólogo em produção de bebidas",
"Tecnólogo em produção moveleira",
"Tecnólogo em produção Sucroalcooleira",
"Tecnólogo em recursos pesqueiros",
"Tecnólogo em rochas ornamentais",
"Tecnólogo em silvicultura",
"Tecnólogo em tecnologia da madeira",
"Telefonista",
"Telegrafista",
"Tenente",
"Tenista",
"Teólogo",
"Terapeuta floral",
"Terapeuta Holístico",
"Terapeuta ocupacional",
"Tesoureiro",
"Timoneiro",
"Tintureiro",
"Topógrafo",
"Torneiro mecânico",
"Torreiro/Torrista",
"Tosador",
"Toxicologista",
"Tradutor",
"Transcritor",
"Transportador",
"Traumatologista",
"Treinador",
"Triatleta",
"Trilheiro ou músico de cinema",
"Trompetista",
"Turismólogo",
"Ufólogo",
"Urbanista",
"Urologista",
"Velejador",
"Vendedor",
"Ventríloquo",
"Vereador",
"Veterinário",
"Vigia parlamentar",
"Vigilante noturno/diurno",
"Violonista",
"Vistoriador de sinistros",
"Viveirista",
"Webdesigner",
"Webmaster",
"Windsurfer",
"Xilógrafo",
"Zelador",
"Zootecnista",
]
|
Provider
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_iana_timezone.py
|
{
"start": 761,
"end": 1810
}
|
class ____(ColumnMapMetricProvider):
    """Column map metric: each value must be a valid IANA timezone name."""

    # Id string used to reference this metric from expectations.
    condition_metric_name = "column_values.iana_timezone"

    # Core logic for the PandasExecutionEngine: elementwise validity check.
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column.apply(is_valid_timezone)

    # Business logic for the SqlAlchemyExecutionEngine (not implemented).
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # Core logic for the SparkDFExecutionEngine: wrap the checker in a UDF
    # returning booleans and apply it to the column.
    @column_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column, **kwargs):
        timezone_udf = F.udf(is_valid_timezone, pyspark.types.BooleanType())
        return timezone_udf(column)
# This class defines the Expectation itself
|
ColumnValuesIanaTimezone
|
python
|
spack__spack
|
lib/spack/spack/vendor/macholib/mach_o.py
|
{
"start": 13047,
"end": 14639
}
|
class ____(Structure):
    """Generic Mach-O load command header.

    Every load command begins with a ``cmd`` type identifier and a
    ``cmdsize`` total byte count.
    """

    _fields_ = (("cmd", p_uint32), ("cmdsize", p_uint32))

    def get_cmd_name(self):
        # Prefer the human-readable name from LC_NAMES; fall back to the
        # raw numeric command value when the type is unknown.
        if self.cmd in LC_NAMES:
            return LC_NAMES[self.cmd]
        return self.cmd
# Flag bit: a load command carrying this bit must be understood by the
# dynamic linker for the image to load (names follow <mach-o/loader.h> —
# TODO confirm exact semantics against Apple's header).
LC_REQ_DYLD = 0x80000000
# The original sequentially numbered load command types (0x1 .. 0x17),
# assigned in bulk via tuple unpacking over range().
(
    LC_SEGMENT,
    LC_SYMTAB,
    LC_SYMSEG,
    LC_THREAD,
    LC_UNIXTHREAD,
    LC_LOADFVMLIB,
    LC_IDFVMLIB,
    LC_IDENT,
    LC_FVMFILE,
    LC_PREPAGE,
    LC_DYSYMTAB,
    LC_LOAD_DYLIB,
    LC_ID_DYLIB,
    LC_LOAD_DYLINKER,
    LC_ID_DYLINKER,
    LC_PREBOUND_DYLIB,
    LC_ROUTINES,
    LC_SUB_FRAMEWORK,
    LC_SUB_UMBRELLA,
    LC_SUB_CLIENT,
    LC_SUB_LIBRARY,
    LC_TWOLEVEL_HINTS,
    LC_PREBIND_CKSUM,
) = range(0x1, 0x18)
# Later load command types.  Several are OR'd with LC_REQ_DYLD because
# dyld must understand them to load the image.
LC_LOAD_WEAK_DYLIB = LC_REQ_DYLD | 0x18
LC_SEGMENT_64 = 0x19
LC_ROUTINES_64 = 0x1A
LC_UUID = 0x1B
LC_RPATH = 0x1C | LC_REQ_DYLD
LC_CODE_SIGNATURE = 0x1D
LC_CODE_SEGMENT_SPLIT_INFO = 0x1E
LC_REEXPORT_DYLIB = 0x1F | LC_REQ_DYLD
LC_LAZY_LOAD_DYLIB = 0x20
LC_ENCRYPTION_INFO = 0x21
LC_DYLD_INFO = 0x22
# Same opcode as LC_DYLD_INFO, but with the "required" bit set.
LC_DYLD_INFO_ONLY = 0x22 | LC_REQ_DYLD
LC_LOAD_UPWARD_DYLIB = 0x23 | LC_REQ_DYLD
LC_VERSION_MIN_MACOSX = 0x24
LC_VERSION_MIN_IPHONEOS = 0x25
LC_FUNCTION_STARTS = 0x26
LC_DYLD_ENVIRONMENT = 0x27
LC_MAIN = 0x28 | LC_REQ_DYLD
LC_DATA_IN_CODE = 0x29
LC_SOURCE_VERSION = 0x2A
LC_DYLIB_CODE_SIGN_DRS = 0x2B
LC_ENCRYPTION_INFO_64 = 0x2C
LC_LINKER_OPTION = 0x2D
LC_LINKER_OPTIMIZATION_HINT = 0x2E
LC_VERSION_MIN_TVOS = 0x2F
LC_VERSION_MIN_WATCHOS = 0x30
LC_NOTE = 0x31
LC_BUILD_VERSION = 0x32
LC_DYLD_EXPORTS_TRIE = 0x33 | LC_REQ_DYLD
LC_DYLD_CHAINED_FIXUPS = 0x34 | LC_REQ_DYLD
LC_FILESET_ENTRY = 0x35 | LC_REQ_DYLD
# this is really a union.. but whatever
|
load_command
|
python
|
great-expectations__great_expectations
|
great_expectations/types/connect_args.py
|
{
"start": 41,
"end": 168
}
|
class ____(TypedDict, total=False):
    """Type definition for connection arguments.

    ``total=False`` makes every key optional, so callers may supply any
    subset of these arguments.
    """

    # Private key material for key-pair authentication — presumably a
    # PEM-encoded string; TODO confirm expected format with callers.
    private_key: Optional[str]
|
ConnectArgs
|
python
|
fluentpython__example-code
|
10-seq-hacking/vector_v3.py
|
{
"start": 3146,
"end": 5542
}
|
class ____:
    """An n-dimensional vector of C doubles.

    Supports iteration, repr/str/bytes conversion, equality, length,
    slicing (which yields new vectors), and read-only single-letter
    shortcuts ``x``/``y``/``z``/``t`` for the first four components.
    """

    typecode = 'd'

    def __init__(self, components):
        # Store the components in a compact array of doubles.
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib caps the length of the output for very long vectors.
        body = reprlib.repr(self._components)
        body = body[body.find('['):-1]
        return f'Vector({body})'

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        header = bytes([ord(self.typecode)])
        return header + bytes(self._components)

    def __eq__(self, other):
        return tuple(self) == tuple(other)

    def __abs__(self):
        return math.sqrt(sum(component * component for component in self))

    def __bool__(self):
        return bool(abs(self))

    def __len__(self):
        return len(self._components)

    def __getitem__(self, index):
        cls = type(self)
        if isinstance(index, slice):
            # Slicing returns a new Vector, not a bare array slice.
            return cls(self._components[index])
        if isinstance(index, numbers.Integral):
            return self._components[index]
        raise TypeError('{.__name__} indices must be integers'.format(cls))

    # Single-letter attribute shortcuts for the first four components.
    shortcut_names = 'xyzt'

    def __getattr__(self, name):
        cls = type(self)
        if len(name) == 1:
            position = cls.shortcut_names.find(name)
            if 0 <= position < len(self._components):
                return self._components[position]
        raise AttributeError(
            '{.__name__!r} object has no attribute {!r}'.format(cls, name))

    def __setattr__(self, name, value):
        # Keep shortcut attributes read-only, and reserve the rest of the
        # single-letter lowercase namespace for future shortcuts.
        cls = type(self)
        if len(name) == 1:
            if name in cls.shortcut_names:
                error = 'readonly attribute {attr_name!r}'
            elif name.islower():
                error = "can't set attributes 'a' to 'z' in {cls_name!r}"
            else:
                error = ''
            if error:
                raise AttributeError(
                    error.format(cls_name=cls.__name__, attr_name=name))
        super().__setattr__(name, value)

    @classmethod
    def frombytes(cls, octets):
        # First byte encodes the typecode; the rest are the packed doubles.
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)
|
Vector
|
python
|
walkccc__LeetCode
|
solutions/1101. The Earliest Moment When Everyone Become Friends/1101.py
|
{
"start": 0,
"end": 609
}
|
class ____:
    """Disjoint-set (union-find) with union by rank and path compression."""

    def __init__(self, n: int):
        self.count = n                # number of disjoint components
        self.id = list(range(n))      # parent links; id[u] == u for roots
        self.rank = [0] * n           # upper bound on each tree's height

    def unionByRank(self, u: int, v: int) -> None:
        rootU = self._find(u)
        rootV = self._find(v)
        if rootU == rootV:
            return  # already connected; nothing to merge
        # Attach the shallower tree under the deeper one; on a tie,
        # pick rootV as the new root and bump its rank.
        if self.rank[rootU] < self.rank[rootV]:
            self.id[rootU] = rootV
        elif self.rank[rootU] > self.rank[rootV]:
            self.id[rootV] = rootU
        else:
            self.id[rootU] = rootV
            self.rank[rootV] += 1
        self.count -= 1

    def getCount(self) -> int:
        return self.count

    def _find(self, u: int) -> int:
        # Path compression: point u directly at its root.
        if self.id[u] != u:
            self.id[u] = self._find(self.id[u])
        return self.id[u]
|
UnionFind
|
python
|
spyder-ide__spyder
|
spyder/plugins/application/widgets/about.py
|
{
"start": 1059,
"end": 13189
}
|
class ____(QDialog, SvgToScaledPixmap):
    """About dialog showing overview, community and legal info in tabs."""

    PADDING = 5 if MAC else 15

    def __init__(self, parent):
        """Create About Spyder dialog with general information."""
        QDialog.__init__(self, parent)
        self.setWindowFlags(
            self.windowFlags() & ~Qt.WindowContextHelpButtonHint
        )
        self.setWindowTitle(_("About Spyder"))
        self.setWindowIcon(ima.icon("MessageBoxInformation"))
        versions = get_versions()
        # -- Show Git revision for development version
        revlink = ''
        if versions['revision']:
            rev = versions['revision']
            revlink = ("<a href='https://github.com/spyder-ide/spyder/"
                       "commit/%s'>%s</a>" % (rev, rev))
        # -- Style attributes
        font_family = self.font().family()
        font_size = DialogStyle.ContentFontSize
        # -- Labels
        # NOTE: no trailing commas here — previously these assignments had
        # trailing commas, which turned each URL into a 1-tuple and made
        # the hrefs below render as "('https://…',)" (broken links).
        twitter_url = "https://twitter.com/Spyder_IDE"
        facebook_url = "https://www.facebook.com/SpyderIDE"
        youtube_url = "https://www.youtube.com/Spyder-IDE"
        instagram_url = "https://www.instagram.com/spyderide/"
        self.label_overview = QLabel(
            f"""
            <style>
               p, h1 {{margin-bottom: 2em}}
               h1 {{margin-top: 0}}
            </style>
            <div style='font-family: "{font_family}";
                        font-size: {font_size};
                        font-weight: normal;
                        '>
            <br>
            <h1>Spyder IDE</h1>
            <p>
            The Scientific Python Development Environment
            <br>
            <a href="{website_url}">Spyder-IDE.org</a>
            </p>
            <p>
            Python {versions['python']} {versions['bitness']}-bit |
            Qt {versions['qt']} |
            {versions['qt_api']} {versions['qt_api_ver']}
            <br>
            {versions['system']} {versions['release']} ({versions['machine']})
            </p>
            <p>
            <a href="{project_url}">GitHub</a> | <a href="{twitter_url}">
            Twitter</a> |
            <a href="{facebook_url}">Facebook</a> | <a href="{youtube_url}">
            YouTube</a> |
            <a href="{instagram_url}">Instagram</a>
            </p>
            </div>"""
        )
        self.label_community = QLabel(
            f"""
            <div style='font-family: "{font_family}";
                        font-size: {font_size};
                        font-weight: normal;
                        '>
            <br>
            Created by Pierre Raybaut; current maintainer is Carlos Cordoba.
            Developed by the
            <a href="{project_url}/graphs/contributors">international
            Spyder community</a>. Many thanks to all the Spyder beta testers
            and dedicated users.
            <p>For help with Spyder errors and crashes, please read our
            <a href="{trouble_url}">Troubleshooting Guide</a>, and for bug
            reports and feature requests, visit our
            <a href="{project_url}">Github site</a>. For project discussion,
            see our <a href="{forum_url}">Google Group</a>.
            </p>
            <p>
            This project is part of a larger effort to promote and
            facilitate the use of Python for scientific and engineering
            software development.
            The popular Python distributions
            <a href="https://www.anaconda.com/download/">Anaconda</a> and
            <a href="https://winpython.github.io/">WinPython</a>
            also contribute to this plan.
            </p>
            </div>""")
        self.label_legal = QLabel(
            f"""
            <div style='font-family: "{font_family}";
                        font-size: {font_size};
                        font-weight: normal;
                        '>
            <br>
            Copyright © 2009-2020 Spyder Project Contributors and
            <a href="{project_url}/blob/master/AUTHORS.txt">others</a>.
            Distributed under the terms of the
            <a href="{project_url}/blob/master/LICENSE.txt">MIT License</a>.
            <p>
            <p>Certain source files under other compatible permissive
            licenses and/or originally by other authors.
            Spyder 3 theme icons derived from
            <a href="https://fontawesome.com/">Font Awesome</a> 4.7
            (© 2016 David Gandy; SIL OFL 1.1) and
            <a href="http://materialdesignicons.com/">Material Design</a>
            (© 2014 Austin Andrews; SIL OFL 1.1).
            </p>
            <p>
            Splash screen photo by
            <a href="https://unsplash.com/@benchaccounting?utm_source=
            unsplash&utm_medium=referral&utm_content=creditCopyText">Bench
            Accounting</a> on <a href="https://unsplash.com/?utm_source=
            unsplash&utm_medium=referral&utm_content=creditCopyText">Unsplash
            </a>.
            </p>
            <p>
            See the
            <a href="{project_url}/blob/master/NOTICE.txt">NOTICE</a>
            file for full legal information.
            </p>
            </div>
            """)
        # Common setup shared by the three rich-text labels.
        for label in [self.label_overview, self.label_community,
                      self.label_legal]:
            label.setWordWrap(True)
            label.setAlignment(Qt.AlignTop)
            label.setOpenExternalLinks(True)
            label.setTextInteractionFlags(Qt.TextBrowserInteraction)
            label.setContentsMargins(
                (3 if MAC else 1) * self.PADDING,
                0,
                (3 if MAC else 1) * self.PADDING,
                (3 if MAC else 1) * self.PADDING,
            )
        self.label_pic = QLabel(self)
        self.label_pic.setPixmap(
            self.svg_to_scaled_pixmap("spyder_about", rescale=0.45)
        )
        self.label_pic.setAlignment(Qt.AlignBottom)
        # Version/revision/installer info shown under the logo.
        self.info = QLabel(
            f"""
            <div style='font-family: "{font_family}";
                        font-size: {font_size};
                        font-weight: normal;
                        '>
            {versions['spyder']}
            <br>{revlink}
            <br>({versions['installer']})
            <br>
            """
        )
        self.info.setAlignment(Qt.AlignHCenter)
        # -- Scroll areas
        scroll_overview = QScrollArea(self)
        scroll_overview.setWidgetResizable(True)
        scroll_overview.setWidget(self.label_overview)
        scroll_community = QScrollArea(self)
        scroll_community.setWidgetResizable(True)
        scroll_community.setWidget(self.label_community)
        scroll_legal = QScrollArea(self)
        scroll_legal.setWidgetResizable(True)
        scroll_legal.setWidget(self.label_legal)
        # Style for scroll areas needs to be applied after creating them.
        # Otherwise it doesn't have effect.
        for scroll_area in [scroll_overview, scroll_community, scroll_legal]:
            scroll_area.setStyleSheet(self._scrollarea_stylesheet)
        # -- Tabs
        self.tabs = QTabWidget(self)
        self.tabs.addTab(scroll_overview, _('Overview'))
        self.tabs.addTab(scroll_community, _('Community'))
        self.tabs.addTab(scroll_legal, _('Legal'))
        self.tabs.setElideMode(Qt.ElideNone)
        self.tabs.setStyleSheet(self._tabs_stylesheet)
        # -- Buttons
        bbox = SpyderDialogButtonBox(QDialogButtonBox.Ok)
        info_btn = QPushButton(_("Copy version info"))
        bbox.addButton(info_btn, QDialogButtonBox.ActionRole)
        # Apply style to buttons
        bbox.setStyleSheet(self._button_stylesheet)
        # -- Widget setup
        self.setWindowIcon(ima.icon('MessageBoxInformation'))
        self.setModal(False)
        # -- Layout
        piclayout = QVBoxLayout()
        piclayout.addStretch()
        piclayout.addWidget(self.label_pic)
        piclayout.addSpacing(-5)
        piclayout.addWidget(self.info)
        piclayout.addStretch()
        piclayout.setContentsMargins(
            # This makes the left and right margins around the image and info
            # to be the same on Linux and Windows.
            self.PADDING - (0 if MAC else 1) * AppStyle.MarginSize,
            0,
            self.PADDING,
            0
        )
        tabslayout = QHBoxLayout()
        tabslayout.addWidget(self.tabs)
        tabslayout.setSizeConstraint(QLayout.SizeConstraint.SetFixedSize)
        tabslayout.setContentsMargins(0, self.PADDING, 0, 0)
        btmhlayout = QHBoxLayout()
        btmhlayout.addStretch(1)
        btmhlayout.addWidget(bbox)
        btmhlayout.setContentsMargins(0, 0, self.PADDING, self.PADDING)
        btmhlayout.addStretch()
        vlayout = QVBoxLayout()
        vlayout.addLayout(tabslayout)
        vlayout.addLayout(btmhlayout)
        vlayout.setSizeConstraint(QLayout.SizeConstraint.SetFixedSize)
        mainlayout = QHBoxLayout(self)
        mainlayout.addLayout(piclayout)
        # This compensates the margin set for scroll areas to center them on
        # the tabbar
        mainlayout.addSpacing(-self.PADDING)
        mainlayout.addLayout(vlayout)
        # -- Signals
        info_btn.clicked.connect(self.copy_to_clipboard)
        bbox.accepted.connect(self.accept)
        # -- Style
        size = (600, 460) if MAC else ((585, 450) if WIN else (610, 455))
        self.setFixedSize(*size)
        self.setStyleSheet(self._main_stylesheet)

    def copy_to_clipboard(self):
        """Copy the full version info text to the system clipboard."""
        QApplication.clipboard().setText(get_versions_text())

    @property
    def _main_stylesheet(self):
        """Stylesheet for the dialog itself (tabbar base + background)."""
        tabs_stylesheet = PREFERENCES_TABBAR_STYLESHEET.get_copy()
        css = tabs_stylesheet.get_stylesheet()
        # Set background color
        for widget in ["QDialog", "QLabel"]:
            css[widget].setValues(
                backgroundColor=DialogStyle.BackgroundColor
            )
        return css.toString()

    @property
    def _scrollarea_stylesheet(self):
        """Stylesheet for the three tab scroll areas."""
        css = qstylizer.style.StyleSheet()
        # This is the only way to make the scroll areas to have the same
        # background color as the other widgets in the dialog.
        css.setValues(
            backgroundColor=DialogStyle.BackgroundColor
        )
        css.QScrollArea.setValues(
            # Default border color doesn't have enough contrast with the
            # background.
            border=f"1px solid {DialogStyle.BorderColor}",
            # This is necessary to center the tabbar on the scroll area
            marginLeft=f"{self.PADDING}px"
        )
        css.QScrollBar.setValues(
            # Default border color doesn't have enough contrast with the
            # background.
            border=f"1px solid {DialogStyle.BorderColor}",
        )
        return css.toString()

    @property
    def _button_stylesheet(self):
        """Stylesheet for the dialog buttons."""
        css = qstylizer.style.StyleSheet()
        # Increase font size and padding
        css.QPushButton.setValues(
            fontSize=DialogStyle.ButtonsFontSize,
            padding=DialogStyle.ButtonsPadding
        )
        return css.toString()

    @property
    def _tabs_stylesheet(self):
        """Stylesheet for the tab widget."""
        css = qstylizer.style.StyleSheet()
        # This fixes a visual glitch with the tabbar background color
        css.setValues(
            backgroundColor=DialogStyle.BackgroundColor
        )
        css['QTabWidget::pane'].setValues(
            # Set tab pane margins according to the dialog contents and layout
            marginTop=f"{(3 if MAC else 2) * AppStyle.MarginSize}px",
            marginRight=f"{self.PADDING}px",
            marginBottom=f"{(0 if MAC else 2) * AppStyle.MarginSize}px",
            marginLeft="0px",
            # Padding is not necessary in this case because we set a border for
            # the scroll areas.
            padding="0px",
        )
        return css.toString()
def test():
    """Show the About dialog as a quick manual smoke test."""
    from spyder.utils.qthelpers import qapplication

    app = qapplication()  # noqa
    dialog = AboutDialog(None)
    dialog.show()
    sys.exit(dialog.exec_())


if __name__ == '__main__':
    test()
|
AboutDialog
|
python
|
PrefectHQ__prefect
|
tests/utilities/test_templating.py
|
{
"start": 413,
"end": 4846
}
|
class ____:
    """Tests for the ``find_placeholders``/``apply_values`` template
    utilities across plain strings, tuples, and nested mappings, including
    block-document and environment-variable placeholders."""
    def test_empty_template(self):
        # No text -> no placeholders.
        template = ""
        placeholders = find_placeholders(template)
        assert len(placeholders) == 0
    def test_single_placeholder(self):
        template = "Hello {{name}}!"
        placeholders = find_placeholders(template)
        assert len(placeholders) == 1
        assert placeholders.pop().name == "name"
    def test_multiple_placeholders(self):
        template = "Hello {{first_name}} {{last_name}}!"
        placeholders = find_placeholders(template)
        assert len(placeholders) == 2
        names = set(p.name for p in placeholders)
        assert names == {"first_name", "last_name"}
    def test_nested_placeholders(self):
        # Placeholders are discovered inside dict values too.
        template = {"greeting": "Hello {{name}}!", "message": "{{greeting}}"}
        placeholders = find_placeholders(template)
        assert len(placeholders) == 2
        names = set(p.name for p in placeholders)
        assert names == {"name", "greeting"}
    def test_mixed_template(self):
        # Literal text (like "$") around placeholders does not confuse parsing.
        template = "Hello {{name}}! Your balance is ${{balance}}."
        placeholders = find_placeholders(template)
        assert len(placeholders) == 2
        names = set(p.name for p in placeholders)
        assert names == {"name", "balance"}
    def test_invalid_template(self):
        # Tuples are not a supported template container type.
        template = ("{{name}}!",)
        with pytest.raises(ValueError):
            find_placeholders(template)
    def test_nested_templates(self):
        # Discovery recurses through nested mappings.
        template = {"greeting": "Hello {{name}}!", "message": {"text": "{{greeting}}"}}
        placeholders = find_placeholders(template)
        assert len(placeholders) == 2
        names = set(p.name for p in placeholders)
        assert names == {"name", "greeting"}
    def test_template_with_duplicates(self):
        # Repeated placeholders are deduplicated.
        template = "{{x}}{{x}}"
        placeholders = find_placeholders(template)
        assert len(placeholders) == 1
        assert placeholders.pop().name == "x"
    def test_template_with_unconventional_spacing(self):
        # Whitespace inside the braces is ignored.
        template = "Hello {{ first_name }} {{ last_name }}!"
        placeholders = find_placeholders(template)
        assert len(placeholders) == 2
        names = set(p.name for p in placeholders)
        assert names == {"first_name", "last_name"}
    def test_finds_block_document_placeholders(self):
        # "prefect.blocks." prefix marks a block-document placeholder.
        template = "Hello {{prefect.blocks.document.name}}!"
        placeholders = find_placeholders(template)
        assert len(placeholders) == 1
        placeholder = placeholders.pop()
        assert placeholder.name == "prefect.blocks.document.name"
        assert placeholder.type is PlaceholderType.BLOCK_DOCUMENT
    def test_finds_env_var_placeholders(self, monkeypatch):
        # "$" prefix marks an environment-variable placeholder.
        monkeypatch.setenv("MY_ENV_VAR", "VALUE")
        template = "Hello {{$MY_ENV_VAR}}!"
        placeholders = find_placeholders(template)
        assert len(placeholders) == 1
        placeholder = placeholders.pop()
        assert placeholder.name == "$MY_ENV_VAR"
        assert placeholder.type is PlaceholderType.ENV_VAR
    def test_apply_values_clears_placeholder_for_missing_env_vars(self):
        # Unset env vars resolve to an empty string, not left in place.
        template = "{{ $MISSING_ENV_VAR }}"
        values = {"ANOTHER_ENV_VAR": "test_value"}
        result = apply_values(template, values)
        assert result == ""
    def test_finds_nested_env_var_placeholders(self, monkeypatch):
        # Standard and env-var placeholders can coexist in one template.
        monkeypatch.setenv("GREETING", "VALUE")
        template = {"greeting": "Hello {{name}}!", "message": {"text": "{{$GREETING}}"}}
        placeholders = find_placeholders(template)
        assert len(placeholders) == 2
        names = set(p.name for p in placeholders)
        assert names == {"name", "$GREETING"}
        types = set(p.type for p in placeholders)
        assert types == {PlaceholderType.STANDARD, PlaceholderType.ENV_VAR}
    @pytest.mark.parametrize(
        "template,expected",
        [
            (
                '{"greeting": "Hello {{name}}!", "message": {"text": "{{$$}}"}}',
                '{"greeting": "Hello Dan!", "message": {"text": ""}}',
            ),
            (
                '{"greeting": "Hello {{name}}!", "message": {"text": "{{$GREETING}}"}}',
                '{"greeting": "Hello Dan!", "message": {"text": ""}}',
            ),
        ],
    )
    def test_invalid_env_var_placeholder(self, template, expected):
        # Malformed or unset env-var placeholders are replaced by "".
        values = {"name": "Dan"}
        result = apply_values(template, values)
        assert result == expected
|
TestFindPlaceholders
|
python
|
scikit-learn__scikit-learn
|
sklearn/multioutput.py
|
{
"start": 30671,
"end": 40251
}
|
class ____(MetaEstimatorMixin, ClassifierMixin, _BaseChain):
"""A multi-label model that arranges binary classifiers into a chain.
Each model makes a prediction in the order specified by the chain using
all of the available features provided to the model plus the predictions
of models that are earlier in the chain.
For an example of how to use ``ClassifierChain`` and benefit from its
ensemble, see
:ref:`ClassifierChain on a yeast dataset
<sphx_glr_auto_examples_multioutput_plot_classifier_chain_yeast.py>` example.
Read more in the :ref:`User Guide <classifierchain>`.
.. versionadded:: 0.19
Parameters
----------
estimator : estimator
The base estimator from which the classifier chain is built.
order : array-like of shape (n_outputs,) or 'random', default=None
If `None`, the order will be determined by the order of columns in
the label matrix Y.::
order = [0, 1, 2, ..., Y.shape[1] - 1]
The order of the chain can be explicitly set by providing a list of
integers. For example, for a chain of length 5.::
order = [1, 3, 2, 4, 0]
means that the first model in the chain will make predictions for
column 1 in the Y matrix, the second model will make predictions
for column 3, etc.
If order is `random` a random ordering will be used.
cv : int, cross-validation generator or an iterable, default=None
Determines whether to use cross validated predictions or true
labels for the results of previous estimators in the chain.
Possible inputs for cv are:
- None, to use true labels when fitting,
- integer, to specify the number of folds in a (Stratified)KFold,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
chain_method : {'predict', 'predict_proba', 'predict_log_proba', \
'decision_function'} or list of such str's, default='predict'
Prediction method to be used by estimators in the chain for
the 'prediction' features of previous estimators in the chain.
- if `str`, name of the method;
- if a list of `str`, provides the method names in order of
preference. The method used corresponds to the first method in
the list that is implemented by `base_estimator`.
.. versionadded:: 1.5
random_state : int, RandomState instance or None, optional (default=None)
If ``order='random'``, determines random number generation for the
chain order.
In addition, it controls the random seed given at each `base_estimator`
at each chaining iteration. Thus, it is only used when `base_estimator`
exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : bool, default=False
If True, chain progress is output as each model is completed.
.. versionadded:: 1.2
base_estimator : estimator, default="deprecated"
Use `estimator` instead.
.. deprecated:: 1.7
`base_estimator` is deprecated and will be removed in 1.9.
Use `estimator` instead.
Attributes
----------
classes_ : list
A list of arrays of length ``len(estimators_)`` containing the
class labels for each estimator in the chain.
estimators_ : list
A list of clones of base_estimator.
order_ : list
The order of labels in the classifier chain.
chain_method_ : str
Prediction method used by estimators in the chain for the prediction
features.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying `base_estimator` exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
RegressorChain : Equivalent for regression.
MultiOutputClassifier : Classifies each output independently rather than
chaining.
References
----------
Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier
Chains for Multi-label Classification", 2009.
Examples
--------
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.multioutput import ClassifierChain
>>> X, Y = make_multilabel_classification(
... n_samples=12, n_classes=3, random_state=0
... )
>>> X_train, X_test, Y_train, Y_test = train_test_split(
... X, Y, random_state=0
... )
>>> base_lr = LogisticRegression(solver='lbfgs', random_state=0)
>>> chain = ClassifierChain(base_lr, order='random', random_state=0)
>>> chain.fit(X_train, Y_train).predict(X_test)
array([[1., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
>>> chain.predict_proba(X_test)
array([[0.8387, 0.9431, 0.4576],
[0.8878, 0.3684, 0.2640],
[0.0321, 0.9935, 0.0626]])
"""
_parameter_constraints: dict = {
**_BaseChain._parameter_constraints,
"chain_method": [
list,
tuple,
StrOptions(
{"predict", "predict_proba", "predict_log_proba", "decision_function"}
),
],
}
# TODO(1.9): Remove base_estimator from __init__
def __init__(
self,
estimator=None,
*,
order=None,
cv=None,
chain_method="predict",
random_state=None,
verbose=False,
base_estimator="deprecated",
):
super().__init__(
estimator,
order=order,
cv=cv,
random_state=random_state,
verbose=verbose,
base_estimator=base_estimator,
)
self.chain_method = chain_method
@_fit_context(
# ClassifierChain.base_estimator is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, Y, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
**fit_params : dict of string -> object
Parameters passed to the `fit` method of each step.
Only available if `enable_metadata_routing=True`. See the
:ref:`User Guide <metadata_routing>`.
.. versionadded:: 1.3
Returns
-------
self : object
Class instance.
"""
_raise_for_params(fit_params, self, "fit")
super().fit(X, Y, **fit_params)
self.classes_ = [estimator.classes_ for estimator in self.estimators_]
return self
@_available_if_base_estimator_has("predict_proba")
def predict_proba(self, X):
"""Predict probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
Y_prob : array-like of shape (n_samples, n_classes)
The predicted probabilities.
"""
return self._get_predictions(X, output_method="predict_proba")
def predict_log_proba(self, X):
"""Predict logarithm of probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
Y_log_prob : array-like of shape (n_samples, n_classes)
The predicted logarithm of the probabilities.
"""
return np.log(self.predict_proba(X))
@_available_if_base_estimator_has("decision_function")
def decision_function(self, X):
"""Evaluate the decision_function of the models in the chain.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
Y_decision : array-like of shape (n_samples, n_classes)
Returns the decision function of the sample for each model
in the chain.
"""
return self._get_predictions(X, output_method="decision_function")
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.3
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self).add(
estimator=self._get_estimator(),
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
return router
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
# FIXME
tags._skip_test = True
tags.target_tags.single_output = False
tags.target_tags.multi_output = True
return tags
|
ClassifierChain
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-oracleai/llama_index/readers/oracleai/base.py
|
{
"start": 13133,
"end": 14581
}
|
class ____:
"""Splitting text using Oracle chunker."""
def __init__(self, conn: Connection, params: Dict[str, Any]):
self.conn = conn
self.params = params
try:
import oracledb
except ImportError as e:
raise ImportError(
"Unable to import oracledb, please install with "
"`pip install -U oracledb`."
) from e
self._oracledb = oracledb
self._json = json
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
splits = []
try:
cursor = self.conn.cursor()
# returns strings or bytes instead of a locator
self._oracledb.defaults.fetch_lobs = False
cursor.setinputsizes(content=self._oracledb.CLOB)
cursor.execute(
"select t.* from dbms_vector_chain.utl_to_chunks(:content, json(:params)) t",
content=text,
params=self._json.dumps(self.params),
)
while True:
row = cursor.fetchone()
if row is None:
break
d = self._json.loads(row[0])
splits.append(d["chunk_data"])
return splits
except Exception as ex:
print(f"An exception occurred :: {ex}")
traceback.print_exc()
raise
|
OracleTextSplitter
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/alert_test.py
|
{
"start": 960,
"end": 2370
}
|
class ____(DeltaGeneratorTestCase):
"""Test ability to marshall Alert proto."""
@parameterized.expand([(st.error,), (st.warning,), (st.info,), (st.success,)])
def test_st_alert_exceptions(self, alert_func):
"""Test that alert functions throw an exception when a non-emoji is given as an icon."""
with pytest.raises(StreamlitAPIException):
alert_func("some alert", icon="hello world")
@parameterized.expand([(st.error,), (st.warning,), (st.info,), (st.success,)])
def test_st_alert_width_validation(self, alert_func):
"""Test that alert functions throw an exception when an invalid width is provided."""
with pytest.raises(StreamlitInvalidWidthError) as e:
alert_func("some alert", width="invalid")
assert "Invalid width value" in str(e.value)
assert "Width must be either an integer (pixels) or 'stretch'" in str(e.value)
@parameterized.expand([(st.error,), (st.warning,), (st.info,), (st.success,)])
def test_st_alert_negative_width(self, alert_func):
"""Test that alert functions throw an exception when a negative width is provided."""
with pytest.raises(StreamlitInvalidWidthError) as e:
alert_func("some alert", width=-100)
assert "Invalid width value" in str(e.value)
assert "Width must be either an integer (pixels) or 'stretch'" in str(e.value)
|
AlertAPITest
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_type_expressions.py
|
{
"start": 541,
"end": 3560
}
|
class ____:
def _test_table(self, type_):
test_table = Table(
"test_table", MetaData(), Column("x", String), Column("y", type_)
)
return test_table
def _fixture(self):
class MyString(String):
# supersedes any processing that might be on
# String
def bind_expression(self, bindvalue):
return func.lower(bindvalue)
def column_expression(self, col):
return func.lower(col)
return self._test_table(MyString)
def _type_decorator_outside_fixture(self):
class MyString(TypeDecorator):
impl = String
cache_ok = True
def bind_expression(self, bindvalue):
return func.outside_bind(bindvalue)
def column_expression(self, col):
return func.outside_colexpr(col)
return self._test_table(MyString)
def _type_decorator_inside_fixture(self):
class MyInsideString(String):
def bind_expression(self, bindvalue):
return func.inside_bind(bindvalue)
def column_expression(self, col):
return func.inside_colexpr(col)
class MyString(TypeDecorator):
impl = MyInsideString
cache_ok = True
return self._test_table(MyString)
def _type_decorator_both_fixture(self):
class MyDialectString(String):
def bind_expression(self, bindvalue):
return func.inside_bind(bindvalue)
def column_expression(self, col):
return func.inside_colexpr(col)
class MyString(TypeDecorator):
impl = String
cache_ok = True
# this works because when the compiler calls dialect_impl(),
# a copy of MyString is created which has just this impl
# as self.impl
def load_dialect_impl(self, dialect):
return MyDialectString()
# user-defined methods need to invoke explicitly on the impl
# for now...
def bind_expression(self, bindvalue):
return func.outside_bind(self.impl.bind_expression(bindvalue))
def column_expression(self, col):
return func.outside_colexpr(self.impl.column_expression(col))
return self._test_table(MyString)
def _variant_fixture(self, inner_fixture):
type_ = inner_fixture.c.y.type
variant = String(30).with_variant(type_, "default")
return self._test_table(variant)
def _dialect_level_fixture(self):
class ImplString(String):
def bind_expression(self, bindvalue):
return func.dialect_bind(bindvalue)
def column_expression(self, col):
return func.dialect_colexpr(col)
from sqlalchemy.engine import default
dialect = default.DefaultDialect()
dialect.colspecs = {String: ImplString}
return dialect
|
_ExprFixture
|
python
|
pallets__werkzeug
|
src/werkzeug/middleware/http_proxy.py
|
{
"start": 551,
"end": 7834
}
|
class ____:
"""Proxy requests under a path to an external server, routing other
requests to the app.
This middleware can only proxy HTTP requests, as HTTP is the only
protocol handled by the WSGI server. Other protocols, such as
WebSocket requests, cannot be proxied at this layer. This should
only be used for development, in production a real proxy server
should be used.
The middleware takes a dict mapping a path prefix to a dict
describing the host to be proxied to::
app = ProxyMiddleware(app, {
"/static/": {
"target": "http://127.0.0.1:5001/",
}
})
Each host has the following options:
``target``:
The target URL to dispatch to. This is required.
``remove_prefix``:
Whether to remove the prefix from the URL before dispatching it
to the target. The default is ``False``.
``host``:
``"<auto>"`` (default):
The host header is automatically rewritten to the URL of the
target.
``None``:
The host header is unmodified from the client request.
Any other value:
The host header is overwritten with the value.
``headers``:
A dictionary of headers to be sent with the request to the
target. The default is ``{}``.
``ssl_context``:
A :class:`ssl.SSLContext` defining how to verify requests if the
target is HTTPS. The default is ``None``.
In the example above, everything under ``"/static/"`` is proxied to
the server on port 5001. The host header is rewritten to the target,
and the ``"/static/"`` prefix is removed from the URLs.
:param app: The WSGI application to wrap.
:param targets: Proxy target configurations. See description above.
:param chunk_size: Size of chunks to read from input stream and
write to target.
:param timeout: Seconds before an operation to a target fails.
.. versionadded:: 0.14
"""
def __init__(
self,
app: WSGIApplication,
targets: t.Mapping[str, dict[str, t.Any]],
chunk_size: int = 2 << 13,
timeout: int = 10,
) -> None:
def _set_defaults(opts: dict[str, t.Any]) -> dict[str, t.Any]:
opts.setdefault("remove_prefix", False)
opts.setdefault("host", "<auto>")
opts.setdefault("headers", {})
opts.setdefault("ssl_context", None)
return opts
self.app = app
self.targets = {
f"/{k.strip('/')}/": _set_defaults(v) for k, v in targets.items()
}
self.chunk_size = chunk_size
self.timeout = timeout
def proxy_to(
self, opts: dict[str, t.Any], path: str, prefix: str
) -> WSGIApplication:
target = urlsplit(opts["target"])
# socket can handle unicode host, but header must be ascii
host = target.hostname.encode("idna").decode("ascii")
def application(
environ: WSGIEnvironment, start_response: StartResponse
) -> t.Iterable[bytes]:
headers = list(EnvironHeaders(environ).items())
headers[:] = [
(k, v)
for k, v in headers
if not is_hop_by_hop_header(k)
and k.lower() not in ("content-length", "host")
]
headers.append(("Connection", "close"))
if opts["host"] == "<auto>":
headers.append(("Host", host))
elif opts["host"] is None:
headers.append(("Host", environ["HTTP_HOST"]))
else:
headers.append(("Host", opts["host"]))
headers.extend(opts["headers"].items())
remote_path = path
if opts["remove_prefix"]:
remote_path = remote_path[len(prefix) :].lstrip("/")
remote_path = f"{target.path.rstrip('/')}/{remote_path}"
content_length = environ.get("CONTENT_LENGTH")
chunked = False
if content_length not in ("", None):
headers.append(("Content-Length", content_length)) # type: ignore
elif content_length is not None:
headers.append(("Transfer-Encoding", "chunked"))
chunked = True
try:
if target.scheme == "http":
con = client.HTTPConnection(
host, target.port or 80, timeout=self.timeout
)
elif target.scheme == "https":
con = client.HTTPSConnection(
host,
target.port or 443,
timeout=self.timeout,
context=opts["ssl_context"],
)
else:
raise RuntimeError(
"Target scheme must be 'http' or 'https', got"
f" {target.scheme!r}."
)
con.connect()
# safe = https://url.spec.whatwg.org/#url-path-segment-string
# as well as percent for things that are already quoted
remote_url = quote(remote_path, safe="!$&'()*+,/:;=@%")
querystring = environ["QUERY_STRING"]
if querystring:
remote_url = f"{remote_url}?{querystring}"
con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)
for k, v in headers:
if k.lower() == "connection":
v = "close"
con.putheader(k, v)
con.endheaders()
stream = get_input_stream(environ)
while True:
data = stream.read(self.chunk_size)
if not data:
break
if chunked:
con.send(b"%x\r\n%s\r\n" % (len(data), data))
else:
con.send(data)
resp = con.getresponse()
except OSError:
from ..exceptions import BadGateway
return BadGateway()(environ, start_response)
start_response(
f"{resp.status} {resp.reason}",
[
(k.title(), v)
for k, v in resp.getheaders()
if not is_hop_by_hop_header(k)
],
)
def read() -> t.Iterator[bytes]:
while True:
try:
data = resp.read(self.chunk_size)
except OSError:
break
if not data:
break
yield data
return read()
return application
def __call__(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> t.Iterable[bytes]:
path = environ["PATH_INFO"]
app = self.app
for prefix, opts in self.targets.items():
if path.startswith(prefix):
app = self.proxy_to(opts, path, prefix)
break
return app(environ, start_response)
|
ProxyMiddleware
|
python
|
getsentry__sentry
|
src/sentry/profiles/task.py
|
{
"start": 23451,
"end": 41450
}
|
class ____(Exception):
pass
@metrics.wraps("process_profile.symbolicate.request")
def run_symbolicate(
project: Project,
profile: Profile,
modules: list[Any],
stacktraces: list[Any],
frame_order: FrameOrder,
platform: str,
) -> tuple[list[Any], list[Any], bool]:
symbolication_start_time = time()
def on_symbolicator_request() -> None:
duration = time() - symbolication_start_time
if duration > settings.SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT:
raise SymbolicationTimeout
if platform in SHOULD_SYMBOLICATE_JS:
symbolicator_platform = SymbolicatorPlatform.js
else:
symbolicator_platform = SymbolicatorPlatform.native
symbolicator = Symbolicator(
task_kind=SymbolicatorTaskKind(platform=symbolicator_platform),
on_request=on_symbolicator_request,
project=project,
event_id=get_event_id(profile),
)
try:
with sentry_sdk.start_span(op="task.profiling.symbolicate.process_payload"):
response = symbolicate(
symbolicator=symbolicator,
profile=profile,
stacktraces=stacktraces,
modules=modules,
frame_order=frame_order,
platform=platform,
)
if not response:
profile["symbolicator_error"] = {
"type": EventError.NATIVE_INTERNAL_FAILURE,
}
return modules, stacktraces, False
elif response["status"] == "completed":
return (
response.get("modules", modules),
response.get("stacktraces", stacktraces),
True,
)
elif response["status"] == "failed":
profile["symbolicator_error"] = {
"type": EventError.NATIVE_SYMBOLICATOR_FAILED,
"status": response.get("status"),
"message": response.get("message"),
}
return modules, stacktraces, False
else:
profile["symbolicator_error"] = {
"status": response.get("status"),
"type": EventError.NATIVE_INTERNAL_FAILURE,
}
return modules, stacktraces, False
except SymbolicationTimeout:
metrics.incr("process_profile.symbolicate.timeout", sample_rate=1.0)
# returns the unsymbolicated data to avoid errors later
return modules, stacktraces, False
@metrics.wraps("process_profile.symbolicate.process")
def _process_symbolicator_results(
profile: Profile,
modules: list[Any],
stacktraces: list[Any],
frames_sent: set[int],
platform: str,
) -> None:
with sentry_sdk.start_span(op="task.profiling.symbolicate.process_results"):
# update images with status after symbolication
profile["debug_meta"]["images"] = modules
if "version" in profile:
_process_symbolicator_results_for_sample(
profile,
stacktraces,
frames_sent,
platform,
)
return
if platform == "rust":
_process_symbolicator_results_for_rust(profile, stacktraces)
elif platform == "cocoa":
_process_symbolicator_results_for_cocoa(profile, stacktraces)
# rename the profile key to suggest it has been processed
profile["profile"] = profile.pop("sampled_profile")
def _process_symbolicator_results_for_sample(
profile: Profile, stacktraces: list[Any], frames_sent: set[int], platform: str
) -> None:
if platform == "rust":
def truncate_stack_needed(frames: list[dict[str, Any]], stack: list[Any]) -> list[Any]:
# remove top frames related to the profiler (top of the stack)
if frames[stack[0]].get("function", "") == "perf_signal_handler":
stack = stack[2:]
# remove unsymbolicated frames before the runtime calls (bottom of the stack)
if frames[stack[len(stack) - 2]].get("function", "") == "":
stack = stack[:-2]
return stack
elif platform == "cocoa":
def truncate_stack_needed(
frames: list[dict[str, Any]],
stack: list[Any],
) -> list[Any]:
# remove bottom frames we can't symbolicate
if frames[stack[-1]].get("instruction_addr", "") == "0xffffffffc":
return stack[:-2]
return stack
else:
def truncate_stack_needed(
frames: list[dict[str, Any]],
stack: list[Any],
) -> list[Any]:
return stack
symbolicated_frames = stacktraces[0]["frames"]
symbolicated_frames_dict = get_frame_index_map(symbolicated_frames)
if len(frames_sent) > 0:
raw_frames = profile["profile"]["frames"]
new_frames = []
symbolicated_frame_idx = 0
for idx in range(len(raw_frames)):
# If we didn't send the frame to symbolicator, add the raw frame.
if idx not in frames_sent:
new_frames.append(raw_frames[idx])
continue
# If we sent it to symbolicator, add the current symbolicated frame
# to new_frames.
# This works since symbolicated_frames are in the same order
# as raw_frames (except some frames are not sent).
for frame_idx in symbolicated_frames_dict[symbolicated_frame_idx]:
f = symbolicated_frames[frame_idx]
f["platform"] = platform
new_frames.append(f)
# go to the next symbolicated frame result
symbolicated_frame_idx += 1
new_frames_count = (
len(raw_frames)
+ sum(len(frames) for frames in symbolicated_frames_dict.values())
- len(symbolicated_frames_dict)
)
# in case we're dealing with a cocoa stack, we previously made a copy
# of the leaf frame with adjust_instruction_addr = False.
# If the original frame doesn't happen to shows up in the middle
# of another stack, then it'll never be used.
# Therefore we skip this sanity check for cocoa stacks
if platform in SHOULD_SYMBOLICATE_JS:
assert len(new_frames) == new_frames_count
profile["profile"]["frames"] = new_frames
elif symbolicated_frames:
profile["profile"]["frames"] = symbolicated_frames
if platform in SHOULD_SYMBOLICATE:
def get_stack(stack: list[int]) -> list[int]:
new_stack: list[int] = []
for index in stack:
if index in symbolicated_frames_dict:
# the new stack extends the older by replacing
# a specific frame index with the indices of
# the frames originated from the original frame
# should inlines be present
new_stack.extend(symbolicated_frames_dict[index])
else:
new_stack.append(index)
return new_stack
else:
def get_stack(stack: list[int]) -> list[int]:
return stack
stacks = []
for stack in profile["profile"]["stacks"]:
new_stack = get_stack(stack)
if len(new_stack) >= 2:
# truncate some unneeded frames in the stack (related to the profiler itself or impossible to symbolicate)
new_stack = truncate_stack_needed(profile["profile"]["frames"], new_stack)
stacks.append(new_stack)
profile["profile"]["stacks"] = stacks
def _process_symbolicator_results_for_cocoa(profile: Profile, stacktraces: list[Any]) -> None:
for original, symbolicated in zip(profile["sampled_profile"]["samples"], stacktraces):
# remove bottom frames we can't symbolicate
if (
len(symbolicated["frames"]) > 1
and symbolicated["frames"][-1].get("instruction_addr", "") == "0xffffffffc"
):
original["frames"] = symbolicated["frames"][:-2]
else:
original["frames"] = symbolicated["frames"]
def _process_symbolicator_results_for_rust(profile: Profile, stacktraces: list[Any]) -> None:
for original, symbolicated in zip(profile["sampled_profile"]["samples"], stacktraces):
for frame in symbolicated["frames"]:
frame.pop("pre_context", None)
frame.pop("context_line", None)
frame.pop("post_context", None)
# exclude the top frames of the stack as it's related to the profiler itself and we don't want them.
if (
len(symbolicated["frames"]) > 1
and symbolicated["frames"][0].get("function", "") == "perf_signal_handler"
):
original["frames"] = symbolicated["frames"][2:]
else:
original["frames"] = symbolicated["frames"]
"""
This function returns a map {index: [indexes]} that will let us replace a specific
frame index with (potentially) a list of frames indices that originated from that frame.
The reason for this is that the frame from the SDK exists "physically",
and symbolicator then synthesizes other frames for calls that have been inlined
into the physical frame.
Example:
`
fn a() {
b()
}
fb b() {
fn c_inlined() {}
c_inlined()
}
`
this would yield the following from the SDK:
b -> a
after symbolication you would have:
c_inlined -> b -> a
The sorting order is callee to caller (child to parent)
"""
def get_frame_index_map(frames: list[dict[str, Any]]) -> dict[int, list[int]]:
index_map: dict[int, list[int]] = {}
for i, frame in enumerate(frames):
# In case we don't have an `original_index` field, we default to using
# the index of the frame in order to still produce a data structure
# with the right shape.
index_map.setdefault(frame.get("original_index", i), []).append(i)
return index_map
@metrics.wraps("process_profile.deobfuscate_using_symbolicator")
def _deobfuscate_using_symbolicator(project: Project, profile: Profile, debug_file_id: str) -> bool:
symbolication_start_time = time()
def on_symbolicator_request() -> None:
duration = time() - symbolication_start_time
if duration > settings.SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT:
raise SymbolicationTimeout
symbolicator = Symbolicator(
task_kind=SymbolicatorTaskKind(platform=SymbolicatorPlatform.jvm),
on_request=on_symbolicator_request,
project=project,
event_id=get_event_id(profile),
)
try:
with sentry_sdk.start_span(op="task.profiling.deobfuscate.process_payload"):
response = symbolicate(
symbolicator=symbolicator,
profile=profile,
modules=[
{
"uuid": debug_file_id,
"type": "proguard",
}
],
stacktraces=[
{
"frames": convert_android_methods_to_jvm_frames(
profile["profile"]["methods"]
)
},
],
# Methods in a profile aren't inherently ordered, but the order of returned
# inlinees should be caller first.
frame_order=FrameOrder.caller_first,
platform=profile["platform"],
)
if response:
deobfuscation_context = {}
if response["status"] == "failed":
deobfuscation_context["status"] = response["status"]
deobfuscation_context["message"] = response["message"]
if "errors" in response:
deobfuscation_context["errors"] = response["errors"]
sentry_sdk.set_context("profile deobfuscation", deobfuscation_context)
if "stacktraces" in response:
merge_jvm_frames_with_android_methods(
frames=response["stacktraces"][0]["frames"],
methods=profile["profile"]["methods"],
)
return True
else:
sentry_sdk.capture_message("No response from Symbolicator")
except SymbolicationTimeout:
metrics.incr("process_profile.symbolicate.timeout", sample_rate=1.0)
sentry_sdk.capture_message("Deobfuscation via Symbolicator failed")
return False
def get_debug_file_id(profile: Profile) -> str | None:
debug_file_id = profile.get("build_id")
if debug_file_id is None or debug_file_id == "":
return None
try:
return UUID(debug_file_id).hex
except ValueError:
return None
@metrics.wraps("process_profile.deobfuscate")
def _deobfuscate(profile: Profile, project: Project) -> None:
debug_file_id = get_debug_file_id(profile)
if debug_file_id is None:
# we still need to decode signatures
for m in profile["profile"]["methods"]:
if m.get("signature"):
types = deobfuscate_signature(m["signature"])
m["signature"] = format_signature(types)
return
try:
with sentry_sdk.start_span(op="deobfuscate_with_symbolicator"):
success = _deobfuscate_using_symbolicator(
project=project,
profile=profile,
debug_file_id=debug_file_id,
)
sentry_sdk.set_tag("deobfuscated_with_symbolicator_with_success", success)
if success:
return
except Exception as e:
sentry_sdk.capture_exception(e)
def get_event_id(profile: Profile) -> str:
if "chunk_id" in profile:
return profile["chunk_id"]
elif "profile_id" in profile:
return profile["profile_id"]
return profile["event_id"]
def get_data_category(profile: Profile) -> DataCategory:
if profile.get("version") == "2":
return (
DataCategory.PROFILE_CHUNK_UI
if profile["platform"] in UI_PROFILE_PLATFORMS
else DataCategory.PROFILE_CHUNK
)
return DataCategory.PROFILE_INDEXED
@metrics.wraps("process_profile.track_outcome")
def _track_outcome(
profile: Profile,
project: Project,
outcome: Outcome,
categories: list[DataCategory],
reason: str | None = None,
quantity: int = 1,
) -> None:
for category in categories:
track_outcome(
org_id=project.organization_id,
project_id=project.id,
key_id=None,
outcome=outcome,
reason=reason,
timestamp=datetime.now(timezone.utc),
event_id=get_event_id(profile),
category=category,
quantity=quantity,
)
def _track_failed_outcome(profile: Profile, project: Project, reason: str) -> None:
categories = []
if "profiler_id" not in profile:
categories.append(DataCategory.PROFILE)
if profile.get("sampled"):
categories.append(DataCategory.PROFILE_INDEXED)
else:
categories.append(DataCategory.PROFILE_CHUNK)
_track_outcome(
profile=profile,
project=project,
outcome=Outcome.INVALID,
categories=categories,
reason=reason,
)
@metrics.wraps("process_profile.insert_vroom_profile")
def _insert_vroom_profile(profile: Profile) -> bool:
with sentry_sdk.start_span(op="task.profiling.insert_vroom"):
try:
path = "/chunk" if "profiler_id" in profile else "/profile"
response = get_from_profiling_service(
method="POST",
path=path,
json_data=profile,
metric=(
"profiling.profile.payload.size",
{
"type": "chunk" if "profiler_id" in profile else "profile",
"platform": profile["platform"],
},
),
)
sentry_sdk.set_tag("vroom.response.status_code", str(response.status))
reason = "bad status"
if response.status == 204:
return True
elif response.status == 429:
reason = "gcs timeout"
elif response.status == 412:
reason = "duplicate profile"
metrics.incr(
"process_profile.insert_vroom_profile.error",
tags={
"platform": profile["platform"],
"reason": reason,
"status_code": response.status,
},
sample_rate=1.0,
)
return False
except Exception as e:
sentry_sdk.capture_exception(e)
metrics.incr(
"process_profile.insert_vroom_profile.error",
tags={"platform": profile["platform"], "reason": "encountered error"},
sample_rate=1.0,
)
return False
def _push_profile_to_vroom(profile: Profile, project: Project) -> bool:
if _insert_vroom_profile(profile=profile):
return True
_track_failed_outcome(profile, project, "profiling_failed_vroom_insertion")
return False
def prepare_android_js_profile(profile: Profile) -> None:
profile["js_profile"] = {"profile": profile["js_profile"]}
p = profile["js_profile"]
p["platform"] = "javascript"
p["debug_meta"] = profile["debug_meta"]
p["version"] = "1"
p["event_id"] = get_event_id(profile)
p["release"] = profile["release"]
p["dist"] = profile["dist"]
def clean_android_js_profile(profile: Profile) -> None:
p = profile["js_profile"]
del p["platform"]
del p["debug_meta"]
del p["version"]
del p["event_id"]
del p["release"]
del p["dist"]
|
SymbolicationTimeout
|
python
|
openai__openai-python
|
src/openai/types/responses/response_audio_done_event.py
|
{
"start": 199,
"end": 414
}
|
class ____(BaseModel):
sequence_number: int
"""The sequence number of the delta."""
type: Literal["response.audio.done"]
"""The type of the event. Always `response.audio.done`."""
|
ResponseAudioDoneEvent
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/postgresql/pg_catalog.py
|
{
"start": 1340,
"end": 10417
}
|
class ____:
def result_processor(
self, dialect: Dialect, coltype: object
) -> _ResultProcessorType[list[int]]:
def process(value: Any) -> Optional[list[int]]:
if value is None:
return value
return [int(p) for p in value.split(" ")]
return process
REGPROC = REGCLASS # seems an alias
# functions
_pg_cat = func.pg_catalog
quote_ident = _pg_cat.quote_ident
pg_table_is_visible = _pg_cat.pg_table_is_visible
pg_type_is_visible = _pg_cat.pg_type_is_visible
pg_get_viewdef = _pg_cat.pg_get_viewdef
pg_get_serial_sequence = _pg_cat.pg_get_serial_sequence
format_type = _pg_cat.format_type
pg_get_expr = _pg_cat.pg_get_expr
pg_get_constraintdef = _pg_cat.pg_get_constraintdef
pg_get_indexdef = _pg_cat.pg_get_indexdef
# constants
RELKINDS_TABLE_NO_FOREIGN = ("r", "p")
RELKINDS_TABLE = RELKINDS_TABLE_NO_FOREIGN + ("f",)
RELKINDS_VIEW = ("v",)
RELKINDS_MAT_VIEW = ("m",)
RELKINDS_ALL_TABLE_LIKE = RELKINDS_TABLE + RELKINDS_VIEW + RELKINDS_MAT_VIEW
# tables
pg_catalog_meta = MetaData(schema="pg_catalog")
pg_namespace = Table(
"pg_namespace",
pg_catalog_meta,
Column("oid", OID),
Column("nspname", NAME),
Column("nspowner", OID),
)
pg_class = Table(
"pg_class",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("relname", NAME),
Column("relnamespace", OID),
Column("reltype", OID),
Column("reloftype", OID),
Column("relowner", OID),
Column("relam", OID),
Column("relfilenode", OID),
Column("reltablespace", OID),
Column("relpages", Integer),
Column("reltuples", Float),
Column("relallvisible", Integer, info={"server_version": (9, 2)}),
Column("reltoastrelid", OID),
Column("relhasindex", Boolean),
Column("relisshared", Boolean),
Column("relpersistence", CHAR, info={"server_version": (9, 1)}),
Column("relkind", CHAR),
Column("relnatts", SmallInteger),
Column("relchecks", SmallInteger),
Column("relhasrules", Boolean),
Column("relhastriggers", Boolean),
Column("relhassubclass", Boolean),
Column("relrowsecurity", Boolean),
Column("relforcerowsecurity", Boolean, info={"server_version": (9, 5)}),
Column("relispopulated", Boolean, info={"server_version": (9, 3)}),
Column("relreplident", CHAR, info={"server_version": (9, 4)}),
Column("relispartition", Boolean, info={"server_version": (10,)}),
Column("relrewrite", OID, info={"server_version": (11,)}),
Column("reloptions", ARRAY(Text)),
)
pg_type = Table(
"pg_type",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("typname", NAME),
Column("typnamespace", OID),
Column("typowner", OID),
Column("typlen", SmallInteger),
Column("typbyval", Boolean),
Column("typtype", CHAR),
Column("typcategory", CHAR),
Column("typispreferred", Boolean),
Column("typisdefined", Boolean),
Column("typdelim", CHAR),
Column("typrelid", OID),
Column("typelem", OID),
Column("typarray", OID),
Column("typinput", REGPROC),
Column("typoutput", REGPROC),
Column("typreceive", REGPROC),
Column("typsend", REGPROC),
Column("typmodin", REGPROC),
Column("typmodout", REGPROC),
Column("typanalyze", REGPROC),
Column("typalign", CHAR),
Column("typstorage", CHAR),
Column("typnotnull", Boolean),
Column("typbasetype", OID),
Column("typtypmod", Integer),
Column("typndims", Integer),
Column("typcollation", OID, info={"server_version": (9, 1)}),
Column("typdefault", Text),
)
pg_index = Table(
"pg_index",
pg_catalog_meta,
Column("indexrelid", OID),
Column("indrelid", OID),
Column("indnatts", SmallInteger),
Column("indnkeyatts", SmallInteger, info={"server_version": (11,)}),
Column("indisunique", Boolean),
Column("indnullsnotdistinct", Boolean, info={"server_version": (15,)}),
Column("indisprimary", Boolean),
Column("indisexclusion", Boolean, info={"server_version": (9, 1)}),
Column("indimmediate", Boolean),
Column("indisclustered", Boolean),
Column("indisvalid", Boolean),
Column("indcheckxmin", Boolean),
Column("indisready", Boolean),
Column("indislive", Boolean, info={"server_version": (9, 3)}), # 9.3
Column("indisreplident", Boolean),
Column("indkey", INT2VECTOR),
Column("indcollation", OIDVECTOR, info={"server_version": (9, 1)}), # 9.1
Column("indclass", OIDVECTOR),
Column("indoption", INT2VECTOR),
Column("indexprs", PG_NODE_TREE),
Column("indpred", PG_NODE_TREE),
)
pg_attribute = Table(
"pg_attribute",
pg_catalog_meta,
Column("attrelid", OID),
Column("attname", NAME),
Column("atttypid", OID),
Column("attstattarget", Integer),
Column("attlen", SmallInteger),
Column("attnum", SmallInteger),
Column("attndims", Integer),
Column("attcacheoff", Integer),
Column("atttypmod", Integer),
Column("attbyval", Boolean),
Column("attstorage", CHAR),
Column("attalign", CHAR),
Column("attnotnull", Boolean),
Column("atthasdef", Boolean),
Column("atthasmissing", Boolean, info={"server_version": (11,)}),
Column("attidentity", CHAR, info={"server_version": (10,)}),
Column("attgenerated", CHAR, info={"server_version": (12,)}),
Column("attisdropped", Boolean),
Column("attislocal", Boolean),
Column("attinhcount", Integer),
Column("attcollation", OID, info={"server_version": (9, 1)}),
)
pg_constraint = Table(
"pg_constraint",
pg_catalog_meta,
Column("oid", OID), # 9.3
Column("conname", NAME),
Column("connamespace", OID),
Column("contype", CHAR),
Column("condeferrable", Boolean),
Column("condeferred", Boolean),
Column("convalidated", Boolean, info={"server_version": (9, 1)}),
Column("conrelid", OID),
Column("contypid", OID),
Column("conindid", OID),
Column("conparentid", OID, info={"server_version": (11,)}),
Column("confrelid", OID),
Column("confupdtype", CHAR),
Column("confdeltype", CHAR),
Column("confmatchtype", CHAR),
Column("conislocal", Boolean),
Column("coninhcount", Integer),
Column("connoinherit", Boolean, info={"server_version": (9, 2)}),
Column("conkey", ARRAY(SmallInteger)),
Column("confkey", ARRAY(SmallInteger)),
)
pg_sequence = Table(
"pg_sequence",
pg_catalog_meta,
Column("seqrelid", OID),
Column("seqtypid", OID),
Column("seqstart", BigInteger),
Column("seqincrement", BigInteger),
Column("seqmax", BigInteger),
Column("seqmin", BigInteger),
Column("seqcache", BigInteger),
Column("seqcycle", Boolean),
info={"server_version": (10,)},
)
pg_attrdef = Table(
"pg_attrdef",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("adrelid", OID),
Column("adnum", SmallInteger),
Column("adbin", PG_NODE_TREE),
)
pg_description = Table(
"pg_description",
pg_catalog_meta,
Column("objoid", OID),
Column("classoid", OID),
Column("objsubid", Integer),
Column("description", Text(collation="C")),
)
pg_enum = Table(
"pg_enum",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("enumtypid", OID),
Column("enumsortorder", Float(), info={"server_version": (9, 1)}),
Column("enumlabel", NAME),
)
pg_am = Table(
"pg_am",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("amname", NAME),
Column("amhandler", REGPROC, info={"server_version": (9, 6)}),
Column("amtype", CHAR, info={"server_version": (9, 6)}),
)
pg_collation = Table(
"pg_collation",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("collname", NAME),
Column("collnamespace", OID),
Column("collowner", OID),
Column("collprovider", CHAR, info={"server_version": (10,)}),
Column("collisdeterministic", Boolean, info={"server_version": (12,)}),
Column("collencoding", Integer),
Column("collcollate", Text),
Column("collctype", Text),
Column("colliculocale", Text),
Column("collicurules", Text, info={"server_version": (16,)}),
Column("collversion", Text, info={"server_version": (10,)}),
)
pg_opclass = Table(
"pg_opclass",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("opcmethod", NAME),
Column("opcname", NAME),
Column("opsnamespace", OID),
Column("opsowner", OID),
Column("opcfamily", OID),
Column("opcintype", OID),
Column("opcdefault", Boolean),
Column("opckeytype", OID),
)
pg_inherits = Table(
"pg_inherits",
pg_catalog_meta,
Column("inhrelid", OID),
Column("inhparent", OID),
Column("inhseqno", Integer),
Column("inhdetachpending", Boolean, info={"server_version": (14,)}),
)
pg_tablespace = Table(
"pg_tablespace",
pg_catalog_meta,
Column("oid", OID),
Column("spcname", NAME),
Column("spcowner", OID),
Column("spcoptions", ARRAY(Text)),
)
|
_SpaceVector
|
python
|
squidfunk__mkdocs-material
|
material/plugins/projects/plugin.py
|
{
"start": 1969,
"end": 12022
}
|
class ____(BasePlugin[ProjectsConfig]):
    """Build nested MkDocs projects and wire them into the parent site.

    The top-level project resolves all nested project configurations up
    front, builds them in topological order, and records a manifest mapping
    project slugs to configuration files so that nested builds can resolve
    links between projects.
    """

    # Projects builder - only created for the top-level project; nested
    # projects and disabled configurations leave this as None
    builder: ProjectsBuilder = None

    # Initialize plugin
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Initialize incremental builds
        self.is_serve = False
        self.is_dirty = False

        # Hack: Since we're building in topological order, we cannot let MkDocs
        # clean the directory, because it means that nested projects are always
        # deleted before a project is built. We also don't need to restore this
        # functionality, because it's only used once in the process.
        utils.clean_directory = lambda _: _

    # Determine whether we're serving the site
    def on_startup(self, *, command, dirty):
        self.is_serve = command == "serve"
        self.is_dirty = dirty

    # Resolve projects – compared to our other concurrent plugins, this plugin
    # is forced to use a process pool in order to guarantee proper isolation, as
    # MkDocs itself is not thread-safe. Additionally, all project configurations
    # are resolved and written to the cache (if enabled), as it's sufficient to
    # resolve them once on the top-level before projects are built. We might
    # need adjacent project configurations for interlinking projects.
    def on_config(self, config):
        if not self.config.enabled:
            return

        # Skip if projects should not be built - we can only exit here if we're
        # at the top-level, but not when building a nested project
        root = self.config.projects_root_dir is None
        if root and not self.config.projects:
            return

        # Set projects root directory to the top-level project
        if not self.config.projects_root_dir:
            self.config.projects_root_dir = os.path.dirname(
                config.config_file_path
            )

        # Initialize manifest - maps project slugs to configuration file paths
        # relative to the projects root directory
        self.manifest: dict[str, str] = {}
        self.manifest_file = os.path.join(
            self.config.projects_root_dir,
            self.config.cache_dir,
            "manifest.json"
        )

        # Load manifest if it exists and the cache should be used - loading is
        # best-effort, so an unreadable or corrupted manifest is silently
        # ignored and rebuilt below. We deliberately catch only I/O and JSON
        # decoding errors (json.JSONDecodeError subclasses ValueError): the
        # previous bare `except` would also have swallowed exceptions such as
        # KeyboardInterrupt and SystemExit.
        if os.path.isfile(self.manifest_file):
            try:
                with open(self.manifest_file) as f:
                    self.manifest = json.load(f)
            except (OSError, ValueError):
                pass

        # Building the top-level project, we must resolve and load all project
        # configurations, as we need all information upfront to build them in
        # the correct order, and to resolve links between projects. Furthermore,
        # the author might influence a project's path by setting the site URL.
        if root:
            if not self.builder:
                self.builder = ProjectsBuilder(config, self.config)

            # @todo: detach project resolution from build
            self.manifest = { ".": os.path.relpath(config.config_file_path) }
            for job in self.builder.root.jobs():
                path = os.path.relpath(job.project.config.config_file_path)
                self.manifest[job.project.slug] = path

            # Save manifest, as we need it in nested projects
            os.makedirs(os.path.dirname(self.manifest_file), exist_ok = True)
            with open(self.manifest_file, "w") as f:
                f.write(json.dumps(self.manifest, indent = 2, sort_keys = True))

    # Schedule projects for building - the general case is that all projects
    # can be considered independent of each other, so we build them in parallel
    def on_pre_build(self, config):
        if not self.config.enabled:
            return

        # Skip if projects should not be built or we're not at the top-level
        if not self.config.projects or not self.builder:
            return

        # Build projects
        self.builder.build(self.is_serve, self.is_dirty)

    # Patch environment to allow for hoisting of media files provided by the
    # theme itself, which will also work for other themes, not only this one
    def on_env(self, env, *, config, files):
        if not self.config.enabled:
            return

        # Skip if projects should not be built or we're at the top-level
        if not self.config.projects or self.builder:
            return

        # If hoisting is enabled and we're building a project, remove all media
        # files that are provided by the theme and hoist them to the top
        if self.config.hoisting:
            theme = get_theme_dir(config.theme.name)
            hoist = Files([])

            # Retrieve top-level project and check if the current project uses
            # the same theme as the top-level project - if not, don't hoist
            root = Project("mkdocs.yml", self.config)
            if config.theme.name != root.config.theme["name"]:
                return

            # Remove all media files that are provided by the theme
            for file in files.media_files():
                if file.abs_src_path.startswith(theme):
                    files.remove(file)
                    hoist.append(file)

            # Resolve source and target project - the source is the project
            # currently being built, the target is the top-level project
            source: Project | None = None
            target: Project | None = None
            for ref, file in self.manifest.items():
                base = os.path.join(self.config.projects_root_dir, file)
                if file == os.path.relpath(
                    config.config_file_path, self.config.projects_root_dir
                ):
                    source = Project(base, self.config, ref)
                if "." == ref:
                    target = Project(base, self.config, ref)

            # Compute path for slug from source and target project
            path = target.path(source)

            # Fetch URL template filter from environment - the filter might
            # be overridden by other plugins, so we must retrieve and wrap it
            url_filter = env.filters["url"]

            # Patch URL template filter to add support for correctly resolving
            # media files that were hoisted to the top-level project
            @pass_context
            def url_filter_with_hoisting(context: Context, url: str | None):
                if url and hoist.get_file_from_path(url):
                    return posixpath.join(path, url_filter(context, url))
                else:
                    return url_filter(context, url)

            # Register custom template filters
            env.filters["url"] = url_filter_with_hoisting

    # Adjust project navigation in page (run latest) - as always, allow
    # other plugins to alter the navigation before we process it here
    @event_priority(-100)
    def on_page_context(self, context, *, page, config, nav):
        if not self.config.enabled:
            return

        # Skip if projects should not be built
        if not self.config.projects:
            return

        # Replace project URLs in navigation
        self._replace(nav.items, config)

    # Adjust project navigation in template (run latest) - as always, allow
    # other plugins to alter the navigation before we process it here
    @event_priority(-100)
    def on_template_context(self, context, *, template_name, config):
        if not self.config.enabled:
            return

        # Skip if projects should not be built
        if not self.config.projects:
            return

        # Replace project URLs in navigation
        self._replace(context["nav"].items, config)

    # Serve projects - guard against a missing builder: when there are no
    # projects, or we're building a nested project, `on_config` returns early
    # and never creates one, so calling `serve` unconditionally would raise
    # an AttributeError on the class-level `builder = None` default
    def on_serve(self, server, *, config, builder):
        if self.config.enabled and self.builder:
            self.builder.serve(server, self.is_dirty)

    # -------------------------------------------------------------------------

    # Replace project links in the given list of navigation items
    def _replace(self, items: list[StructureItem], config: MkDocsConfig):
        for index, item in enumerate(items):

            # Handle section
            if isinstance(item, Section):
                self._replace(item.children, config)

            # Handle link
            if isinstance(item, Link):
                url = urlparse(item.url)
                if url.scheme == "project":
                    project, url = self._resolve_project_url(url, config)

                    # Append file name if directory URLs are disabled
                    if not project.config.use_directory_urls:
                        url += "index.html"

                    # Replace link with project link
                    items[index] = ProjectLink(
                        item.title or project.config.site_name,
                        url
                    )

    # Resolve project URL and slug
    def _resolve_project_url(self, url: URL, config: MkDocsConfig):

        # Abort if the project URL contains a path, as we first need to collect
        # use cases for when, how and whether we need and want to support this
        if url.path != "":
            raise PluginError(
                f"Couldn't resolve project URL: paths currently not supported\n"
                f"Please only use 'project://{url.hostname}'"
            )

        # Compute slug from host name and convert to dot notation
        # NOTE(review): url.hostname may be None for malformed URLs, which
        # would raise on `startswith` — presumably guarded upstream; confirm
        slug = url.hostname
        slug = slug if slug.startswith(".") else f".{slug}"

        # Resolve source and target project
        source: Project | None = None
        target: Project | None = None
        for ref, file in self.manifest.items():
            base = os.path.join(self.config.projects_root_dir, file)
            if file == os.path.relpath(
                config.config_file_path, self.config.projects_root_dir
            ):
                source = Project(base, self.config, ref)
            if slug == ref:
                target = Project(base, self.config, ref)

        # Abort if slug doesn't match a known project
        if not target:
            raise PluginError(f"Couldn't find project '{slug}'")

        # Return project slug and path
        return target, target.path(source)
|
ProjectsPlugin
|
python
|
facebookresearch__faiss
|
tests/test_io.py
|
{
"start": 13019,
"end": 14060
}
|
class ____(unittest.TestCase):
    """Reading with IO_FLAG_SKIP_PRECOMPUTE_TABLE must not change results."""

    def test_reader(self):
        dim, num = 32, 1000
        queries = np.random.uniform(size=(num, dim)).astype('float32')
        database = np.random.uniform(size=(num, dim)).astype('float32')

        # Train and populate an IVF-PQ index
        index = faiss.index_factory(32, "IVF32,PQ16np", faiss.METRIC_L2)
        index.train(database)
        index.add(database)

        # Round-trip through a temporary file, reading it back twice: once
        # normally and once skipping the precomputed tables
        handle, path = tempfile.mkstemp()
        os.close(handle)
        try:
            faiss.write_index(index, path)
            full = faiss.read_index(path)
            lean = faiss.read_index(
                path, faiss.IO_FLAG_SKIP_PRECOMPUTE_TABLE)

            # Search results must agree between the two readers
            dist_full, ids_full = full.search(queries, 10)
            dist_lean, ids_lean = lean.search(queries, 10)
            np.testing.assert_array_equal(ids_full, ids_lean)
            np.testing.assert_almost_equal(dist_full, dist_lean, decimal=5)

            # Encoded codes must agree as well
            np.testing.assert_array_equal(
                full.sa_encode(queries), lean.sa_encode(queries))
        finally:
            if os.path.exists(path):
                os.unlink(path)
|
TestIVFPQRead
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/execution/plan/inputs.py
|
{
"start": 2284,
"end": 2999
}
|
class ____(ABC):
    """Strategy interface describing how a step input's data gets loaded."""

    @property
    def step_key_dependencies(self) -> set[str]:
        # Default: this source depends on no upstream steps
        return set()

    @property
    def step_output_handle_dependencies(self) -> Sequence[StepOutputHandle]:
        # Default: this source consumes no upstream step outputs
        return []

    @abstractmethod
    def load_input_object(
        self, step_context: "StepExecutionContext", input_def: InputDefinition
    ) -> Iterator[object]: ...

    def required_resource_keys(
        self, _job_def: JobDefinition, op_handle: NodeHandle, op_input_name: str
    ) -> AbstractSet[str]:
        # Default: no resources required; subclasses override as needed
        return set()
@whitelist_for_serdes(
storage_name="FromSourceAsset", storage_field_names={"node_handle": "solid_handle"}
)
@record
|
StepInputSource
|
python
|
pandas-dev__pandas
|
pandas/tests/frame/test_query_eval.py
|
{
"start": 37490,
"end": 38065
}
|
class ____(TestDataFrameQueryNumExprPython):
    """Run the query suite with the pure-python engine and parser."""

    @pytest.fixture
    def engine(self):
        return "python"

    @pytest.fixture
    def parser(self):
        return "python"

    def test_query_builtin(self, engine, parser):
        # An index named after a builtin ("sin") must still resolve as a
        # name with the python parser instead of being shadowed
        n = m = 10
        frame = DataFrame(
            np.random.default_rng(2).integers(m, size=(n, 3)),
            columns=list("abc"),
        )
        frame.index.name = "sin"

        result = frame.query("sin > 5", engine=engine, parser=parser)
        expected = frame[frame.index > 5]
        tm.assert_frame_equal(expected, result)
|
TestDataFrameQueryPythonPython
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scattermapbox/marker/_colorbar.py
|
{
"start": 233,
"end": 61773
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermapbox.marker"
_path_str = "scattermapbox.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.scattermapbox.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.scattermapbox.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.scattermapbox.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.scattermapbox.
marker.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
scattermapbox.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.scattermapbox.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.scattermapbox.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
    @property
    def x(self):
        """
        Sets the x position with respect to `xref` of the color bar (in
        plot fraction). When `xref` is "paper", defaults to 1.02 when
        `orientation` is "v" and 0.5 when `orientation` is "h". When
        `xref` is "container", defaults to 1 when `orientation` is "v"
        and 0.5 when `orientation` is "h". Must be between 0 and 1 if
        `xref` is "container" and between "-2" and 3 if `xref` is
        "paper".

        The 'x' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["x"]

    @x.setter
    def x(self, val):
        # Stored via the base class's item-assignment protocol.
        self["x"] = val
    @property
    def xanchor(self):
        """
        Sets this color bar's horizontal position anchor. This anchor
        binds the `x` position to the "left", "center" or "right" of
        the color bar. Defaults to "left" when `orientation` is "v" and
        "center" when `orientation` is "h".

        The 'xanchor' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'center', 'right']

        Returns
        -------
        Any
        """
        return self["xanchor"]

    @xanchor.setter
    def xanchor(self, val):
        # Stored via the base class's item-assignment protocol.
        self["xanchor"] = val
    @property
    def xpad(self):
        """
        Sets the amount of padding (in px) along the x direction.

        The 'xpad' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["xpad"]

    @xpad.setter
    def xpad(self, val):
        # Stored via the base class's item-assignment protocol.
        self["xpad"] = val
    @property
    def xref(self):
        """
        Sets the container `x` refers to. "container" spans the entire
        `width` of the plot. "paper" refers to the width of the
        plotting area only.

        The 'xref' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['container', 'paper']

        Returns
        -------
        Any
        """
        return self["xref"]

    @xref.setter
    def xref(self, val):
        # Stored via the base class's item-assignment protocol.
        self["xref"] = val
    @property
    def y(self):
        """
        Sets the y position with respect to `yref` of the color bar (in
        plot fraction). When `yref` is "paper", defaults to 0.5 when
        `orientation` is "v" and 1.02 when `orientation` is "h". When
        `yref` is "container", defaults to 0.5 when `orientation` is
        "v" and 1 when `orientation` is "h". Must be between 0 and 1 if
        `yref` is "container" and between "-2" and 3 if `yref` is
        "paper".

        The 'y' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["y"]

    @y.setter
    def y(self, val):
        # Stored via the base class's item-assignment protocol.
        self["y"] = val
    @property
    def yanchor(self):
        """
        Sets this color bar's vertical position anchor This anchor
        binds the `y` position to the "top", "middle" or "bottom" of
        the color bar. Defaults to "middle" when `orientation` is "v"
        and "bottom" when `orientation` is "h".

        The 'yanchor' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['top', 'middle', 'bottom']

        Returns
        -------
        Any
        """
        return self["yanchor"]

    @yanchor.setter
    def yanchor(self, val):
        # Stored via the base class's item-assignment protocol.
        self["yanchor"] = val
    @property
    def ypad(self):
        """
        Sets the amount of padding (in px) along the y direction.

        The 'ypad' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["ypad"]

    @ypad.setter
    def ypad(self, val):
        # Stored via the base class's item-assignment protocol.
        self["ypad"] = val
    @property
    def yref(self):
        """
        Sets the container `y` refers to. "container" spans the entire
        `height` of the plot. "paper" refers to the height of the
        plotting area only.

        The 'yref' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['container', 'paper']

        Returns
        -------
        Any
        """
        return self["yref"]

    @yref.setter
    def yref(self, val):
        # Stored via the base class's item-assignment protocol.
        self["yref"] = val
    @property
    def _prop_descriptions(self):
        # Plain-text description of every colorbar property, one block per
        # property name.  The text is runtime data (it is surfaced in help
        # and validation messages), so its wording/wrapping must not change.
        return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.scattermapbox.m
            arker.colorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.scatte
            rmapbox.marker.colorbar.tickformatstopdefaults), sets
            the default property values to use for elements of
            scattermapbox.marker.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.scattermapbox.marker.color
            bar.Title` instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        """
    def __init__(
        self,
        arg=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        dtick=None,
        exponentformat=None,
        labelalias=None,
        len=None,
        lenmode=None,
        minexponent=None,
        nticks=None,
        orientation=None,
        outlinecolor=None,
        outlinewidth=None,
        separatethousands=None,
        showexponent=None,
        showticklabels=None,
        showtickprefix=None,
        showticksuffix=None,
        thickness=None,
        thicknessmode=None,
        tick0=None,
        tickangle=None,
        tickcolor=None,
        tickfont=None,
        tickformat=None,
        tickformatstops=None,
        tickformatstopdefaults=None,
        ticklabeloverflow=None,
        ticklabelposition=None,
        ticklabelstep=None,
        tickmode=None,
        tickprefix=None,
        ticks=None,
        ticksuffix=None,
        ticktext=None,
        ticktextsrc=None,
        tickvals=None,
        tickvalssrc=None,
        tickwidth=None,
        title=None,
        x=None,
        xanchor=None,
        xpad=None,
        xref=None,
        y=None,
        yanchor=None,
        ypad=None,
        yref=None,
        **kwargs,
    ):
        """
        Construct a new ColorBar object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattermapbox.
            marker.ColorBar`
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.scattermapbox.m
            arker.colorbar.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.scatte
            rmapbox.marker.colorbar.tickformatstopdefaults), sets
            the default property values to use for elements of
            scattermapbox.marker.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.scattermapbox.marker.color
            bar.Title` instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.

        Returns
        -------
        ColorBar
        """
        super().__init__("colorbar")

        # Internal construction path: adopt the given parent and skip all
        # property processing.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict of property values.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.scattermapbox.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.marker.ColorBar`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values found in
        # `arg`; `_set_property` resolves that precedence per property.
        self._set_property("bgcolor", arg, bgcolor)
        self._set_property("bordercolor", arg, bordercolor)
        self._set_property("borderwidth", arg, borderwidth)
        self._set_property("dtick", arg, dtick)
        self._set_property("exponentformat", arg, exponentformat)
        self._set_property("labelalias", arg, labelalias)
        self._set_property("len", arg, len)
        self._set_property("lenmode", arg, lenmode)
        self._set_property("minexponent", arg, minexponent)
        self._set_property("nticks", arg, nticks)
        self._set_property("orientation", arg, orientation)
        self._set_property("outlinecolor", arg, outlinecolor)
        self._set_property("outlinewidth", arg, outlinewidth)
        self._set_property("separatethousands", arg, separatethousands)
        self._set_property("showexponent", arg, showexponent)
        self._set_property("showticklabels", arg, showticklabels)
        self._set_property("showtickprefix", arg, showtickprefix)
        self._set_property("showticksuffix", arg, showticksuffix)
        self._set_property("thickness", arg, thickness)
        self._set_property("thicknessmode", arg, thicknessmode)
        self._set_property("tick0", arg, tick0)
        self._set_property("tickangle", arg, tickangle)
        self._set_property("tickcolor", arg, tickcolor)
        self._set_property("tickfont", arg, tickfont)
        self._set_property("tickformat", arg, tickformat)
        self._set_property("tickformatstops", arg, tickformatstops)
        self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
        self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
        self._set_property("ticklabelposition", arg, ticklabelposition)
        self._set_property("ticklabelstep", arg, ticklabelstep)
        self._set_property("ticklen", arg, ticklen)
        self._set_property("tickmode", arg, tickmode)
        self._set_property("tickprefix", arg, tickprefix)
        self._set_property("ticks", arg, ticks)
        self._set_property("ticksuffix", arg, ticksuffix)
        self._set_property("ticktext", arg, ticktext)
        self._set_property("ticktextsrc", arg, ticktextsrc)
        self._set_property("tickvals", arg, tickvals)
        self._set_property("tickvalssrc", arg, tickvalssrc)
        self._set_property("tickwidth", arg, tickwidth)
        self._set_property("title", arg, title)
        self._set_property("x", arg, x)
        self._set_property("xanchor", arg, xanchor)
        self._set_property("xpad", arg, xpad)
        self._set_property("xref", arg, xref)
        self._set_property("y", arg, y)
        self._set_property("yanchor", arg, yanchor)
        self._set_property("ypad", arg, ypad)
        self._set_property("yref", arg, yref)

        # Any remaining entries (from `arg` or **kwargs) are processed
        # generically; unknown keys raise unless skip_invalid was set.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
|
ColorBar
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 981761,
"end": 982147
}
|
class SponsorshipEdge(sgqlc.types.Type):
    """An edge in a connection.

    Auto-generated sgqlc binding for the GitHub GraphQL
    ``SponsorshipEdge`` type.
    """

    # Fix: the class name was a `____` placeholder; restored to the
    # identifier the binding represents so the schema module can reference it.
    __schema__ = github_schema
    __field_names__ = ("cursor", "node")

    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    """A cursor for use in pagination."""

    node = sgqlc.types.Field("Sponsorship", graphql_name="node")
    """The item at the end of the edge."""
|
SponsorshipEdge
|
python
|
pypa__pip
|
src/pip/_vendor/rich/panel.py
|
{
"start": 467,
"end": 11157
}
|
class ____(JupyterMixin):
"""A console renderable that draws a border around its contents.
Example:
>>> console.print(Panel("Hello, World!"))
Args:
renderable (RenderableType): A console renderable object.
box (Box): A Box instance that defines the look of the border (see :ref:`appendix_box`. Defaults to box.ROUNDED.
title (Optional[TextType], optional): Optional title displayed in panel header. Defaults to None.
title_align (AlignMethod, optional): Alignment of title. Defaults to "center".
subtitle (Optional[TextType], optional): Optional subtitle displayed in panel footer. Defaults to None.
subtitle_align (AlignMethod, optional): Alignment of subtitle. Defaults to "center".
safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
expand (bool, optional): If True the panel will stretch to fill the console width, otherwise it will be sized to fit the contents. Defaults to True.
style (str, optional): The style of the panel (border and contents). Defaults to "none".
border_style (str, optional): The style of the border. Defaults to "none".
width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.
height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.
padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0.
highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.
"""
    def __init__(
        self,
        renderable: "RenderableType",
        box: Box = ROUNDED,
        *,
        title: Optional[TextType] = None,
        title_align: AlignMethod = "center",
        subtitle: Optional[TextType] = None,
        subtitle_align: AlignMethod = "center",
        safe_box: Optional[bool] = None,
        expand: bool = True,
        style: StyleType = "none",
        border_style: StyleType = "none",
        width: Optional[int] = None,
        height: Optional[int] = None,
        padding: PaddingDimensions = (0, 1),
        highlight: bool = False,
    ) -> None:
        """Store the panel configuration; see the class docstring for the
        meaning of each argument. No rendering happens here — all layout
        work is deferred to ``__rich_console__``."""
        self.renderable = renderable
        self.box = box
        self.title = title
        self.title_align: AlignMethod = title_align
        self.subtitle = subtitle
        self.subtitle_align = subtitle_align
        # None means "defer to the console's safe_box setting at render time".
        self.safe_box = safe_box
        self.expand = expand
        self.style = style
        self.border_style = border_style
        # None width/height means auto-size at render time.
        self.width = width
        self.height = height
        self.padding = padding
        self.highlight = highlight
@classmethod
def fit(
cls,
renderable: "RenderableType",
box: Box = ROUNDED,
*,
title: Optional[TextType] = None,
title_align: AlignMethod = "center",
subtitle: Optional[TextType] = None,
subtitle_align: AlignMethod = "center",
safe_box: Optional[bool] = None,
style: StyleType = "none",
border_style: StyleType = "none",
width: Optional[int] = None,
height: Optional[int] = None,
padding: PaddingDimensions = (0, 1),
highlight: bool = False,
) -> "Panel":
"""An alternative constructor that sets expand=False."""
return cls(
renderable,
box,
title=title,
title_align=title_align,
subtitle=subtitle,
subtitle_align=subtitle_align,
safe_box=safe_box,
style=style,
border_style=border_style,
width=width,
height=height,
padding=padding,
highlight=highlight,
expand=False,
)
@property
def _title(self) -> Optional[Text]:
if self.title:
title_text = (
Text.from_markup(self.title)
if isinstance(self.title, str)
else self.title.copy()
)
title_text.end = ""
title_text.plain = title_text.plain.replace("\n", " ")
title_text.no_wrap = True
title_text.expand_tabs()
title_text.pad(1)
return title_text
return None
@property
def _subtitle(self) -> Optional[Text]:
if self.subtitle:
subtitle_text = (
Text.from_markup(self.subtitle)
if isinstance(self.subtitle, str)
else self.subtitle.copy()
)
subtitle_text.end = ""
subtitle_text.plain = subtitle_text.plain.replace("\n", " ")
subtitle_text.no_wrap = True
subtitle_text.expand_tabs()
subtitle_text.pad(1)
return subtitle_text
return None
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
_padding = Padding.unpack(self.padding)
renderable = (
Padding(self.renderable, _padding) if any(_padding) else self.renderable
)
style = console.get_style(self.style)
border_style = style + console.get_style(self.border_style)
width = (
options.max_width
if self.width is None
else min(options.max_width, self.width)
)
safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box
box = self.box.substitute(options, safe=safe_box)
def align_text(
text: Text, width: int, align: str, character: str, style: Style
) -> Text:
"""Gets new aligned text.
Args:
text (Text): Title or subtitle text.
width (int): Desired width.
align (str): Alignment.
character (str): Character for alignment.
style (Style): Border style
Returns:
Text: New text instance
"""
text = text.copy()
text.truncate(width)
excess_space = width - cell_len(text.plain)
if text.style:
text.stylize(console.get_style(text.style))
if excess_space:
if align == "left":
return Text.assemble(
text,
(character * excess_space, style),
no_wrap=True,
end="",
)
elif align == "center":
left = excess_space // 2
return Text.assemble(
(character * left, style),
text,
(character * (excess_space - left), style),
no_wrap=True,
end="",
)
else:
return Text.assemble(
(character * excess_space, style),
text,
no_wrap=True,
end="",
)
return text
title_text = self._title
if title_text is not None:
title_text.stylize_before(border_style)
child_width = (
width - 2
if self.expand
else console.measure(
renderable, options=options.update_width(width - 2)
).maximum
)
child_height = self.height or options.height or None
if child_height:
child_height -= 2
if title_text is not None:
child_width = min(
options.max_width - 2, max(child_width, title_text.cell_len + 2)
)
width = child_width + 2
child_options = options.update(
width=child_width, height=child_height, highlight=self.highlight
)
lines = console.render_lines(renderable, child_options, style=style)
line_start = Segment(box.mid_left, border_style)
line_end = Segment(f"{box.mid_right}", border_style)
new_line = Segment.line()
if title_text is None or width <= 4:
yield Segment(box.get_top([width - 2]), border_style)
else:
title_text = align_text(
title_text,
width - 4,
self.title_align,
box.top,
border_style,
)
yield Segment(box.top_left + box.top, border_style)
yield from console.render(title_text, child_options.update_width(width - 4))
yield Segment(box.top + box.top_right, border_style)
yield new_line
for line in lines:
yield line_start
yield from line
yield line_end
yield new_line
subtitle_text = self._subtitle
if subtitle_text is not None:
subtitle_text.stylize_before(border_style)
if subtitle_text is None or width <= 4:
yield Segment(box.get_bottom([width - 2]), border_style)
else:
subtitle_text = align_text(
subtitle_text,
width - 4,
self.subtitle_align,
box.bottom,
border_style,
)
yield Segment(box.bottom_left + box.bottom, border_style)
yield from console.render(
subtitle_text, child_options.update_width(width - 4)
)
yield Segment(box.bottom + box.bottom_right, border_style)
yield new_line
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
_title = self._title
_, right, _, left = Padding.unpack(self.padding)
padding = left + right
renderables = [self.renderable, _title] if _title else [self.renderable]
if self.width is None:
width = (
measure_renderables(
console,
options.update_width(options.max_width - padding - 2),
renderables,
).maximum
+ padding
+ 2
)
else:
width = self.width
return Measurement(width, width)
if __name__ == "__main__": # pragma: no cover
from .console import Console
c = Console()
from .box import DOUBLE, ROUNDED
from .padding import Padding
p = Panel(
"Hello, World!",
title="rich.Panel",
style="white on blue",
box=DOUBLE,
padding=1,
)
c.print()
c.print(p)
|
Panel
|
python
|
wandb__wandb
|
wandb/jupyter.py
|
{
"start": 936,
"end": 2642
}
|
class ____:
"""State for a cell with the %%wandb cell magic."""
def __init__(self, *, height: int) -> None:
"""Initializes the %%wandb cell magic state.
Args:
height: The desired height for displayed iframes.
"""
self._height = height
self._already_displayed = False
def display_if_allowed(self, run: wandb.Run) -> None:
"""Display a run's iframe if one is not already displayed.
Args:
run: The run to display.
"""
if self._already_displayed:
return
self._already_displayed = True
_display_wandb_run(run, height=self._height)
_current_cell_wandb_magic: _WandbCellMagicState | None = None
def _display_by_wandb_path(path: str, *, height: int) -> None:
"""Display a wandb object (usually in an iframe) given its URI.
Args:
path: A path to a run, sweep, project, report, etc.
height: Height of the iframe in pixels.
"""
api = wandb.Api()
try:
obj = api.from_path(path)
IPython.display.display_html(
obj.to_html(height=height),
raw=True,
)
except wandb.Error:
traceback.print_exc()
IPython.display.display_html(
f"Path {path!r} does not refer to a W&B object you can access.",
raw=True,
)
def _display_wandb_run(run: wandb.Run, *, height: int) -> None:
"""Display a run (usually in an iframe).
Args:
run: The run to display.
height: Height of the iframe in pixels.
"""
IPython.display.display_html(
run.to_html(height=height),
raw=True,
)
@magics_class
|
_WandbCellMagicState
|
python
|
pandas-dev__pandas
|
pandas/core/internals/api.py
|
{
"start": 2259,
"end": 5615
}
|
class ____(DatetimeLikeBlock):
"""implement a datetime64 block with a tz attribute"""
values: DatetimeArray
__slots__ = ()
def make_block(
values, placement, klass=None, ndim=None, dtype: Dtype | None = None
) -> Block:
"""
This is a pseudo-public analogue to blocks.new_block.
We ask that downstream libraries use this rather than any fully-internal
APIs, including but not limited to:
- core.internals.blocks.make_block
- Block.make_block
- Block.make_block_same_class
- Block.__init__
"""
warnings.warn(
# GH#56815
"make_block is deprecated and will be removed in a future version. "
"Use pd.api.internals.create_dataframe_from_blocks or "
"(recommended) higher-level public APIs instead.",
Pandas4Warning,
stacklevel=2,
)
if dtype is not None:
dtype = pandas_dtype(dtype)
values, dtype = extract_pandas_array(values, dtype, ndim)
from pandas.core.internals.blocks import ExtensionBlock
if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype):
# GH-44681 changed PeriodArray to be stored in the 2D
# NDArrayBackedExtensionBlock instead of ExtensionBlock
# -> still allow ExtensionBlock to be passed in this case for back compat
klass = None
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(dtype)
elif klass is _DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype):
# pyarrow calls get here (pyarrow<15)
values = DatetimeArray._simple_new(
# error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
# incompatible type "Union[ExtensionDtype, dtype[Any], None]";
# expected "Union[dtype[datetime64], DatetimeTZDtype]"
values,
dtype=dtype, # type: ignore[arg-type]
)
if not isinstance(placement, BlockPlacement):
placement = BlockPlacement(placement)
ndim = _maybe_infer_ndim(values, placement, ndim)
if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)):
# GH#41168 ensure we can pass 1D dt64tz values
# More generally, any EA dtype that isn't is_1d_only_ea_dtype
values = extract_array(values, extract_numpy=True)
values = ensure_block_shape(values, ndim)
check_ndim(values, placement, ndim)
values = maybe_coerce_values(values)
return klass(values, ndim=ndim, placement=placement)
def _maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:
"""
If `ndim` is not provided, infer it from placement and values.
"""
if ndim is None:
# GH#38134 Block constructor now assumes ndim is not None
if not isinstance(values.dtype, np.dtype):
if len(placement) != 1:
ndim = 1
else:
ndim = 2
else:
ndim = values.ndim
return ndim
def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:
"""
If `ndim` is not provided, infer it from placement and values.
"""
warnings.warn(
"maybe_infer_ndim is deprecated and will be removed in a future version.",
Pandas4Warning,
stacklevel=2,
)
return _maybe_infer_ndim(values, placement, ndim)
|
_DatetimeTZBlock
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_associationproxy.py
|
{
"start": 93890,
"end": 94091
}
|
class ____(
ScalarRemoveTest, fixtures.DeclarativeMappedTest
):
run_create_tables = None
useobject = True
cascade_scalar_deletes = False
uselist = True
|
ScalarRemoveListObjectNoCascade
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_reduce.py
|
{
"start": 2351,
"end": 2682
}
|
class ____(Benchmark):
params = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32,
np.int64, np.uint64, np.float32, np.float64, bool]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(200000, dtype=dtype)
def time_argmin(self, dtype):
np.argmin(self.d)
|
ArgMin
|
python
|
python__mypy
|
misc/generate_changelog.py
|
{
"start": 847,
"end": 6556
}
|
class ____:
commit: str
author: str
title: str
pr_number: int | None
def normalize_author(author: str) -> str:
# Some ad-hoc rules to get more consistent author names.
if author == "AlexWaygood":
return "Alex Waygood"
elif author == "jhance":
return "Jared Hance"
return author
def git_commit_log(rev1: str, rev2: str) -> list[CommitInfo]:
result = subprocess.run(
["git", "log", "--pretty=%H\t%an\t%s", f"{rev1}..{rev2}"],
text=True,
capture_output=True,
check=True,
)
commits = []
for line in result.stdout.splitlines():
commit, author, title = line.strip().split("\t", 2)
pr_number = None
if m := re.match(r".*\(#([0-9]+)\) *$", title):
pr_number = int(m.group(1))
title = re.sub(r" *\(#[0-9]+\) *$", "", title)
author = normalize_author(author)
entry = CommitInfo(commit, author, title, pr_number)
commits.append(entry)
return commits
def filter_omitted_commits(commits: list[CommitInfo]) -> list[CommitInfo]:
result = []
for c in commits:
title = c.title
keep = True
if title.startswith("Sync typeshed"):
# Typeshed syncs aren't mentioned in release notes
keep = False
if title.startswith(
(
"Revert sum literal integer change",
"Remove use of LiteralString in builtins",
"Revert typeshed ctypes change",
)
):
# These are generated by a typeshed sync.
keep = False
if re.search(r"(bump|update).*version.*\+dev", title.lower()):
# Version number updates aren't mentioned
keep = False
if "pre-commit autoupdate" in title:
keep = False
if title.startswith(("Update commit hashes", "Update hashes")):
# Internal tool change
keep = False
if keep:
result.append(c)
return result
def normalize_title(title: str) -> str:
# We sometimes add a title prefix when cherry-picking commits to a
# release branch. Attempt to remove these prefixes so that we can
# match them to the corresponding master branch.
if m := re.match(r"\[release [0-9.]+\] *", title, flags=re.I):
title = title.replace(m.group(0), "")
return title
def filter_out_commits_from_old_release_branch(
new_commits: list[CommitInfo], old_commits: list[CommitInfo]
) -> list[CommitInfo]:
old_titles = {normalize_title(commit.title) for commit in old_commits}
result = []
for commit in new_commits:
drop = False
if normalize_title(commit.title) in old_titles:
drop = True
if normalize_title(f"{commit.title} (#{commit.pr_number})") in old_titles:
drop = True
if not drop:
result.append(commit)
else:
print(f'NOTE: Drop "{commit.title}", since it was in previous release branch')
return result
def find_changes_between_releases(old_branch: str, new_branch: str) -> list[CommitInfo]:
merge_base = git_merge_base(old_branch, new_branch)
print(f"Merge base: {merge_base}")
new_commits = git_commit_log(merge_base, new_branch)
old_commits = git_commit_log(merge_base, old_branch)
# Filter out some commits that won't be mentioned in release notes.
new_commits = filter_omitted_commits(new_commits)
# Filter out commits cherry-picked to old branch.
new_commits = filter_out_commits_from_old_release_branch(new_commits, old_commits)
return new_commits
def format_changelog_entry(c: CommitInfo) -> str:
"""
s = f" * {c.commit[:9]} - {c.title}"
if c.pr_number:
s += f" (#{c.pr_number})"
s += f" ({c.author})"
"""
title = c.title.removesuffix(".")
s = f" * {title} ({c.author}"
if c.pr_number:
s += f", PR [{c.pr_number}](https://github.com/python/mypy/pull/{c.pr_number})"
s += ")"
return s
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("version", help="target mypy version (form X.Y)")
parser.add_argument("--local", action="store_true")
args = parser.parse_args()
version: str = args.version
local: bool = args.local
if not re.match(r"[0-9]+\.[0-9]+$", version):
sys.exit(f"error: Release must be of form X.Y (not {version!r})")
major, minor = (int(component) for component in version.split("."))
if not local:
print("Running 'git fetch' to fetch all release branches...")
subprocess.run(["git", "fetch"], check=True)
if minor > 0:
prev_major = major
prev_minor = minor - 1
else:
# For a x.0 release, the previous release is the most recent (x-1).y release.
all_releases = sorted(find_all_release_branches())
if (major, minor) not in all_releases:
sys.exit(f"error: Can't find release branch for {major}.{minor} at origin")
for i in reversed(range(len(all_releases))):
if all_releases[i][0] == major - 1:
prev_major, prev_minor = all_releases[i]
break
else:
sys.exit("error: Could not determine previous release")
print(f"Generating changelog for {major}.{minor}")
print(f"Previous release was {prev_major}.{prev_minor}")
new_branch = f"origin/release-{major}.{minor}"
old_branch = f"origin/release-{prev_major}.{prev_minor}"
changes = find_changes_between_releases(old_branch, new_branch)
print()
for c in changes:
print(format_changelog_entry(c))
if __name__ == "__main__":
main()
|
CommitInfo
|
python
|
readthedocs__readthedocs.org
|
readthedocs/redirects/admin.py
|
{
"start": 148,
"end": 594
}
|
class ____(admin.ModelAdmin):
list_display = [
"project",
"redirect_type",
"from_url",
"to_url",
"position",
"enabled",
]
list_select_related = ("project",)
list_filter = ("redirect_type", "enabled")
raw_id_fields = ("project",)
search_fields = (
"project__slug",
"from_url",
"to_url",
)
readonly_fields = ("from_url_without_rest",)
|
RedirectAdmin
|
python
|
huggingface__transformers
|
tests/models/xcodec/test_modeling_xcodec.py
|
{
"start": 3669,
"end": 11554
}
|
class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (XcodecModel,) if is_torch_available() else ()
is_encoder_decoder = True
test_resize_embeddings = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
# model does not support returning hidden states
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if "output_attentions" in inputs_dict:
inputs_dict.pop("output_attentions")
if "output_hidden_states" in inputs_dict:
inputs_dict.pop("output_hidden_states")
return inputs_dict
def setUp(self):
self.model_tester = XcodecModelTester(self)
self.config_tester = ConfigTester(
self, config_class=XcodecConfig, common_properties=[], has_text_modality=False
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_values", "audio_codes", "bandwidth", "return_dict"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_gradient_checkpointing_backward_compatibility(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if not model_class.supports_gradient_checkpointing:
continue
config.text_encoder.gradient_checkpointing = True
config.audio_encoder.gradient_checkpointing = True
config.decoder.gradient_checkpointing = True
model = model_class(config)
self.assertTrue(model.is_gradient_checkpointing)
@unittest.skip(reason="The XcodecModel does not have `inputs_embeds` logics")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="The XcodecModel does not have `inputs_embeds` logics")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="The XcodecModel does not have the usual `attention` logic")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="The XcodecModel does not have the usual `attention` logic")
def test_attention_outputs(self):
pass
@unittest.skip(reason="The XcodecModel does not have the usual `hidden_states` logic")
def test_hidden_states_output(self):
pass
# Copied from transformers.tests.encodec.test_modeling_encodecEncodecModelTest.test_determinism
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_determinism(first, second):
# outputs are not tensors but list (since each sequence don't have the same frame_length)
out_1 = first.cpu().numpy()
out_2 = second.cpu().numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
if isinstance(first, tuple) and isinstance(second, tuple):
for tensor1, tensor2 in zip(first, second):
check_determinism(tensor1, tensor2)
else:
check_determinism(first, second)
# Copied from transformers.tests.encodec.test_modeling_encodecEncodecModelTest.test_model_outputs_equivalence
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs)
self.assertTrue(isinstance(tuple_output, tuple))
self.assertTrue(isinstance(dict_output, dict))
for tuple_value, dict_value in zip(tuple_output, dict_output.values()):
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_value), set_nan_tensor_to_zero(dict_value), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_value - dict_value))}. Tuple has `nan`:"
f" {torch.isnan(tuple_value).any()} and `inf`: {torch.isinf(tuple_value)}. Dict has"
f" `nan`: {torch.isnan(dict_value).any()} and `inf`: {torch.isinf(dict_value)}."
),
)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
@unittest.skip(reason="The XcodecModel does not have support dynamic compile yet")
def test_sdpa_can_compile_dynamic(self):
pass
# Copied from transformers.tests.encodec.test_modeling_encodec.normalize
def normalize(arr):
norm = np.linalg.norm(arr)
normalized_arr = arr / norm
return normalized_arr
# Copied from transformers.tests.encodec.test_modeling_encodec.compute_rmse
def compute_rmse(arr1, arr2):
arr1_np = arr1.cpu().numpy().squeeze()
arr2_np = arr2.cpu().numpy().squeeze()
max_length = min(arr1.shape[-1], arr2.shape[-1])
arr1_np = arr1_np[..., :max_length]
arr2_np = arr2_np[..., :max_length]
arr1_normalized = normalize(arr1_np)
arr2_normalized = normalize(arr2_np)
return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean())
"""
Integration tests for XCodec
Code for reproducing expected outputs can be found here:
https://gist.github.com/ebezzam/cdaf8c223e59e7677b2ea6bc2dc8230b
One reason for higher tolerances is because of different implementation of `Snake1d` within Transformer version DAC
See here: https://github.com/huggingface/transformers/pull/39793#issue-3277407384
"""
RESULTS_PATH = Path(__file__).parent.parent.parent / "fixtures/xcodec/integration_tests.json"
with open(RESULTS_PATH, "r") as f:
raw_data = json.load(f)
# convert dicts into tuples ordered to match test args
EXPECTED_OUTPUTS_JSON = [
(
f"{d['repo_id']}_{d['bandwidth']}",
d["repo_id"],
d["bandwidth"],
d["codes"],
d["decoded"],
d["codec_error"],
d["codec_tol"],
d["dec_tol"],
)
for d in raw_data
]
@slow
@require_torch
|
XcodecModelTest
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_cloud_batch.py
|
{
"start": 3576,
"end": 4431
}
|
class ____:
@mock.patch(CLOUD_BATCH_HOOK_PATH)
def test_execute(self, hook_mock):
delete_operation_mock = self._delete_operation_mock()
hook_mock.return_value.delete_job.return_value = delete_operation_mock
operator = CloudBatchDeleteJobOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
job_name=JOB_NAME,
)
operator.execute(context=mock.MagicMock())
hook_mock.return_value.delete_job.assert_called_once_with(
job_name=JOB_NAME, region=REGION, project_id=PROJECT_ID
)
delete_operation_mock.result.assert_called_once()
def _delete_operation_mock(self):
operation = mock.MagicMock()
operation.result.return_value = mock.MagicMock()
return operation
|
TestCloudBatchDeleteJobOperator
|
python
|
pypa__pip
|
src/pip/_internal/commands/list.py
|
{
"start": 1342,
"end": 13514
}
|
class ____(IndexGroupCommand):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
ignore_require_venv = True
usage = """
%prog [options]"""
def add_options(self) -> None:
self.cmd_opts.add_option(
"-o",
"--outdated",
action="store_true",
default=False,
help="List outdated packages",
)
self.cmd_opts.add_option(
"-u",
"--uptodate",
action="store_true",
default=False,
help="List uptodate packages",
)
self.cmd_opts.add_option(
"-e",
"--editable",
action="store_true",
default=False,
help="List editable projects.",
)
self.cmd_opts.add_option(
"-l",
"--local",
action="store_true",
default=False,
help=(
"If in a virtualenv that has global access, do not list "
"globally-installed packages."
),
)
self.cmd_opts.add_option(
"--user",
dest="user",
action="store_true",
default=False,
help="Only output packages installed in user-site.",
)
self.cmd_opts.add_option(cmdoptions.list_path())
self.cmd_opts.add_option(
"--pre",
action="store_true",
default=False,
help=(
"Include pre-release and development versions. By default, "
"pip only finds stable versions."
),
)
self.cmd_opts.add_option(
"--format",
action="store",
dest="list_format",
default="columns",
choices=("columns", "freeze", "json"),
help=(
"Select the output format among: columns (default), freeze, or json. "
"The 'freeze' format cannot be used with the --outdated option."
),
)
self.cmd_opts.add_option(
"--not-required",
action="store_true",
dest="not_required",
help="List packages that are not dependencies of installed packages.",
)
self.cmd_opts.add_option(
"--exclude-editable",
action="store_false",
dest="include_editable",
help="Exclude editable package from output.",
)
self.cmd_opts.add_option(
"--include-editable",
action="store_true",
dest="include_editable",
help="Include editable package in output.",
default=True,
)
self.cmd_opts.add_option(cmdoptions.list_exclude())
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
def handle_pip_version_check(self, options: Values) -> None:
if options.outdated or options.uptodate:
super().handle_pip_version_check(options)
def _build_package_finder(
self, options: Values, session: PipSession
) -> PackageFinder:
"""
Create a package finder appropriate to this list command.
"""
# Lazy import the heavy index modules as most list invocations won't need 'em.
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
link_collector = LinkCollector.create(session, options=options)
# Pass allow_yanked=False to ignore yanked versions.
selection_prefs = SelectionPreferences(
allow_yanked=False,
allow_all_prereleases=options.pre,
)
return PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
def run(self, options: Values, args: list[str]) -> int:
if options.outdated and options.uptodate:
raise CommandError("Options --outdated and --uptodate cannot be combined.")
if options.outdated and options.list_format == "freeze":
raise CommandError(
"List format 'freeze' cannot be used with the --outdated option."
)
cmdoptions.check_list_path_option(options)
skip = set(stdlib_pkgs)
if options.excludes:
skip.update(canonicalize_name(n) for n in options.excludes)
packages: _ProcessedDists = [
cast("_DistWithLatestInfo", d)
for d in get_environment(options.path).iter_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
include_editables=options.include_editable,
skip=skip,
)
]
# get_not_required must be called firstly in order to find and
# filter out all dependencies correctly. Otherwise a package
# can't be identified as requirement because some parent packages
# could be filtered out before.
if options.not_required:
packages = self.get_not_required(packages, options)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
self.output_package_listing(packages, options)
return SUCCESS
def get_outdated(
self, packages: _ProcessedDists, options: Values
) -> _ProcessedDists:
return [
dist
for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.version
]
def get_uptodate(
self, packages: _ProcessedDists, options: Values
) -> _ProcessedDists:
return [
dist
for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.version
]
def get_not_required(
self, packages: _ProcessedDists, options: Values
) -> _ProcessedDists:
dep_keys = {
canonicalize_name(dep.name)
for dist in packages
for dep in (dist.iter_dependencies() or ())
}
# Create a set to remove duplicate packages, and cast it to a list
# to keep the return type consistent with get_outdated and
# get_uptodate
return list({pkg for pkg in packages if pkg.canonical_name not in dep_keys})
def iter_packages_latest_infos(
self, packages: _ProcessedDists, options: Values
) -> Generator[_DistWithLatestInfo, None, None]:
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
def latest_info(
dist: _DistWithLatestInfo,
) -> _DistWithLatestInfo | None:
all_candidates = finder.find_all_candidates(dist.canonical_name)
if not options.pre:
# Remove prereleases
all_candidates = [
candidate
for candidate in all_candidates
if not candidate.version.is_prerelease
]
evaluator = finder.make_candidate_evaluator(
project_name=dist.canonical_name,
)
best_candidate = evaluator.sort_best_candidate(all_candidates)
if best_candidate is None:
return None
remote_version = best_candidate.version
if best_candidate.link.is_wheel:
typ = "wheel"
else:
typ = "sdist"
dist.latest_version = remote_version
dist.latest_filetype = typ
return dist
for dist in map(latest_info, packages):
if dist is not None:
yield dist
def output_package_listing(
self, packages: _ProcessedDists, options: Values
) -> None:
packages = sorted(
packages,
key=lambda dist: dist.canonical_name,
)
if options.list_format == "columns" and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == "freeze":
for dist in packages:
try:
req_string = f"{dist.raw_name}=={dist.version}"
except InvalidVersion:
req_string = f"{dist.raw_name}==={dist.raw_version}"
if options.verbose >= 1:
write_output("%s (%s)", req_string, dist.location)
else:
write_output(req_string)
elif options.list_format == "json":
write_output(format_for_json(packages, options))
def output_package_listing_columns(
self, data: list[list[str]], header: list[str]
) -> None:
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join("-" * x for x in sizes))
for val in pkg_strings:
write_output(val)
def format_for_columns(
pkgs: _ProcessedDists, options: Values
) -> tuple[list[list[str]], list[str]]:
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
header = ["Package", "Version"]
running_outdated = options.outdated
if running_outdated:
header.extend(["Latest", "Type"])
def wheel_build_tag(dist: BaseDistribution) -> str | None:
try:
wheel_file = dist.read_text("WHEEL")
except FileNotFoundError:
return None
return Parser().parsestr(wheel_file).get("Build")
build_tags = [wheel_build_tag(p) for p in pkgs]
has_build_tags = any(build_tags)
if has_build_tags:
header.append("Build")
has_editables = any(x.editable for x in pkgs)
if has_editables:
header.append("Editable project location")
if options.verbose >= 1:
header.append("Location")
if options.verbose >= 1:
header.append("Installer")
data = []
for i, proj in enumerate(pkgs):
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.raw_name, proj.raw_version]
if running_outdated:
row.append(str(proj.latest_version))
row.append(proj.latest_filetype)
if has_build_tags:
row.append(build_tags[i] or "")
if has_editables:
row.append(proj.editable_project_location or "")
if options.verbose >= 1:
row.append(proj.location or "")
if options.verbose >= 1:
row.append(proj.installer)
data.append(row)
return data, header
def format_for_json(packages: _ProcessedDists, options: Values) -> str:
data = []
for dist in packages:
try:
version = str(dist.version)
except InvalidVersion:
version = dist.raw_version
info = {
"name": dist.raw_name,
"version": version,
}
if options.verbose >= 1:
info["location"] = dist.location or ""
info["installer"] = dist.installer
if options.outdated:
info["latest_version"] = str(dist.latest_version)
info["latest_filetype"] = dist.latest_filetype
editable_project_location = dist.editable_project_location
if editable_project_location:
info["editable_project_location"] = editable_project_location
data.append(info)
return json.dumps(data)
|
ListCommand
|
python
|
Pylons__pyramid
|
src/pyramid/config/factories.py
|
{
"start": 447,
"end": 9175
}
|
class ____:
@action_method
def set_root_factory(self, factory):
"""Add a :term:`root factory` to the current configuration
state. If the ``factory`` argument is ``None`` a default root
factory will be registered.
.. note::
Using the ``root_factory`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
factory = self.maybe_dotted(factory)
if factory is None:
factory = DefaultRootFactory
def register():
self.registry.registerUtility(factory, IRootFactory)
self.registry.registerUtility(factory, IDefaultRootFactory) # b/c
intr = self.introspectable(
'root factories',
None,
self.object_description(factory),
'root factory',
)
intr['factory'] = factory
self.action(IRootFactory, register, introspectables=(intr,))
_set_root_factory = set_root_factory # bw compat
@action_method
def set_session_factory(self, factory):
"""
Configure the application with a :term:`session factory`. If this
method is called, the ``factory`` argument must be a session
factory callable or a :term:`dotted Python name` to that factory.
.. note::
Using the ``session_factory`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
factory = self.maybe_dotted(factory)
def register():
self.registry.registerUtility(factory, ISessionFactory)
intr = self.introspectable(
'session factory',
None,
self.object_description(factory),
'session factory',
)
intr['factory'] = factory
self.action(ISessionFactory, register, introspectables=(intr,))
@action_method
def set_request_factory(self, factory):
"""The object passed as ``factory`` should be an object (or a
:term:`dotted Python name` which refers to an object) which
will be used by the :app:`Pyramid` router to create all
request objects. This factory object must have the same
methods and attributes as the
:class:`pyramid.request.Request` class (particularly
``__call__``, and ``blank``).
See :meth:`pyramid.config.Configurator.add_request_method`
for a less intrusive way to extend the request objects with
custom methods and properties.
.. note::
Using the ``request_factory`` argument to the
:class:`pyramid.config.Configurator` constructor
can be used to achieve the same purpose.
"""
factory = self.maybe_dotted(factory)
def register():
self.registry.registerUtility(factory, IRequestFactory)
intr = self.introspectable(
'request factory',
None,
self.object_description(factory),
'request factory',
)
intr['factory'] = factory
self.action(IRequestFactory, register, introspectables=(intr,))
@action_method
def set_response_factory(self, factory):
"""The object passed as ``factory`` should be an object (or a
:term:`dotted Python name` which refers to an object) which
will be used by the :app:`Pyramid` as the default response
objects. The factory should conform to the
:class:`pyramid.interfaces.IResponseFactory` interface.
.. note::
Using the ``response_factory`` argument to the
:class:`pyramid.config.Configurator` constructor
can be used to achieve the same purpose.
"""
factory = self.maybe_dotted(factory)
def register():
self.registry.registerUtility(factory, IResponseFactory)
intr = self.introspectable(
'response factory',
None,
self.object_description(factory),
'response factory',
)
intr['factory'] = factory
self.action(IResponseFactory, register, introspectables=(intr,))
@action_method
def add_request_method(
self, callable=None, name=None, property=False, reify=False
):
"""Add a property or method to the request object.
When adding a method to the request, ``callable`` may be any
function that receives the request object as the first
parameter. If ``name`` is ``None`` then it will be computed
from the name of the ``callable``.
When adding a property to the request, ``callable`` can either
be a callable that accepts the request as its single positional
parameter, or it can be a property descriptor. If ``callable`` is
a property descriptor, it has to be an instance of a class which is
a subclass of ``property``. If ``name`` is ``None``, the name of
the property will be computed from the name of the ``callable``.
If the ``callable`` is a property descriptor a ``ValueError``
will be raised if ``name`` is ``None`` or ``reify`` is ``True``.
See :meth:`pyramid.request.Request.set_property` for more
details on ``property`` vs ``reify``. When ``reify`` is
``True``, the value of ``property`` is assumed to also be
``True``.
In all cases, ``callable`` may also be a
:term:`dotted Python name` which refers to either a callable or
a property descriptor.
If ``callable`` is ``None`` then the method is only used to
assist in conflict detection between different addons requesting
the same attribute on the request object.
This is the recommended method for extending the request object
and should be used in favor of providing a custom request
factory via
:meth:`pyramid.config.Configurator.set_request_factory`.
.. versionadded:: 1.4
"""
if callable is not None:
callable = self.maybe_dotted(callable)
property = property or reify
if property:
name, callable = InstancePropertyHelper.make_property(
callable, name=name, reify=reify
)
elif name is None:
name = callable.__name__
else:
name = get_callable_name(name)
def register():
exts = self.registry.queryUtility(IRequestExtensions)
if exts is None:
exts = _RequestExtensions()
self.registry.registerUtility(exts, IRequestExtensions)
plist = exts.descriptors if property else exts.methods
plist[name] = callable
if callable is None:
self.action(('request extensions', name), None)
elif property:
intr = self.introspectable(
'request extensions',
name,
self.object_description(callable),
'request property',
)
intr['callable'] = callable
intr['property'] = True
intr['reify'] = reify
self.action(
('request extensions', name), register, introspectables=(intr,)
)
else:
intr = self.introspectable(
'request extensions',
name,
self.object_description(callable),
'request method',
)
intr['callable'] = callable
intr['property'] = False
intr['reify'] = False
self.action(
('request extensions', name), register, introspectables=(intr,)
)
@action_method
def set_execution_policy(self, policy):
"""
Override the :app:`Pyramid` :term:`execution policy` in the
current configuration. The ``policy`` argument must be an instance
of an :class:`pyramid.interfaces.IExecutionPolicy` or a
:term:`dotted Python name` that points at an instance of an
execution policy.
"""
policy = self.maybe_dotted(policy)
if policy is None:
policy = default_execution_policy
def register():
self.registry.registerUtility(policy, IExecutionPolicy)
intr = self.introspectable(
'execution policy',
None,
self.object_description(policy),
'execution policy',
)
intr['policy'] = policy
self.action(IExecutionPolicy, register, introspectables=(intr,))
@implementer(IRequestExtensions)
|
FactoriesConfiguratorMixin
|
python
|
huggingface__transformers
|
tests/models/pix2struct/test_modeling_pix2struct.py
|
{
"start": 27474,
"end": 32382
}
|
class ____(unittest.TestCase):
def test_inference_image_captioning(self):
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device)
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = prepare_img()
# image only
inputs = processor(images=image, return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs)
self.assertEqual(
processor.decode(predictions[0], skip_special_tokens=True), "A stop sign is on a street corner."
)
def test_batched_inference_image_captioning(self):
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device)
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image_1 = prepare_img()
second_url = (
"https://www.connollycove.com/wp-content/uploads/2019/06/temple-bar-dublin-world-famous-irish-pub.jpg"
)
image_2 = Image.open(requests.get(second_url, stream=True).raw)
# image only
inputs = processor(images=[image_1, image_2], return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs)
self.assertEqual(
processor.decode(predictions[0], skip_special_tokens=True), "A stop sign is on a street corner."
)
self.assertEqual(
processor.decode(predictions[1], skip_special_tokens=True),
"A row of books including The Temple Bar and Guiness.",
)
def test_batched_inference_image_captioning_conditioned(self):
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device)
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image_1 = prepare_img()
second_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/temple-bar-dublin-world-famous-irish-pub.jpg"
image_2 = Image.open(requests.get(second_url, stream=True).raw)
texts = ["A picture of", "An photography of"]
# image only
inputs = processor(images=[image_1, image_2], text=texts, return_tensors="pt", add_special_tokens=False).to(
torch_device
)
predictions = model.generate(**inputs)
self.assertEqual(
processor.decode(predictions[0], skip_special_tokens=True),
"A picture of a stop sign with a red stop sign",
)
self.assertEqual(
processor.decode(predictions[1], skip_special_tokens=True),
"An photography of the Temple Bar and other places in the city.",
)
def test_vqa_model(self):
model_id = "google/pix2struct-ai2d-base"
image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(image_url, stream=True).raw)
model = Pix2StructForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
processor = Pix2StructProcessor.from_pretrained(model_id)
# image only
text = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud"
inputs = processor(images=image, return_tensors="pt", text=text).to(torch_device, torch.bfloat16)
predictions = model.generate(**inputs)
self.assertEqual(processor.decode(predictions[0], skip_special_tokens=True), "ash cloud")
def test_vqa_model_batched(self):
model_id = "google/pix2struct-ai2d-base"
image_urls = [
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg",
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo-2.png",
]
images = [Image.open(requests.get(image_url, stream=True).raw) for image_url in image_urls]
texts = [
"What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud",
"What is the producer in the diagram? (1) Phytoplankton (2) Zooplankton (3) Large fish (4) Small fish",
]
model = Pix2StructForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
processor = Pix2StructProcessor.from_pretrained(model_id)
inputs = processor(images=images, return_tensors="pt", text=texts).to(torch_device, torch.bfloat16)
predictions = model.generate(**inputs)
self.assertEqual(processor.decode(predictions[0], skip_special_tokens=True), "ash cloud")
self.assertEqual(processor.decode(predictions[1], skip_special_tokens=True), "Phytoplankton")
|
Pix2StructIntegrationTest
|
python
|
pytest-dev__pytest
|
testing/test_assertion.py
|
{
"start": 14216,
"end": 31299
}
|
class ____:
def test_different_types(self) -> None:
assert callequal([0, 1], "foo") is None
def test_summary(self) -> None:
lines = callequal([0, 1], [0, 2])
assert lines is not None
summary = lines[0]
assert len(summary) < 65
def test_text_diff(self) -> None:
assert callequal("spam", "eggs") == [
"'spam' == 'eggs'",
"",
"- eggs",
"+ spam",
]
def test_text_skipping(self) -> None:
lines = callequal("a" * 50 + "spam", "a" * 50 + "eggs")
assert lines is not None
assert "Skipping" in lines[2]
for line in lines:
assert "a" * 50 not in line
def test_text_skipping_verbose(self) -> None:
lines = callequal("a" * 50 + "spam", "a" * 50 + "eggs", verbose=1)
assert lines is not None
assert "- " + "a" * 50 + "eggs" in lines
assert "+ " + "a" * 50 + "spam" in lines
def test_multiline_text_diff(self) -> None:
left = "foo\nspam\nbar"
right = "foo\neggs\nbar"
diff = callequal(left, right)
assert diff is not None
assert "- eggs" in diff
assert "+ spam" in diff
def test_bytes_diff_normal(self) -> None:
"""Check special handling for bytes diff (#5260)"""
diff = callequal(b"spam", b"eggs")
assert diff == [
"b'spam' == b'eggs'",
"",
"At index 0 diff: b's' != b'e'",
"Use -v to get more diff",
]
def test_bytes_diff_verbose(self) -> None:
"""Check special handling for bytes diff (#5260)"""
diff = callequal(b"spam", b"eggs", verbose=1)
assert diff == [
"b'spam' == b'eggs'",
"",
"At index 0 diff: b's' != b'e'",
"",
"Full diff:",
"- b'eggs'",
"+ b'spam'",
]
def test_list(self) -> None:
expl = callequal([0, 1], [0, 2])
assert expl is not None
assert len(expl) > 1
@pytest.mark.parametrize(
["left", "right", "expected"],
[
pytest.param(
[0, 1],
[0, 2],
"""
Full diff:
[
0,
- 2,
? ^
+ 1,
? ^
]
""",
id="lists",
),
pytest.param(
{0: 1},
{0: 2},
"""
Full diff:
{
- 0: 2,
? ^
+ 0: 1,
? ^
}
""",
id="dicts",
),
pytest.param(
{0, 1},
{0, 2},
"""
Full diff:
{
0,
- 2,
? ^
+ 1,
? ^
}
""",
id="sets",
),
],
)
def test_iterable_full_diff(self, left, right, expected) -> None:
"""Test the full diff assertion failure explanation.
When verbose is False, then just a -v notice to get the diff is rendered,
when verbose is True, then ndiff of the pprint is returned.
"""
expl = callequal(left, right, verbose=0)
assert expl is not None
assert expl[-1] == "Use -v to get more diff"
verbose_expl = callequal(left, right, verbose=1)
assert verbose_expl is not None
assert "\n".join(verbose_expl).endswith(textwrap.dedent(expected).strip())
def test_iterable_quiet(self) -> None:
expl = callequal([1, 2], [10, 2], verbose=-1)
assert expl == [
"[1, 2] == [10, 2]",
"",
"At index 0 diff: 1 != 10",
"Use -v to get more diff",
]
def test_iterable_full_diff_ci(
self, monkeypatch: MonkeyPatch, pytester: Pytester
) -> None:
pytester.makepyfile(
r"""
def test_full_diff():
left = [0, 1]
right = [0, 2]
assert left == right
"""
)
monkeypatch.setenv("CI", "true")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["E Full diff:"])
# Setting CI to empty string is same as having it undefined
monkeypatch.setenv("CI", "")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["E Use -v to get more diff"])
monkeypatch.delenv("CI", raising=False)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["E Use -v to get more diff"])
def test_list_different_lengths(self) -> None:
expl = callequal([0, 1], [0, 1, 2])
assert expl is not None
assert len(expl) > 1
expl = callequal([0, 1, 2], [0, 1])
assert expl is not None
assert len(expl) > 1
def test_list_wrap_for_multiple_lines(self) -> None:
long_d = "d" * 80
l1 = ["a", "b", "c"]
l2 = ["a", "b", "c", long_d]
diff = callequal(l1, l2, verbose=True)
assert diff == [
"['a', 'b', 'c'] == ['a', 'b', 'c...dddddddddddd']",
"",
"Right contains one more item: '" + long_d + "'",
"",
"Full diff:",
" [",
" 'a',",
" 'b',",
" 'c',",
"- '" + long_d + "',",
" ]",
]
diff = callequal(l2, l1, verbose=True)
assert diff == [
"['a', 'b', 'c...dddddddddddd'] == ['a', 'b', 'c']",
"",
"Left contains one more item: '" + long_d + "'",
"",
"Full diff:",
" [",
" 'a',",
" 'b',",
" 'c',",
"+ '" + long_d + "',",
" ]",
]
def test_list_wrap_for_width_rewrap_same_length(self) -> None:
long_a = "a" * 30
long_b = "b" * 30
long_c = "c" * 30
l1 = [long_a, long_b, long_c]
l2 = [long_b, long_c, long_a]
diff = callequal(l1, l2, verbose=True)
assert diff == [
"['aaaaaaaaaaa...cccccccccccc'] == ['bbbbbbbbbbb...aaaaaaaaaaaa']",
"",
"At index 0 diff: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' != 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'",
"",
"Full diff:",
" [",
"+ 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',",
" 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',",
" 'cccccccccccccccccccccccccccccc',",
"- 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',",
" ]",
]
def test_list_dont_wrap_strings(self) -> None:
long_a = "a" * 10
l1 = ["a"] + [long_a for _ in range(7)]
l2 = ["should not get wrapped"]
diff = callequal(l1, l2, verbose=True)
assert diff == [
"['a', 'aaaaaa...aaaaaaa', ...] == ['should not get wrapped']",
"",
"At index 0 diff: 'a' != 'should not get wrapped'",
"Left contains 7 more items, first extra item: 'aaaaaaaaaa'",
"",
"Full diff:",
" [",
"- 'should not get wrapped',",
"+ 'a',",
"+ 'aaaaaaaaaa',",
"+ 'aaaaaaaaaa',",
"+ 'aaaaaaaaaa',",
"+ 'aaaaaaaaaa',",
"+ 'aaaaaaaaaa',",
"+ 'aaaaaaaaaa',",
"+ 'aaaaaaaaaa',",
" ]",
]
def test_dict_wrap(self) -> None:
d1 = {"common": 1, "env": {"env1": 1, "env2": 2}}
d2 = {"common": 1, "env": {"env1": 1}}
diff = callequal(d1, d2, verbose=True)
assert diff == [
"{'common': 1,...1, 'env2': 2}} == {'common': 1,...: {'env1': 1}}",
"",
"Omitting 1 identical items, use -vv to show",
"Differing items:",
"{'env': {'env1': 1, 'env2': 2}} != {'env': {'env1': 1}}",
"",
"Full diff:",
" {",
" 'common': 1,",
" 'env': {",
" 'env1': 1,",
"+ 'env2': 2,",
" },",
" }",
]
long_a = "a" * 80
sub = {"long_a": long_a, "sub1": {"long_a": "substring that gets wrapped " * 3}}
d1 = {"env": {"sub": sub}}
d2 = {"env": {"sub": sub}, "new": 1}
diff = callequal(d1, d2, verbose=True)
assert diff == [
"{'env': {'sub... wrapped '}}}} == {'env': {'sub...}}}, 'new': 1}",
"",
"Omitting 1 identical items, use -vv to show",
"Right contains 1 more item:",
"{'new': 1}",
"",
"Full diff:",
" {",
" 'env': {",
" 'sub': {",
f" 'long_a': '{long_a}',",
" 'sub1': {",
" 'long_a': 'substring that gets wrapped substring that gets wrapped '",
" 'substring that gets wrapped ',",
" },",
" },",
" },",
"- 'new': 1,",
" }",
]
def test_dict(self) -> None:
expl = callequal({"a": 0}, {"a": 1})
assert expl is not None
assert len(expl) > 1
def test_dict_omitting(self) -> None:
lines = callequal({"a": 0, "b": 1}, {"a": 1, "b": 1})
assert lines is not None
assert lines[2].startswith("Omitting 1 identical item")
assert "Common items" not in lines
for line in lines[1:]:
assert "b" not in line
def test_dict_omitting_with_verbosity_1(self) -> None:
"""Ensure differing items are visible for verbosity=1 (#1512)."""
lines = callequal({"a": 0, "b": 1}, {"a": 1, "b": 1}, verbose=1)
assert lines is not None
assert lines[1] == ""
assert lines[2].startswith("Omitting 1 identical item")
assert lines[3].startswith("Differing items")
assert lines[4] == "{'a': 0} != {'a': 1}"
assert "Common items" not in lines
def test_dict_omitting_with_verbosity_2(self) -> None:
lines = callequal({"a": 0, "b": 1}, {"a": 1, "b": 1}, verbose=2)
assert lines is not None
assert lines[2].startswith("Common items:")
assert "Omitting" not in lines[2]
assert lines[3] == "{'b': 1}"
def test_dict_different_items(self) -> None:
lines = callequal({"a": 0}, {"b": 1, "c": 2}, verbose=2)
assert lines == [
"{'a': 0} == {'b': 1, 'c': 2}",
"",
"Left contains 1 more item:",
"{'a': 0}",
"Right contains 2 more items:",
"{'b': 1, 'c': 2}",
"",
"Full diff:",
" {",
"- 'b': 1,",
"? ^ ^",
"+ 'a': 0,",
"? ^ ^",
"- 'c': 2,",
" }",
]
lines = callequal({"b": 1, "c": 2}, {"a": 0}, verbose=2)
assert lines == [
"{'b': 1, 'c': 2} == {'a': 0}",
"",
"Left contains 2 more items:",
"{'b': 1, 'c': 2}",
"Right contains 1 more item:",
"{'a': 0}",
"",
"Full diff:",
" {",
"- 'a': 0,",
"? ^ ^",
"+ 'b': 1,",
"? ^ ^",
"+ 'c': 2,",
" }",
]
def test_sequence_different_items(self) -> None:
lines = callequal((1, 2), (3, 4, 5), verbose=2)
assert lines == [
"(1, 2) == (3, 4, 5)",
"",
"At index 0 diff: 1 != 3",
"Right contains one more item: 5",
"",
"Full diff:",
" (",
"- 3,",
"? ^",
"+ 1,",
"? ^",
"- 4,",
"? ^",
"+ 2,",
"? ^",
"- 5,",
" )",
]
lines = callequal((1, 2, 3), (4,), verbose=2)
assert lines == [
"(1, 2, 3) == (4,)",
"",
"At index 0 diff: 1 != 4",
"Left contains 2 more items, first extra item: 2",
"",
"Full diff:",
" (",
"- 4,",
"? ^",
"+ 1,",
"? ^",
"+ 2,",
"+ 3,",
" )",
]
lines = callequal((1, 2, 3), (1, 20, 3), verbose=2)
assert lines == [
"(1, 2, 3) == (1, 20, 3)",
"",
"At index 1 diff: 2 != 20",
"",
"Full diff:",
" (",
" 1,",
"- 20,",
"? -",
"+ 2,",
" 3,",
" )",
]
def test_set(self) -> None:
expl = callequal({0, 1}, {0, 2})
assert expl is not None
assert len(expl) > 1
def test_frozenzet(self) -> None:
expl = callequal(frozenset([0, 1]), {0, 2})
assert expl is not None
assert len(expl) > 1
def test_Sequence(self) -> None:
# Test comparing with a Sequence subclass.
class TestSequence(MutableSequence[int]):
def __init__(self, iterable):
self.elements = list(iterable)
def __getitem__(self, item):
return self.elements[item]
def __len__(self):
return len(self.elements)
def __setitem__(self, item, value):
pass
def __delitem__(self, item):
pass
def insert(self, index, value):
pass
expl = callequal(TestSequence([0, 1]), list([0, 2]))
assert expl is not None
assert len(expl) > 1
def test_list_tuples(self) -> None:
expl = callequal([], [(1, 2)])
assert expl is not None
assert len(expl) > 1
expl = callequal([(1, 2)], [])
assert expl is not None
assert len(expl) > 1
def test_list_bad_repr(self) -> None:
class A:
def __repr__(self):
raise ValueError(42)
expl = callequal([], [A()])
assert expl is not None
assert "ValueError" in "".join(expl)
expl = callequal({}, {"1": A()}, verbose=2)
assert expl is not None
assert expl[0].startswith("{} == <[ValueError")
assert "raised in repr" in expl[0]
assert expl[2:] == [
"(pytest_assertion plugin: representation of details failed:"
f" {__file__}:{A.__repr__.__code__.co_firstlineno + 1}: ValueError: 42.",
" Probably an object has a faulty __repr__.)",
]
def test_one_repr_empty(self) -> None:
"""The faulty empty string repr did trigger an unbound local error in _diff_text."""
class A(str):
def __repr__(self):
return ""
expl = callequal(A(), "")
assert not expl
def test_repr_no_exc(self) -> None:
expl = callequal("foo", "bar")
assert expl is not None
assert "raised in repr()" not in " ".join(expl)
def test_unicode(self) -> None:
assert callequal("£€", "£") == [
"'£€' == '£'",
"",
"- £",
"+ £€",
]
def test_nonascii_text(self) -> None:
"""
:issue: 877
non ascii python2 str caused a UnicodeDecodeError
"""
class A(str):
def __repr__(self):
return "\xff"
expl = callequal(A(), "1")
assert expl == ["ÿ == '1'", "", "- 1"]
def test_format_nonascii_explanation(self) -> None:
assert util.format_explanation("λ")
def test_mojibake(self) -> None:
# issue 429
left = b"e"
right = b"\xc3\xa9"
expl = callequal(left, right)
assert expl is not None
for line in expl:
assert isinstance(line, str)
msg = "\n".join(expl)
assert msg
def test_nfc_nfd_same_string(self) -> None:
# issue 3426
left = "hyv\xe4"
right = "hyva\u0308"
expl = callequal(left, right)
assert expl == [
r"'hyv\xe4' == 'hyva\u0308'",
"",
f"- {right!s}",
f"+ {left!s}",
]
expl = callequal(left, right, verbose=2)
assert expl == [
r"'hyv\xe4' == 'hyva\u0308'",
"",
f"- {right!s}",
f"+ {left!s}",
]
|
TestAssert_reprcompare
|
python
|
huggingface__transformers
|
src/transformers/models/apertus/modular_apertus.py
|
{
"start": 1407,
"end": 8907
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ApertusModel`]. It is used to instantiate a Apertus
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Apertus-8B.
e.g. [swiss-ai/Apertus-8B](https://huggingface.co/swiss-ai/Apertus-8B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 131072):
Vocabulary size of the Apertus model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ApertusModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"xielu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 65536):
The maximum sequence length that this model might ever be used with. Apertus supports up to 65536 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 3):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import ApertusModel, ApertusConfig
>>> # Initializing a Apertus-8B style configuration
>>> configuration = ApertusConfig()
>>> # Initializing a model from the Apertus-8B style configuration
>>> model = ApertusModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "apertus"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 12000000.0
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.self_attn.k_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.self_attn.v_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.self_attn.o_proj": "rowwise_rep", # we need to replicate here due to the added norm on q and k
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 131072,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 14336,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "xielu",
max_position_embeddings: Optional[int] = 65536,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 3,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters] = {
"rope_type": "llama3",
"rope_theta": 12000000.0,
"factor": 8.0,
"original_max_position_embeddings": 8192,
"low_freq_factor": 1.0,
"high_freq_factor": 4.0,
},
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
|
ApertusConfig
|
python
|
sympy__sympy
|
sympy/assumptions/assume.py
|
{
"start": 4723,
"end": 6485
}
|
class ____(type):
def __new__(cls, clsname, bases, dct):
# If handler is not defined, assign empty dispatcher.
if "handler" not in dct:
name = f"Ask{clsname.capitalize()}Handler"
handler = Dispatcher(name, doc=f"Handler for key {name}")
dct["handler"] = handler
dct["_orig_doc"] = dct.get("__doc__", "")
return super().__new__(cls, clsname, bases, dct)
@property
def __doc__(cls):
handler = cls.handler
doc = cls._orig_doc
if cls is not Predicate and handler is not None:
doc += "Handler\n"
doc += " =======\n\n"
# Append the handler's doc without breaking sphinx documentation.
docs = [f" Multiply dispatched method: {handler.name}"]
if handler.doc:
for line in handler.doc.splitlines():
if not line:
continue
docs.append(f" {line}")
other = []
for sig in handler.ordering[::-1]:
func = handler.funcs[sig]
if func.__doc__:
s = f" Inputs: <{str_signature(sig)}>"
lines = []
for line in func.__doc__.splitlines():
lines.append(f" {line}")
s += "\n".join(lines)
docs.append(s)
else:
other.append(str_signature(sig))
if other:
othersig = " Other signatures:"
for line in other:
othersig += f"\n * {line}"
docs.append(othersig)
doc += '\n\n'.join(docs)
return doc
|
PredicateMeta
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_unittest/test_assertions.py
|
{
"start": 6472,
"end": 18605
}
|
class ____(__TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
super().setUp()
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
errors should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages(
'assertAlmostEqual', (1, 2),
[r"^1 != 2 within 7 places \(1 difference\)$", "^oops$",
r"^1 != 2 within 7 places \(1 difference\)$",
r"^1 != 2 within 7 places \(1 difference\) : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
[r"\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
[r'^None not found in \[\]$', "^oops$",
r'^None not found in \[\]$',
r'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
[r'^None unexpectedly found in \[None\]$', "^oops$",
r'^None unexpectedly found in \[None\]$',
r'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
r"\+ \{'key': 'value'\}$",
r"\+ \{'key': 'value'\} : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo\n$", "^oops$",
r"\+ foo\n$",
r"\+ foo\n : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def testAssertRegex(self):
self.assertMessages('assertRegex', ('foo', 'bar'),
["^Regex didn't match:",
"^oops$",
"^Regex didn't match:",
"^Regex didn't match: (.*) : oops$"])
def testAssertNotRegex(self):
self.assertMessages('assertNotRegex', ('foo', 'foo'),
["^Regex matched:",
"^oops$",
"^Regex matched:",
"^Regex matched: (.*) : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
*errors* should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def test_assertNotWarns(self):
def warn_future():
warnings.warn('xyz', FutureWarning, stacklevel=2)
self.assertMessagesCM('_assertNotWarns', (FutureWarning,),
warn_future,
['^FutureWarning triggered$',
'^oops$',
'^FutureWarning triggered$',
'^FutureWarning triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
if __name__ == "__main__":
run_tests()
|
TestLongMessage
|
python
|
getsentry__sentry
|
src/sentry/snuba/metrics/extraction.py
|
{
"start": 41899,
"end": 42013
}
|
class ____:
function: str
arguments: Sequence[str]
alias: str
@dataclass(frozen=True)
|
FieldParsingResult
|
python
|
django__django
|
tests/auth_tests/test_auth_backends.py
|
{
"start": 1478,
"end": 1785
}
|
class ____(BaseBackend):
def get_user_permissions(self, user_obj, obj=None):
return ["user_perm"]
def get_group_permissions(self, user_obj, obj=None):
return ["group_perm"]
@override_settings(
AUTHENTICATION_BACKENDS=["auth_tests.test_auth_backends.SimpleBackend"]
)
|
SimpleBackend
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_fsdp.py
|
{
"start": 24594,
"end": 25422
}
|
class ____(ModuleWithDelay):
@staticmethod
def init( # type: ignore[override]
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
device_init_mode: DEVICEInitMode = DEVICEInitMode.DEVICE_AFTER,
fsdp_kwargs: Optional[dict[str, Any]] = None,
deterministic: bool = False,
delay_after_loss_ms: int = 0,
delay_before_reduction_ms: int = 0,
):
return ModuleWithDelay.init(
NestedWrappedModule,
group=group,
fsdp_init_mode=fsdp_init_mode,
device_init_mode=device_init_mode,
fsdp_kwargs=fsdp_kwargs,
deterministic=deterministic,
delay_after_loss_ms=delay_after_loss_ms,
delay_before_reduction_ms=delay_before_reduction_ms,
)
|
NestedWrappedModuleWithDelay
|
python
|
encode__django-rest-framework
|
rest_framework/renderers.py
|
{
"start": 14669,
"end": 29493
}
|
class ____(BaseRenderer):
"""
HTML renderer used to self-document the API.
"""
media_type = 'text/html'
format = 'api'
template = 'rest_framework/api.html'
filter_template = 'rest_framework/filters/base.html'
code_style = 'emacs'
charset = 'utf-8'
form_renderer_class = HTMLFormRenderer
def get_default_renderer(self, view):
"""
Return an instance of the first valid renderer.
(Don't use another documenting renderer.)
"""
renderers = [renderer for renderer in view.renderer_classes
if not issubclass(renderer, BrowsableAPIRenderer)]
non_template_renderers = [renderer for renderer in renderers
if not hasattr(renderer, 'get_template_names')]
if not renderers:
return None
elif non_template_renderers:
return non_template_renderers[0]()
return renderers[0]()
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context['indent'] = 4
content = renderer.render(data, accepted_media_type, renderer_context)
render_style = getattr(renderer, 'render_style', 'text')
assert render_style in ['text', 'binary'], 'Expected .render_style ' \
'"text" or "binary", but got "%s"' % render_style
if render_style == 'binary':
return '[%d bytes of binary content]' % len(content)
return content.decode('utf-8') if isinstance(content, bytes) else content
def show_form_for_method(self, view, method, request, obj):
"""
Returns True if a form should be shown for this method.
"""
if method not in view.allowed_methods:
return # Not a valid method
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False # Doesn't have permissions
return True
def _get_serializer(self, serializer_class, view_instance, request, *args, **kwargs):
kwargs['context'] = {
'request': request,
'format': self.format,
'view': view_instance
}
return serializer_class(*args, **kwargs)
def get_rendered_html_form(self, data, view, method, request):
"""
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
In the absence of the View having an associated form then return None.
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
# If this is valid serializer data, and the form is for the same
# HTTP method as was used in the request then use the existing
# serializer instance, rather than dynamically creating a new one.
if request.method == method and serializer is not None:
try:
kwargs = {'data': request.data}
except ParseError:
kwargs = {}
existing_serializer = serializer
else:
kwargs = {}
existing_serializer = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, instance):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
has_serializer = getattr(view, 'get_serializer', None)
has_serializer_class = getattr(view, 'serializer_class', None)
if (
(not has_serializer and not has_serializer_class) or
not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
):
return
if existing_serializer is not None:
with contextlib.suppress(TypeError):
return self.render_form_for_serializer(existing_serializer)
if has_serializer:
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance, **kwargs)
else:
serializer = view.get_serializer(**kwargs)
else:
# at this point we must have a serializer_class
if method in ('PUT', 'PATCH'):
serializer = self._get_serializer(view.serializer_class, view,
request, instance=instance, **kwargs)
else:
serializer = self._get_serializer(view.serializer_class, view,
request, **kwargs)
return self.render_form_for_serializer(serializer)
def render_form_for_serializer(self, serializer):
if isinstance(serializer, serializers.ListSerializer):
return None
if hasattr(serializer, 'initial_data'):
serializer.is_valid()
form_renderer = self.form_renderer_class()
return form_renderer.render(
serializer.data,
self.accepted_media_type,
{'style': {'template_pack': 'rest_framework/horizontal'}}
)
def get_raw_data_form(self, data, view, method, request):
"""
Returns a form that allows for arbitrary content types to be tunneled
via standard HTML forms.
(Which are typically application/x-www-form-urlencoded)
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
with override_method(view, request, method) as request:
# Check permissions
if not self.show_form_for_method(view, method, request, instance):
return
# If possible, serialize the initial content for the generic form
default_parser = view.parser_classes[0]
renderer_class = getattr(default_parser, 'renderer_class', None)
if hasattr(view, 'get_serializer') and renderer_class:
# View has a serializer defined and parser class has a
# corresponding renderer that can be used to render the data.
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance)
else:
serializer = view.get_serializer()
# Render the raw data content
renderer = renderer_class()
accepted = self.accepted_media_type
context = self.renderer_context.copy()
context['indent'] = 4
# strip HiddenField from output
is_list_serializer = isinstance(serializer, serializers.ListSerializer)
serializer = serializer.child if is_list_serializer else serializer
data = serializer.data.copy()
for name, field in serializer.fields.items():
if isinstance(field, serializers.HiddenField):
data.pop(name, None)
data = [data] if is_list_serializer else data
content = renderer.render(data, accepted, context)
# Renders returns bytes, but CharField expects a str.
content = content.decode()
else:
content = None
# Generate a generic form that includes a content type field,
# and a content field.
media_types = [parser.media_type for parser in view.parser_classes]
choices = [(media_type, media_type) for media_type in media_types]
initial = media_types[0]
class GenericContentForm(forms.Form):
_content_type = forms.ChoiceField(
label='Media type',
choices=choices,
initial=initial,
widget=forms.Select(attrs={'data-override': 'content-type'})
)
_content = forms.CharField(
label='Content',
widget=forms.Textarea(attrs={'data-override': 'content'}),
initial=content,
required=False
)
return GenericContentForm()
def get_name(self, view):
return view.get_view_name()
def get_description(self, view, status_code):
if status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN):
return ''
return view.get_view_description(html=True)
def get_breadcrumbs(self, request):
return get_breadcrumbs(request.path, request)
def get_extra_actions(self, view, status_code):
if (status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)):
return None
elif not hasattr(view, 'get_extra_action_url_map'):
return None
return view.get_extra_action_url_map()
def get_filter_form(self, data, view, request):
if not hasattr(view, 'get_queryset') or not hasattr(view, 'filter_backends'):
return
# Infer if this is a list view or not.
paginator = getattr(view, 'paginator', None)
if isinstance(data, list):
pass
elif paginator is not None and data is not None:
try:
paginator.get_results(data)
except (TypeError, KeyError):
return
elif not isinstance(data, list):
return
queryset = view.get_queryset()
elements = []
for backend in view.filter_backends:
if hasattr(backend, 'to_html'):
html = backend().to_html(request, queryset, view)
if html:
elements.append(html)
if not elements:
return
template = loader.get_template(self.filter_template)
context = {'elements': elements}
return template.render(context)
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = dict(sorted(response.items()))
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
renderer_content_type += ' ;%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
if getattr(view, 'paginator', None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
csrf_cookie_name = settings.CSRF_COOKIE_NAME
csrf_header_name = settings.CSRF_HEADER_NAME
if csrf_header_name.startswith('HTTP_'):
csrf_header_name = csrf_header_name[5:]
csrf_header_name = csrf_header_name.replace('_', '-')
return {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'code_style': pygments_css(self.code_style),
'view': view,
'request': request,
'response': response,
'user': request.user,
'description': self.get_description(view, response.status_code),
'name': self.get_name(view),
'version': VERSION,
'paginator': paginator,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'extra_actions': self.get_extra_actions(view, response.status_code),
'filter_form': self.get_filter_form(data, view, request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings,
'csrf_cookie_name': csrf_cookie_name,
'csrf_header_name': csrf_header_name
}
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render the HTML for the browsable API representation.
"""
self.accepted_media_type = accepted_media_type or ''
self.renderer_context = renderer_context or {}
template = loader.get_template(self.template)
context = self.get_context(data, accepted_media_type, renderer_context)
ret = template.render(context, request=renderer_context['request'])
# Munge DELETE Response code to allow us to return content
# (Do this *after* we've rendered the template so that we include
# the normal deletion response code in the output)
response = renderer_context['response']
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_200_OK
return ret
|
BrowsableAPIRenderer
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol3.py
|
{
"start": 2513,
"end": 2619
}
|
class ____:
@property
def bar(self: C6) -> ContextManager[C6]: ...
i: MockClass6 = Class6()
|
Class6
|
python
|
cython__cython
|
Cython/Compiler/PyrexTypes.py
|
{
"start": 181880,
"end": 182210
}
|
class ____(PyrexType):
# Used as a placeholder until the type can be determined.
is_unspecified = 1
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<unspecified>"
def same_as_resolved_type(self, other_type):
return False
|
UnspecifiedType
|
python
|
django__django
|
tests/test_runner/test_debug_sql.py
|
{
"start": 6575,
"end": 11786
}
|
class ____(unittest.TestCase):
class PassingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name="pass").count()
class FailingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name="fail").count()
self.fail()
class ErrorTest(TestCase):
def runTest(self):
Person.objects.filter(first_name="error").count()
raise Exception
class ErrorSetUpTestDataTest(TestCase):
@classmethod
def setUpTestData(cls):
raise Exception
def runTest(self):
pass
class PassingSubTest(TestCase):
def runTest(self):
with self.subTest():
Person.objects.filter(first_name="subtest-pass").count()
class FailingSubTest(TestCase):
def runTest(self):
with self.subTest():
Person.objects.filter(first_name="subtest-fail").count()
self.fail()
class ErrorSubTest(TestCase):
def runTest(self):
with self.subTest():
Person.objects.filter(first_name="subtest-error").count()
raise Exception
def _test_output(self, verbosity):
runner = DiscoverRunner(debug_sql=True, verbosity=0)
suite = runner.test_suite()
suite.addTest(self.FailingTest())
suite.addTest(self.ErrorTest())
suite.addTest(self.PassingTest())
suite.addTest(self.PassingSubTest())
suite.addTest(self.FailingSubTest())
suite.addTest(self.ErrorSubTest())
old_config = runner.setup_databases()
stream = StringIO()
resultclass = runner.get_resultclass()
runner.test_runner(
verbosity=verbosity,
stream=stream,
resultclass=resultclass,
).run(suite)
runner.teardown_databases(old_config)
return stream.getvalue()
def test_output_normal(self):
full_output = self._test_output(1)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertNotIn(output, full_output)
def test_output_verbose(self):
full_output = self._test_output(2)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertIn(output, full_output)
expected_outputs = [
(
"""SELECT COUNT(*) AS "__count"\n"""
"""FROM "test_runner_person"\n"""
"""WHERE "test_runner_person"."first_name" = 'error'; """
"""args=('error',); alias=default"""
),
(
"""SELECT COUNT(*) AS "__count"\n"""
"""FROM "test_runner_person"\n"""
"""WHERE "test_runner_person"."first_name" = 'fail'; """
"""args=('fail',); alias=default"""
),
(
"""SELECT COUNT(*) AS "__count"\n"""
"""FROM "test_runner_person"\n"""
"""WHERE "test_runner_person"."first_name" = 'subtest-error'; """
"""args=('subtest-error',); alias=default"""
),
(
"""SELECT COUNT(*) AS "__count"\n"""
"""FROM "test_runner_person"\n"""
"""WHERE "test_runner_person"."first_name" = 'subtest-fail'; """
"""args=('subtest-fail',); alias=default"""
),
]
test_class_path = "test_runner.test_debug_sql.TestDebugSQL"
verbose_expected_outputs = [
f"runTest ({test_class_path}.FailingTest.runTest) ... FAIL",
f"runTest ({test_class_path}.ErrorTest.runTest) ... ERROR",
f"runTest ({test_class_path}.PassingTest.runTest) ... ok",
# If there are errors/failures in subtests but not in test itself,
# the status is not written. That behavior comes from Python.
f"runTest ({test_class_path}.FailingSubTest.runTest) ...",
f"runTest ({test_class_path}.ErrorSubTest.runTest) ...",
(
"""SELECT COUNT(*) AS "__count"\n"""
"""FROM "test_runner_person"\nWHERE """
""""test_runner_person"."first_name" = 'pass'; """
"""args=('pass',); alias=default"""
),
(
"""SELECT COUNT(*) AS "__count"\n"""
"""FROM "test_runner_person"\nWHERE """
""""test_runner_person"."first_name" = 'subtest-pass'; """
"""args=('subtest-pass',); alias=default"""
),
]
def test_setupclass_exception(self):
runner = DiscoverRunner(debug_sql=True, verbosity=0)
suite = runner.test_suite()
suite.addTest(self.ErrorSetUpTestDataTest())
old_config = runner.setup_databases()
stream = StringIO()
runner.test_runner(
verbosity=0,
stream=stream,
resultclass=runner.get_resultclass(),
).run(suite)
runner.teardown_databases(old_config)
output = stream.getvalue()
self.assertIn(
"ERROR: setUpClass "
"(test_runner.test_debug_sql.TestDebugSQL.ErrorSetUpTestDataTest)",
output,
)
|
TestDebugSQL
|
python
|
spack__spack
|
lib/spack/spack/modules/lmod.py
|
{
"start": 18938,
"end": 19130
}
|
class ____(spack.error.SpackError, TypeError):
"""Error raised if non-virtual specs are used as hierarchy tokens in
the lmod section of ``modules.yaml``.
"""
|
NonVirtualInHierarchyError
|
python
|
huggingface__transformers
|
src/transformers/pipelines/automatic_speech_recognition.py
|
{
"start": 4077,
"end": 33333
}
|
class ____(ChunkPipeline):
"""
Pipeline that aims at extracting spoken text contained within some audio.
The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for
to support multiple audio formats
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
- num_beams: 5
Example:
```python
>>> from transformers import pipeline
>>> transcriber = pipeline(model="openai/whisper-base")
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
{'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'}
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
Arguments:
model ([`PreTrainedModel`]):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
[`PreTrainedModel`].
feature_extractor ([`SequenceFeatureExtractor`]):
The feature extractor that will be used by the pipeline to encode waveform for the model.
tokenizer ([`PreTrainedTokenizer`]):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
[`PreTrainedTokenizer`].
decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*):
[PyCTCDecode's
BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180)
can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information.
chunk_length_s (`float`, *optional*, defaults to 0):
The input length for in each chunk. If `chunk_length_s = 0` then chunking is disabled (default).
<Tip>
For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking
blog post](https://huggingface.co/blog/asr-chunking).
</Tip>
stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables
the model to *see* more context and infer letters better than without this context but the pipeline
discards the stride bits at the end to make the final reconstitution as perfect as possible.
<Tip>
For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking
blog post](https://huggingface.co/blog/asr-chunking).
</Tip>
device (Union[`int`, `torch.device`], *optional*):
Device ordinal for CPU/GPU supports. Setting this to `None` will leverage CPU, a positive will run the
model on the associated CUDA device id.
"""
_pipeline_calls_generate = True
_load_processor = False
_load_image_processor = False
_load_feature_extractor = True
_load_tokenizer = True
# Make sure the docstring is updated when the default generation config is changed
_default_generation_config = GenerationConfig(
max_new_tokens=256,
num_beams=5, # follows openai's whisper implementation
)
def __init__(
self,
model: "PreTrainedModel",
feature_extractor: Union["SequenceFeatureExtractor", str] | None = None,
tokenizer: PreTrainedTokenizer | None = None,
decoder: Union["BeamSearchDecoderCTC", str] | None = None,
device: Union[int, "torch.device"] | None = None,
**kwargs,
):
# set the model type so we can check we have the right pre- and post-processing parameters
if model.config.model_type == "whisper":
self.type = "seq2seq_whisper"
elif model.__class__.__name__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.values():
self.type = "seq2seq"
elif (
feature_extractor._processor_class
and feature_extractor._processor_class.endswith("WithLM")
and decoder is not None
):
self.decoder = decoder
self.type = "ctc_with_lm"
else:
self.type = "ctc"
super().__init__(model, tokenizer, feature_extractor, device=device, **kwargs)
def __call__(self, inputs: np.ndarray | bytes | str | dict, **kwargs: Any) -> list[dict[str, Any]]:
"""
Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`]
documentation for more information.
Args:
inputs (`np.ndarray` or `bytes` or `str` or `dict`):
The inputs is either :
- `str` that is either the filename of a local audio file, or a public URL address to download the
audio file. The file will be read at the correct sampling rate to get the waveform using
*ffmpeg*. This requires *ffmpeg* to be installed on the system.
- `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
same way.
- (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
Raw audio at the correct sampling rate (no further check will be done)
- `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw":
np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to
treat the first `left` samples and last `right` samples to be ignored in decoding (but used at
inference to provide more context to the model). Only use `stride` with CTC models.
return_timestamps (*optional*, `str` or `bool`):
Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for
other sequence-to-sequence models.
For CTC models, timestamps can take one of two formats:
- `"char"`: the pipeline will return timestamps along the text for every character in the text. For
instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7,
0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before
`0.6` seconds.
- `"word"`: the pipeline will return timestamps along the text for every word in the text. For
instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp":
(1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and
before `0.9` seconds.
For the Whisper model, timestamps can take one of two formats:
- `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted
through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps
by inspecting the cross-attention weights.
- `True`: the pipeline will return timestamps along the text for *segments* of words in the text.
For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the
model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds.
Note that a segment of text refers to a sequence of one or more words, rather than individual
words as with word-level timestamps.
generate_kwargs (`dict`, *optional*):
The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
complete overview of generate, check the [following
guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation).
Return:
`Dict`: A dictionary with the following keys:
- **text** (`str`): The recognized text.
- **chunks** (*optional(, `list[Dict]`)
When using `return_timestamps`, the `chunks` will become a list containing all the various text
chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text":
"there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing
`"".join(chunk["text"] for chunk in output["chunks"])`.
"""
return super().__call__(inputs, **kwargs)
    def _sanitize_parameters(
        self,
        chunk_length_s=None,
        stride_length_s=None,
        ignore_warning=None,
        decoder_kwargs=None,
        return_timestamps=None,
        return_language=None,
        **generate_kwargs,
    ):
        """Route call-time kwargs to the preprocess / forward / postprocess steps.

        Returns a ``(preprocess_params, forward_params, postprocess_params)``
        tuple. Any leftover ``**generate_kwargs`` are treated as
        generation-time kwargs and forwarded to the model call. Raises
        ``ValueError`` for `return_timestamps` / `return_language` settings
        that the current model type (``self.type``) cannot honor.
        """
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        # Preprocess params
        if chunk_length_s is not None:
            if self.type in ["seq2seq", "seq2seq_whisper"] and not ignore_warning:
                # Chunking seq2seq models is approximate; warn unless explicitly silenced.
                type_warning = (
                    "Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily"
                    " be entirely accurate and will have caveats. More information:"
                    " https://github.com/huggingface/transformers/pull/20104. Ignore this warning with pipeline(...,"
                    " ignore_warning=True)."
                )
                if self.type == "seq2seq_whisper":
                    type_warning += (
                        " To use Whisper for long-form transcription, use rather the model's `generate` method directly "
                        "as the model relies on it's own chunking mechanism (cf. Whisper original paper, section 3.8. "
                        "Long-form Transcription)."
                    )
                logger.warning(type_warning)
            preprocess_params["chunk_length_s"] = chunk_length_s
        if stride_length_s is not None:
            preprocess_params["stride_length_s"] = stride_length_s
        # Forward params
        # BC: accept a dictionary of generation kwargs (as opposed to **generate_kwargs)
        if "generate_kwargs" in generate_kwargs:
            forward_params.update(generate_kwargs.pop("generate_kwargs"))
        # Default use for kwargs: they are generation-time kwargs
        forward_params.update(generate_kwargs)
        if getattr(self, "assistant_model", None) is not None:
            # Assisted (speculative) decoding: forward the draft model, and its
            # tokenizer when one was configured on the pipeline.
            forward_params["assistant_model"] = self.assistant_model
        if getattr(self, "assistant_tokenizer", None) is not None:
            forward_params["tokenizer"] = self.tokenizer
            forward_params["assistant_tokenizer"] = self.assistant_tokenizer
        # Postprocess params
        if decoder_kwargs is not None:
            postprocess_params["decoder_kwargs"] = decoder_kwargs
        if return_language is not None:
            if self.type != "seq2seq_whisper":
                raise ValueError("Only Whisper can return language for now.")
            postprocess_params["return_language"] = return_language
        # Parameter used in more than one place
        # in some models like whisper, the generation config has a `return_timestamps` key
        if hasattr(self, "generation_config") and hasattr(self.generation_config, "return_timestamps"):
            return_timestamps = return_timestamps or self.generation_config.return_timestamps
        if return_timestamps is not None:
            # Check whether we have a valid setting for return_timestamps and throw an error before we perform a forward pass
            if self.type == "seq2seq" and return_timestamps:
                raise ValueError("We cannot return_timestamps yet on non-CTC models apart from Whisper!")
            if self.type == "ctc_with_lm" and return_timestamps != "word":
                raise ValueError("CTC with LM can only predict word level timestamps, set `return_timestamps='word'`")
            if self.type == "ctc" and return_timestamps not in ["char", "word"]:
                raise ValueError(
                    "CTC can either predict character level timestamps, or word level timestamps. "
                    "Set `return_timestamps='char'` or `return_timestamps='word'` as required."
                )
            if self.type == "seq2seq_whisper" and return_timestamps == "char":
                raise ValueError(
                    "Whisper cannot return `char` timestamps, only word level or segment level timestamps. "
                    "Use `return_timestamps='word'` or `return_timestamps=True` respectively."
                )
            # Needed both when generating (forward) and when stitching text back (postprocess).
            forward_params["return_timestamps"] = return_timestamps
            postprocess_params["return_timestamps"] = return_timestamps
        return preprocess_params, forward_params, postprocess_params
    def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None):
        """Normalize `inputs` into model features; a generator of feature dicts.

        Accepts a URL or local path (str), raw encoded bytes, a numpy array /
        torch tensor, a torchcodec ``AudioDecoder``, or a dict carrying
        ``"raw"``/``"array"`` plus ``"sampling_rate"``. Resamples to the
        feature extractor's rate when needed, then either chunks the audio
        into overlapping windows (when `chunk_length_s` is set) or
        feature-extracts the whole clip in one shot. Unrecognized dict keys
        are carried through untouched (``extra``) to later pipeline stages.
        """
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                inputs = httpx.get(inputs, follow_redirects=True).content
            else:
                with open(inputs, "rb") as f:
                    inputs = f.read()
        if isinstance(inputs, bytes):
            # Decode compressed audio into a waveform at the model's sampling rate.
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)
        stride = None
        extra = {}
        if is_torch_available():
            import torch
            if isinstance(inputs, torch.Tensor):
                inputs = inputs.cpu().numpy()
        if is_torchcodec_available():
            import torchcodec
            if isinstance(inputs, torchcodec.decoders.AudioDecoder):
                _audio_samples = inputs.get_all_samples()
                # torchcodec always returns (num_channels, num_samples)
                # while before (datasets < 4.0) we had (2, num_samples) if stereo, (num_samples,) if mono
                _array = _audio_samples.data
                _array = _array[0] if _array.ndim == 2 and _array.shape[0] == 1 else _array
                inputs = {"array": _array, "sampling_rate": _audio_samples.sample_rate}
        if isinstance(inputs, dict):
            stride = inputs.pop("stride", None)
            # Accepting `"array"` which is the key defined in `datasets` for
            # better integration
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array or torch tensor representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )
            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            # Whatever is left in the dict flows through to postprocess untouched.
            extra = inputs
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )
                inputs = F.resample(
                    torch.from_numpy(inputs) if isinstance(inputs, np.ndarray) else inputs,
                    in_sampling_rate,
                    self.feature_extractor.sampling_rate,
                ).numpy()
                # Sample-count scaling induced by resampling; used to rescale strides below.
                ratio = self.feature_extractor.sampling_rate / in_sampling_rate
            else:
                ratio = 1
            if stride is not None:
                if stride[0] + stride[1] > inputs.shape[0]:
                    raise ValueError("Stride is too large for input")
                # Stride needs to get the chunk length here, it's going to get
                # swallowed by the `feature_extractor` later, and then batching
                # can add extra data in the inputs, so we need to keep track
                # of the original length in the stride so we can cut properly.
                stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio)))
        if not isinstance(inputs, (np.ndarray, torch.Tensor)):
            raise TypeError(f"We expect a numpy ndarray or torch tensor as input, got `{type(inputs)}`")
        if inputs.ndim != 1:
            logger.warning(
                f"We expect a single channel audio input for AutomaticSpeechRecognitionPipeline, got {inputs.ndim}. Taking the mean of the channels for mono conversion."
            )
            # Downmix multi-channel audio to mono by averaging over axis 0
            # (assumes channels-first layout — matches the torchcodec branch above).
            inputs = inputs.mean(axis=0)
        if chunk_length_s:
            # Chunked path: split long audio into overlapping windows.
            if stride_length_s is None:
                stride_length_s = chunk_length_s / 6
            if isinstance(stride_length_s, (int, float)):
                stride_length_s = [stride_length_s, stride_length_s]
            # XXX: Carefully, this variable will not exist in `seq2seq` setting.
            # Currently chunking is not possible at this level for `seq2seq` so
            # it's ok.
            align_to = getattr(self.model.config, "inputs_to_logits_ratio", 1)
            # Round chunk/stride sizes to multiples of the model's input-to-logits ratio.
            chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to)
            stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to)
            stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to)
            if chunk_len < stride_left + stride_right:
                raise ValueError("Chunk length must be superior to stride length")
            for item in chunk_iter(inputs, self.feature_extractor, chunk_len, stride_left, stride_right, self.dtype):
                yield {**item, **extra}
        else:
            # Single-shot path: feature-extract the whole clip at once.
            if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples:
                # Longer than Whisper's fixed window: pad to longest, keep the attention mask.
                processed = self.feature_extractor(
                    inputs,
                    sampling_rate=self.feature_extractor.sampling_rate,
                    truncation=False,
                    padding="longest",
                    return_tensors="pt",
                    return_attention_mask=True,
                )
            else:
                if self.type == "seq2seq_whisper" and stride is None:
                    # Short whisper input: also request frame counts, used for word timestamps.
                    processed = self.feature_extractor(
                        inputs,
                        sampling_rate=self.feature_extractor.sampling_rate,
                        return_tensors="pt",
                        return_token_timestamps=True,
                        return_attention_mask=True,
                    )
                    extra["num_frames"] = processed.pop("num_frames")
                else:
                    processed = self.feature_extractor(
                        inputs,
                        sampling_rate=self.feature_extractor.sampling_rate,
                        return_tensors="pt",
                        return_attention_mask=True,
                    )
            if self.dtype is not None:
                processed = processed.to(dtype=self.dtype)
            if stride is not None:
                if self.type == "seq2seq":
                    raise ValueError("Stride is only usable with CTC models, try removing it !")
                # Forward the (length, left, right) stride so postprocess can trim overlaps.
                processed["stride"] = stride
            yield {"is_last": True, **processed, **extra}
    def _forward(self, model_inputs, return_timestamps=False, **generate_kwargs):
        """Run one model pass on a (possibly chunked) batch of features.

        Seq2seq models go through ``self.model.generate``; CTC models through
        a plain forward pass (greedy argmax happens here, LM beam search in
        postprocess). Returns a dict carrying ``tokens`` or ``logits``, the
        chunk ``stride`` (rescaled to the logits timebase for CTC) and
        ``is_last``, plus any leftover keys from `model_inputs`.
        """
        attention_mask = model_inputs.pop("attention_mask", None)
        stride = model_inputs.pop("stride", None)
        num_frames = model_inputs.pop("num_frames", None)
        is_last = model_inputs.pop("is_last")
        if stride is not None and num_frames is not None:
            raise ValueError("num_frames must be used only when stride is None")
        if self.type in {"seq2seq", "seq2seq_whisper"}:
            # Consume values so we can let extra information flow freely through
            # the pipeline (important for `partial` in microphone)
            if "input_features" in model_inputs:
                inputs = model_inputs.pop("input_features")
            elif "input_values" in model_inputs:
                inputs = model_inputs.pop("input_values")
            else:
                raise ValueError(
                    "Seq2Seq speech recognition model requires either a "
                    f"`input_features` or `input_values` key, but only has {model_inputs.keys()}"
                )
            # custom processing for Whisper timestamps and word-level timestamps
            return_timestamps = return_timestamps or getattr(self.generation_config, "return_timestamps", False)
            if return_timestamps and self.type == "seq2seq_whisper":
                generate_kwargs["return_timestamps"] = bool(return_timestamps)
                if return_timestamps == "word":
                    generate_kwargs["return_token_timestamps"] = True
                    generate_kwargs["return_segments"] = True
            # User-defined `generation_config` passed to the pipeline call take precedence
            if "generation_config" not in generate_kwargs:
                generate_kwargs["generation_config"] = self.generation_config
            main_input_name = self.model.main_input_name if hasattr(self.model, "main_input_name") else "inputs"
            generate_kwargs = {
                main_input_name: inputs,
                "attention_mask": attention_mask,
                **generate_kwargs,
            }
            tokens = self.model.generate(**generate_kwargs)
            # whisper longform generation stores timestamps in "segments"
            if return_timestamps == "word" and self.type == "seq2seq_whisper":
                if "segments" not in tokens:
                    out = {"tokens": tokens["sequences"], "token_timestamps": tokens["token_timestamps"]}
                else:
                    # Concatenate per-segment token timestamps into one tensor per batch item.
                    token_timestamps = [
                        torch.cat([segment["token_timestamps"] for segment in segment_list])
                        for segment_list in tokens["segments"]
                    ]
                    out = {"tokens": tokens["sequences"], "token_timestamps": token_timestamps}
            else:
                out = {"tokens": tokens}
            if self.type == "seq2seq_whisper":
                if stride is not None:
                    out["stride"] = stride
        else:
            # CTC path: plain forward pass; argmax here, trimming/decoding in postprocess.
            inputs = {
                self.model.main_input_name: model_inputs.pop(self.model.main_input_name),
                "attention_mask": attention_mask,
            }
            outputs = self.model(**inputs)
            logits = outputs.logits
            if self.type == "ctc_with_lm":
                # Keep raw logits: the pyctcdecode LM decoder consumes them in postprocess.
                out = {"logits": logits}
            else:
                out = {"tokens": logits.argmax(dim=-1)}
            if stride is not None:
                # Send stride to `postprocess`.
                # it needs to be handled there where
                # the pieces are to be concatenated.
                ratio = 1 / self.model.config.inputs_to_logits_ratio
                if isinstance(stride, tuple):
                    out["stride"] = rescale_stride([stride], ratio)[0]
                else:
                    out["stride"] = rescale_stride(stride, ratio)
        # Leftover
        extra = model_inputs
        return {"is_last": is_last, **out, **extra}
    def postprocess(
        self, model_outputs, decoder_kwargs: dict | None = None, return_timestamps=None, return_language=None
    ):
        """Merge chunk outputs and decode them into the final ``{"text", ...}`` dict.

        Trims chunk overlaps (CTC), stitches sequences across chunks
        (seq2seq / whisper), decodes with the tokenizer or the pyctcdecode LM
        decoder, and optionally attaches timestamp ``chunks`` and detected
        language. Extra keys carried through the pipeline are accumulated
        into lists and returned alongside the text.
        """
        # Optional return types
        optional = {}
        final_items = []
        key = "logits" if self.type == "ctc_with_lm" else "tokens"
        stride = None
        for outputs in model_outputs:
            # numpy has no bf16; upcast half-precision tensors before conversion.
            if outputs[key].dtype in (torch.bfloat16, torch.float16):
                items = outputs[key].to(torch.float32).numpy()
            else:
                items = outputs[key].numpy()
            stride = outputs.get("stride", None)
            if stride is not None and self.type in {"ctc", "ctc_with_lm"}:
                total_n, left, right = stride
                # Total_n might be < logits.shape[1]
                # because of padding, that's why
                # we need to reconstruct this information
                # This won't work with left padding (which doesn't exist right now)
                right_n = total_n - right
                items = items[:, left:right_n]
            final_items.append(items)
        if stride and self.type == "seq2seq":
            items = _find_longest_common_sequence(final_items, self.tokenizer)
        elif self.type == "seq2seq_whisper":
            time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions
            # Send the chunking back to seconds, it's easier to handle in whisper
            sampling_rate = self.feature_extractor.sampling_rate
            for output in model_outputs:
                if "stride" in output:
                    chunk_len, stride_left, stride_right = output["stride"]
                    # Go back in seconds
                    chunk_len /= sampling_rate
                    stride_left /= sampling_rate
                    stride_right /= sampling_rate
                    output["stride"] = chunk_len, stride_left, stride_right
            # The whisper tokenizer does the chunk-merging and timestamp work itself.
            text, optional = self.tokenizer._decode_asr(
                model_outputs,
                return_timestamps=return_timestamps,
                return_language=return_language,
                time_precision=time_precision,
            )
        else:
            # CTC: concatenate the trimmed per-chunk frames along the time axis.
            items = np.concatenate(final_items, axis=1)
            items = items.squeeze(0)
        if self.type == "ctc_with_lm":
            if decoder_kwargs is None:
                decoder_kwargs = {}
            beams = self.decoder.decode_beams(items, **decoder_kwargs)
            text = beams[0][0]
            if return_timestamps:
                # Simply cast from pyctcdecode format to wav2vec2 format to leverage
                # pre-existing code later
                chunk_offset = beams[0][2]
                offsets = []
                for word, (start_offset, end_offset) in chunk_offset:
                    offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
        elif self.type != "seq2seq_whisper":
            skip_special_tokens = self.type != "ctc"
            text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens)
            if return_timestamps:
                offsets = self.tokenizer.decode(
                    items, skip_special_tokens=skip_special_tokens, output_char_offsets=True
                )["char_offsets"]
                if return_timestamps == "word":
                    offsets = self.tokenizer._get_word_offsets(offsets, self.tokenizer.replace_word_delimiter_char)
        if return_timestamps and self.type not in {"seq2seq", "seq2seq_whisper"}:
            chunks = []
            for item in offsets:
                # Convert logits-frame offsets back into seconds.
                start = item["start_offset"] * self.model.config.inputs_to_logits_ratio
                start /= self.feature_extractor.sampling_rate
                stop = item["end_offset"] * self.model.config.inputs_to_logits_ratio
                stop /= self.feature_extractor.sampling_rate
                chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)})
            optional["chunks"] = chunks
        extra = defaultdict(list)
        for output in model_outputs:
            # Strip pipeline-internal keys; everything else is user data passed through.
            output.pop("tokens", None)
            output.pop("logits", None)
            output.pop("is_last", None)
            output.pop("stride", None)
            output.pop("token_timestamps", None)
            for k, v in output.items():
                extra[k].append(v)
        return {"text": text, **optional, **extra}
|
AutomaticSpeechRecognitionPipeline
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/super4.py
|
{
"start": 391,
"end": 572
}
|
class ____(Parent1["Child1"]):
    # Type-checker regression sample: `super()` inside a classmethod of a
    # generic subclass should resolve against the subclass binding.
    @classmethod
    def construct(cls) -> "Child1":
        # Delegates to the base implementation; the annotation narrows the result.
        return super().construct()
# The classmethod's declared return type should be honored at the call site.
reveal_type(Child1.construct(), expected_text="Child1")
|
Child1
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/sql_dataset_test.py
|
{
"start": 28130,
"end": 29214
}
|
class ____(SqlDatasetTestBase,
                               checkpoint_test_base.CheckpointTestBase,
                               parameterized.TestCase):
    """Save/restore (checkpoint) coverage for `tf.data.experimental.SqlDataset`."""
    def _build_dataset(self, num_repeats):
        """Build a SqlDataset over the test `students` table, repeated `num_repeats` times."""
        data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
        # Placeholder-with-default keeps the driver overridable; defaults to sqlite.
        driver_name = array_ops.placeholder_with_default(
            array_ops.constant("sqlite", dtypes.string), shape=[])
        query = ("SELECT first_name, last_name, motto FROM students ORDER BY "
                 "first_name DESC")
        output_types = (dtypes.string, dtypes.string, dtypes.string)
        return readers.SqlDataset(driver_name, data_source_name, query,
                                  output_types).repeat(num_repeats)
    @combinations.generate(
        combinations.times(test_base.default_test_combinations(),
                           checkpoint_test_base.default_test_combinations()))
    def test(self, verify_fn):
        # Each repeat of the query yields 2 rows — presumably the fixture DB
        # has two student records; confirm against SqlDatasetTestBase.
        num_repeats = 4
        num_outputs = num_repeats * 2
        verify_fn(self, lambda: self._build_dataset(num_repeats), num_outputs)
if __name__ == "__main__":
    test.main()
|
SqlDatasetCheckpointTest
|
python
|
doocs__leetcode
|
solution/0800-0899/0885.Spiral Matrix III/Solution.py
|
{
"start": 0,
"end": 664
}
|
class ____:
def spiralMatrixIII(
self, rows: int, cols: int, rStart: int, cStart: int
) -> List[List[int]]:
ans = [[rStart, cStart]]
if rows * cols == 1:
return ans
k = 1
while True:
for dr, dc, dk in [[0, 1, k], [1, 0, k], [0, -1, k + 1], [-1, 0, k + 1]]:
for _ in range(dk):
rStart += dr
cStart += dc
if 0 <= rStart < rows and 0 <= cStart < cols:
ans.append([rStart, cStart])
if len(ans) == rows * cols:
return ans
k += 2
|
Solution
|
python
|
jina-ai__jina
|
jina/proto/docarray_v1/pb/jina_pb2_grpc.py
|
{
"start": 13024,
"end": 14012
}
|
class ____(object):
    """*
    jina gRPC service to expose Endpoints from Executors.
    """
    # Auto-generated gRPC stub: concrete servers subclass this and override dry_run.
    def dry_run(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Default behavior for un-overridden handlers: report UNIMPLEMENTED to the client.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_JinaGatewayDryRunRPCServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for the `jina.JinaGatewayDryRunRPC` service."""
    # Unary-unary: Empty request in, StatusProto response out.
    rpc_method_handlers = {
        'dry_run': grpc.unary_unary_rpc_method_handler(
            servicer.dry_run,
            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            response_serializer=jina__pb2.StatusProto.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'jina.JinaGatewayDryRunRPC', rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
|
JinaGatewayDryRunRPCServicer
|
python
|
langchain-ai__langchain
|
libs/partners/mistralai/langchain_mistralai/embeddings.py
|
{
"start": 858,
"end": 1095
}
|
class ____:
"""Dummy tokenizer for when tokenizer cannot be accessed (e.g., via Huggingface)."""
@staticmethod
def encode_batch(texts: list[str]) -> list[list[str]]:
return [list(text) for text in texts]
|
DummyTokenizer
|
python
|
getsentry__sentry
|
tests/acceptance/test_proxy.py
|
{
"start": 1216,
"end": 2840
}
|
class ____(TransactionTestCase):
    """End-to-end check that a request entering the control silo is proxied
    through the API gateway to a region silo and mutates region-side data.
    """
    live_server: LiveServer
    endpoint = "sentry-api-0-organization-teams"
    method = "post"
    organization: Organization
    api_key: ApiKey
    def get_response(self, *args: str, **params: Any) -> HttpResponse | StreamingHttpResponse:
        """Issue `self.method` against `self.endpoint` with `params` as the JSON body."""
        url = reverse(self.endpoint, args=args)
        # `extra_headers` is popped out of the body and passed as request headers.
        headers = params.pop("extra_headers", {})
        return getattr(self.client, self.method)(url, format="json", data=params, **headers)
    def test_through_api_gateway(self) -> None:
        # Only meaningful in siloed deployments; monolith has no gateway hop.
        if SiloMode.get_current_mode() == SiloMode.MONOLITH:
            return
        self.client = APIClient()
        # Point the "us" region at this test's live server so the proxy loops back here.
        config = asdict(test_region)
        config["address"] = self.live_server.url
        with override_regions([Region(**config)]):
            self.organization = Factories.create_organization(owner=self.user, region="us")
            self.api_key = Factories.create_api_key(
                organization=self.organization, scope_list=["org:write", "org:admin", "team:write"]
            )
            # Issue the request from the CONTROL silo; it must be proxied to the region.
            with SingleProcessSiloModeState.enter(SiloMode.CONTROL):
                resp = self.get_response(
                    self.organization.slug,
                    name="hello world",
                    idp_provisioned=True,
                    extra_headers=dict(
                        HTTP_AUTHORIZATION=self.create_basic_auth_header(self.api_key.key)
                    ),
                )
                assert_status_code(resp, 201)
                result = json.loads(resp.getvalue())
                # The team row must exist region-side with the flag carried through.
                team = Team.objects.get(id=result["id"])
                assert team.idp_provisioned
|
EndToEndAPIProxyTest
|
python
|
pola-rs__polars
|
py-polars/src/polars/io/iceberg/_utils.py
|
{
"start": 18889,
"end": 19213
}
|
class ____(LoadFromBytesImpl):
    """Decode date values stored as 4-byte little-endian integers into a Date series."""
    def load_from_bytes(self, byte_values: list[bytes | None]) -> pl.Series:
        import polars as pl
        # Bytes -> Int32 (little-endian) -> Date. The Int32 is presumably a
        # days-since-epoch count per the Iceberg spec — confirm against caller.
        return (
            pl.Series(byte_values, dtype=pl.Binary)
            .bin.reinterpret(dtype=pl.Int32, endianness="little")
            .cast(pl.Date)
        )
|
LoadDateFromBytes
|
python
|
huggingface__transformers
|
src/transformers/models/dinov2/modeling_dinov2.py
|
{
"start": 10930,
"end": 11352
}
|
class ____(nn.Module):
    """Attention block: Dinov2SelfAttention followed by its output projection."""
    def __init__(self, config: Dinov2Config):
        super().__init__()
        self.attention = Dinov2SelfAttention(config)
        self.output = Dinov2SelfOutput(config)
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Self-attention returns a pair; only the first tensor is consumed here.
        self_attn_output, _ = self.attention(hidden_states)
        # `hidden_states` is also fed to the output layer — presumably for a
        # residual connection; confirm against Dinov2SelfOutput.
        output = self.output(self_attn_output, hidden_states)
        return output
|
Dinov2Attention
|
python
|
kamyu104__LeetCode-Solutions
|
Python/substrings-that-begin-and-end-with-the-same-letter.py
|
{
"start": 377,
"end": 579
}
|
class ____(object):
def numberOfSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
return sum(v*(v+1)//2 for v in collections.Counter(s).itervalues())
|
Solution
|
python
|
python-poetry__poetry
|
src/poetry/repositories/repository.py
|
{
"start": 662,
"end": 3821
}
|
class ____(AbstractRepository):
    """An in-memory repository backed by a plain list of `Package` objects."""
    def __init__(self, name: str, packages: Sequence[Package] | None = None) -> None:
        """Create the repository and register any `packages` given up front."""
        super().__init__(name)
        self._packages: list[Package] = []
        for package in packages or []:
            self.add_package(package)
    @property
    def packages(self) -> list[Package]:
        # All registered packages, in insertion order.
        return self._packages
    def find_packages(self, dependency: Dependency) -> list[Package]:
        """Return packages satisfying `dependency`, applying yanked / pre-release rules."""
        packages = []
        ignored_pre_release_packages = []
        constraint = dependency.constraint
        allow_prereleases = dependency.allows_prereleases()
        for package in self._find_packages(dependency.name, constraint):
            if package.yanked and not isinstance(constraint, Version):
                # PEP 592: yanked files are always ignored, unless they are the only
                # file that matches a version specifier that "pins" to an exact
                # version
                continue
            if (
                package.is_prerelease()
                and not allow_prereleases
                and not package.is_direct_origin()
            ):
                # Kept aside: may be used as a fallback below.
                ignored_pre_release_packages.append(package)
                continue
            packages.append(package)
        self._log(
            f"{len(packages)} packages found for {dependency.name} {constraint!s}",
            level="debug",
        )
        if allow_prereleases is False: # in contrast to None!
            return packages
        # If only pre-releases matched and pre-releases were not explicitly
        # forbidden (None), fall back to them rather than returning nothing.
        return packages or ignored_pre_release_packages
    def has_package(self, package: Package) -> bool:
        """True if a package with the same unique name is registered."""
        package_id = package.unique_name
        return any(
            package_id == repo_package.unique_name for repo_package in self.packages
        )
    def add_package(self, package: Package) -> None:
        # No de-duplication here; callers can check `has_package` first.
        self._packages.append(package)
    def search(self, query: str | list[str]) -> list[Package]:
        """Return packages whose name contains any of the query tokens (substring match)."""
        results: list[Package] = []
        tokens = query if isinstance(query, list) else [query]
        for package in self.packages:
            if any(token in package.name for token in tokens):
                results.append(package)
        return results
    def _find_packages(
        self, name: NormalizedName, constraint: VersionConstraint
    ) -> list[Package]:
        # Raw name + version-constraint match, before yanked / pre-release filtering.
        return [
            package
            for package in self._packages
            if package.name == name and constraint.allows(package.version)
        ]
    def _log(self, msg: str, level: str = "info") -> None:
        # Logger is named after the concrete repository class for easier filtering.
        logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
        getattr(logger, level)(f"<c1>Source ({self.name}):</c1> {msg}")
    def __len__(self) -> int:
        return len(self._packages)
    def find_links_for_package(self, package: Package) -> list[Link]:
        # Plain in-memory repositories expose no links; subclasses may override.
        return []
    def package(self, name: str, version: Version) -> Package:
        """Return the exact package `name` == `version` or raise `PackageNotFoundError`."""
        canonicalized_name = canonicalize_name(name)
        for package in self.packages:
            if canonicalized_name == package.name and package.version == version:
                return package
        raise PackageNotFoundError(f"Package {name} ({version}) not found.")
|
Repository
|
python
|
getsentry__sentry
|
src/sentry/models/groupemailthread.py
|
{
"start": 219,
"end": 1202
}
|
class ____(Model):
    """
    Keep track of the original Message-Id that was sent,
    unique per email destination and Group object. This allows
    the tracking of proper In-Reply-To and References headers
    for email threading.
    """
    __relocation_scope__ = RelocationScope.Excluded
    # Recipient address this thread is tracked for.
    email = models.EmailField(max_length=75)
    project = FlexibleForeignKey("sentry.Project", related_name="groupemail_set")
    group = FlexibleForeignKey("sentry.Group", related_name="groupemail_set")
    # Message-Id of the first mail sent for this (email, group) pair.
    msgid = models.CharField(max_length=100)
    date = models.DateTimeField(default=timezone.now, db_index=True)
    class Meta:
        app_label = "sentry"
        db_table = "sentry_groupemailthread"
        # One thread per (email, group); msgids are unique per recipient.
        unique_together = (("email", "group"), ("email", "msgid"))
        indexes = [
            models.Index(fields=["date", "project", "id"]),
            models.Index(fields=["project", "date"]),
        ]
    __repr__ = sane_repr("email", "group_id", "msgid")
|
GroupEmailThread
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_standalone_2.py
|
{
"start": 9548,
"end": 10602
}
|
class ____:
def __call__(self):
return "Hello B"
serve.run(B.bind())"""
f1 = tmp_path / "file1.py"
f1.write_text(file1)
# Driver 1 (starts Serve controller)
output = subprocess.check_output(
[sys.executable, str(f1)], stderr=subprocess.STDOUT
)
assert "Connecting to existing Ray cluster" in output.decode("utf-8")
assert "Adding 1 replica to Deployment(name='A'" in output.decode("utf-8")
f2 = tmp_path / "file2.py"
f2.write_text(file2)
# Driver 2 (reconnects to the same Serve controller)
output = subprocess.check_output(
[sys.executable, str(f2)], stderr=subprocess.STDOUT
)
assert "Connecting to existing Ray cluster" in output.decode("utf-8")
assert "Adding 1 replica to Deployment(name='B'" in output.decode("utf-8")
def test_checkpoint_deleted_on_serve_shutdown(
start_and_shutdown_ray_cli_function, tmp_path
):
"""Test the application target state checkpoint is deleted when Serve is shutdown"""
file1 = """from ray import serve
@serve.deployment
|
B
|
python
|
ray-project__ray
|
python/ray/autoscaler/v2/instance_manager/cloud_providers/read_only/cloud_provider.py
|
{
"start": 524,
"end": 2680
}
|
class ____(ICloudInstanceProvider):
    """
    A read only provider that use the ray node states from the GCS as the
    cloud instances.
    This is used for laptop mode / manual cluster setup modes, in order to
    provide status reporting in the same way for users.
    """
    def __init__(self, provider_config: dict):
        # Talks straight to the GCS; there is no real cloud API behind this provider.
        self._provider_config = provider_config
        self._gcs_address = provider_config["gcs_address"]
        self._gcs_client = GcsClient(address=self._gcs_address)
    def get_non_terminated(self) -> Dict[str, CloudInstance]:
        """Map each live ray node reported by the GCS to a `CloudInstance`."""
        cluster_resource_state = get_cluster_resource_state(self._gcs_client)
        cloud_instances = {}
        for gcs_node_state in cluster_resource_state.node_states:
            if gcs_node_state.status == NodeStatus.DEAD:
                # Skip dead nodes.
                continue
            # Use node's node id if instance id is not available
            cloud_instance_id = (
                gcs_node_state.instance_id
                if gcs_node_state.instance_id
                else binary_to_hex(gcs_node_state.node_id)
            )
            # TODO: we should add a field to the proto to indicate if the node is head
            # or not.
            is_head = is_head_node(gcs_node_state)
            cloud_instances[cloud_instance_id] = CloudInstance(
                cloud_instance_id=cloud_instance_id,
                node_kind=NodeKind.HEAD if is_head else NodeKind.WORKER,
                node_type=format_readonly_node_type(
                    binary_to_hex(gcs_node_state.node_id) # Legacy behavior.
                ),
                is_running=True,
                request_id="",
            )
        return cloud_instances
    def terminate(self, instance_id: CloudInstanceId) -> None:
        # Read-only by design: all mutating operations are unsupported.
        raise NotImplementedError("Cannot terminate instances in read-only mode.")
    def launch(
        self, shape: Dict[CloudInstanceId, int], request_id: CloudInstanceId
    ) -> None:
        raise NotImplementedError("Cannot launch instances in read-only mode.")
    def poll_errors(self) -> List[CloudInstanceProviderError]:
        # Nothing can fail asynchronously here; always report no errors.
        return []
|
ReadOnlyProvider
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_serialization.py
|
{
"start": 52618,
"end": 55458
}
|
class ____:
def test_load_der_private_key(self, backend):
data = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8-enc.der"),
lambda derfile: derfile.read(),
mode="rb",
)
unencrypted = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key = load_der_private_key(data, b"password", backend)
assert (
key.private_bytes(
Encoding.DER, PrivateFormat.PKCS8, NoEncryption()
)
== unencrypted
)
def test_load_pem_private_key(self, backend):
data = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8-enc.pem"),
lambda pemfile: pemfile.read(),
mode="rb",
)
unencrypted = load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "ed25519-pkcs8.pem"),
lambda pemfile: pemfile.read(),
mode="rb",
)
key = load_pem_private_key(data, b"password", backend)
assert (
key.private_bytes(
Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()
)
== unencrypted
)
@pytest.mark.parametrize(
("key_path", "encoding", "loader"),
[
(
["Ed25519", "ed25519-pub.pem"],
Encoding.PEM,
load_pem_public_key,
),
(
["Ed25519", "ed25519-pub.der"],
Encoding.DER,
load_der_public_key,
),
],
)
def test_load_public_key(self, key_path, encoding, loader, backend):
data = load_vectors_from_file(
os.path.join("asymmetric", *key_path),
lambda pemfile: pemfile.read(),
mode="rb",
)
public_key = loader(data, backend)
assert (
public_key.public_bytes(
encoding, PublicFormat.SubjectPublicKeyInfo
)
== data
)
def test_openssl_serialization_unsupported(self, backend):
key = ed25519.Ed25519PrivateKey.generate()
with pytest.raises(ValueError):
key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
NoEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
Encoding.DER,
PrivateFormat.TraditionalOpenSSL,
NoEncryption(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.x448_supported(),
skip_message="Requires OpenSSL with X448 support",
)
|
TestEd25519Serialization
|
python
|
joke2k__faker
|
faker/providers/geo/tr_TR/__init__.py
|
{
"start": 41,
"end": 6700
}
|
class ____(GeoProvider):
# Source: https://tr.wikipedia.org/wiki/T%C3%BCrkiye%27nin_illeri
land_coords = (
("37.003277000000004", "35.3261219", "Adana", "TR", "Europe/Istanbul"),
("37.7640008", "38.2764355", "Adıyaman", "TR", "Europe/Istanbul"),
(
"38.756850899999996",
"30.538694399999997",
"Afyonkarahisar",
"TR",
"Europe/Istanbul",
),
("38.3705416", "34.026907", "Aksaray", "TR", "Europe/Istanbul"),
("40.6569451", "35.7727169", "Amasya", "TR", "Europe/Istanbul"),
("39.921521899999995", "32.8537929", "Ankara", "TR", "Europe/Istanbul"),
("36.9009641", "30.6954846", "Antalya", "TR", "Europe/Istanbul"),
("41.1102966", "42.7035585", "Ardahan", "TR", "Europe/Istanbul"),
("41.160506", "41.839862700000005", "Artvin", "TR", "Europe/Istanbul"),
("37.841300700000005", "27.832837400000003", "Aydın", "TR", "Europe/Istanbul"),
("39.7201318", "43.050038799999996", "Ağrı", "TR", "Europe/Istanbul"),
("39.6473917", "27.8879787", "Balıkesir", "TR", "Europe/Istanbul"),
("41.6338394", "32.3384354", "Bartın", "TR", "Europe/Istanbul"),
("37.7874104", "41.2573924", "Batman", "TR", "Europe/Istanbul"),
("40.25569", "40.224099", "Bayburt", "TR", "Europe/Istanbul"),
("40.1435101", "29.975291100000003", "Bilecik", "TR", "Europe/Istanbul"),
("38.8851831", "40.4965998", "Bingöl", "TR", "Europe/Istanbul"),
("38.4002185", "42.1081317", "Bitlis", "TR", "Europe/Istanbul"),
("40.733295299999995", "31.6110479", "Bolu", "TR", "Europe/Istanbul"),
("37.7248394", "30.288728600000002", "Burdur", "TR", "Europe/Istanbul"),
("40.1826036", "29.067565500000004", "Bursa", "TR", "Europe/Istanbul"),
(
"37.773483299999995",
"29.087389399999996",
"Denizli",
"TR",
"Europe/Istanbul",
),
("37.9167321", "40.2225658", "Diyarbakır", "TR", "Europe/Istanbul"),
("40.8458611", "31.164851000000002", "Düzce", "TR", "Europe/Istanbul"),
("41.675932700000004", "26.5587225", "Edirne", "TR", "Europe/Istanbul"),
("38.5824771", "39.396179", "Elazığ", "TR", "Europe/Istanbul"),
("39.749605200000005", "39.4941023", "Erzincan", "TR", "Europe/Istanbul"),
("39.7581897", "41.4032241", "Erzurum", "TR", "Europe/Istanbul"),
("39.766681299999995", "30.5255947", "Eskişehir", "TR", "Europe/Istanbul"),
("37.0611756", "37.3793085", "Gaziantep", "TR", "Europe/Istanbul"),
("40.9148702", "38.3879289", "Giresun", "TR", "Europe/Istanbul"),
("40.4617844", "39.475733899999994", "Gümüşhane", "TR", "Europe/Istanbul"),
("37.574898", "43.73766", "Hakkari", "TR", "Europe/Istanbul"),
("36.202593900000004", "36.1603945", "Hatay", "TR", "Europe/Istanbul"),
("37.77035", "30.5556933", "Isparta", "TR", "Europe/Istanbul"),
("39.921566799999994", "44.0467724", "Iğdır", "TR", "Europe/Istanbul"),
("37.5812744", "36.927509", "Kahramanmaraş", "TR", "Europe/Istanbul"),
("41.1110349", "32.619390100000004", "Karabük", "TR", "Europe/Istanbul"),
(
"37.179244700000005",
"33.222478100000004",
"Karaman",
"TR",
"Europe/Istanbul",
),
("40.605158", "43.0961734", "Kars", "TR", "Europe/Istanbul"),
("41.3765359", "33.7770087", "Kastamonu", "TR", "Europe/Istanbul"),
("38.7225274", "35.4874516", "Kayseri", "TR", "Europe/Istanbul"),
("36.718045000000004", "37.11688", "Kilis", "TR", "Europe/Istanbul"),
("40.765382", "29.9406983", "Kocaeli", "TR", "Europe/Istanbul"),
("37.8719963", "32.484401500000004", "Konya", "TR", "Europe/Istanbul"),
("39.4191505", "29.987292800000002", "Kütahya", "TR", "Europe/Istanbul"),
("41.7370223", "27.223552299999998", "Kırklareli", "TR", "Europe/Istanbul"),
("39.8485708", "33.5276222", "Kırıkkale", "TR", "Europe/Istanbul"),
("39.14611420000001", "34.1605587", "Kırşehir", "TR", "Europe/Istanbul"),
("38.3483098", "38.3178715", "Malatya", "TR", "Europe/Istanbul"),
("38.615502899999996", "27.4255716", "Manisa", "TR", "Europe/Istanbul"),
("37.341485399999996", "40.7476249", "Mardin", "TR", "Europe/Istanbul"),
("36.8117583", "34.6292679", "Mersin", "TR", "Europe/Istanbul"),
("37.1642053", "28.2624288", "Muğla", "TR", "Europe/Istanbul"),
("38.740370299999995", "41.4967451", "Muş", "TR", "Europe/Istanbul"),
("38.6223688", "34.713602200000004", "Nevşehir", "TR", "Europe/Istanbul"),
("37.971207899999996", "34.6775534", "Niğde", "TR", "Europe/Istanbul"),
("40.8292569", "37.4082764", "Ordu", "TR", "Europe/Istanbul"),
("37.073671000000004", "36.255941", "Osmaniye", "TR", "Europe/Istanbul"),
("41.022809", "40.519612", "Rize", "TR", "Europe/Istanbul"),
("40.7731834", "30.481606", "Sakarya", "TR", "Europe/Istanbul"),
("41.2889924", "36.329445899999996", "Samsun", "TR", "Europe/Istanbul"),
("37.931282", "41.939840000000004", "Siirt", "TR", "Europe/Istanbul"),
("42.0266698", "35.1506765", "Sinop", "TR", "Europe/Istanbul"),
("39.7503572", "37.0145185", "Sivas", "TR", "Europe/Istanbul"),
("40.986222999999995", "27.513944", "Tekirdağ", "TR", "Europe/Istanbul"),
("40.327746999999995", "36.5539494", "Tokat", "TR", "Europe/Istanbul"),
("41.0058605", "39.718092799999994", "Trabzon", "TR", "Europe/Istanbul"),
("39.1080631", "39.548196999999995", "Tunceli", "TR", "Europe/Istanbul"),
("38.6710838", "29.407250899999998", "Uşak", "TR", "Europe/Istanbul"),
("38.508360100000004", "43.374532200000004", "Van", "TR", "Europe/Istanbul"),
("40.6556669", "29.272909100000003", "Yalova", "TR", "Europe/Istanbul"),
("39.8205571", "34.8094917", "Yozgat", "TR", "Europe/Istanbul"),
("41.250324", "31.8389738", "Zonguldak", "TR", "Europe/Istanbul"),
("40.1534952", "26.4140933", "Çanakkale", "TR", "Europe/Istanbul"),
("40.5971947", "33.6212704", "Çankırı", "TR", "Europe/Istanbul"),
("40.54914960000001", "34.9602453", "Çorum", "TR", "Europe/Istanbul"),
("41.0096334", "28.9651646", "İstanbul", "TR", "Europe/Istanbul"),
("38.415342100000004", "27.144474", "İzmir", "TR", "Europe/Istanbul"),
("37.2595198", "39.0408174", "Şanlıurfa", "TR", "Europe/Istanbul"),
("37.455253000000006", "42.5212049", "Şırnak", "TR", "Europe/Istanbul"),
)
|
Provider
|
python
|
google__jax
|
jax/experimental/mosaic/gpu/constraints.py
|
{
"start": 23290,
"end": 32388
}
|
class ____:
...
def non_splat_variables(
constraints: Sequence[Constraint],
) -> set[Variable]:
"""Returns a all vars distinct from a splat."""
vars: set[Variable] = set()
for constraint in constraints:
match constraint:
case NotOfType(expr=Variable() as var, type=fa.WGSplatFragLayout):
assert isinstance(var, Variable) # make pytype happy
vars.add(var)
return vars
def _has_relayout_of_non_splat_to_splat(constraints: Sequence[Constraint]) -> bool:
"""Returns whether the constraints imply a non-splat to splat relayout.
Such relayouts are impossible and this helps shortcut the search.
If this function returns False, this doesn't necessarily mean that there are
no non-splat to splat relayouts, just that this is not known yet.
"""
non_splat = non_splat_variables(constraints)
if not non_splat:
return False
def is_constant_splat(e) -> bool:
return isinstance(e, RegisterLayout) and isinstance(
e.value, fa.WGSplatFragLayout
)
for constraint in constraints:
match constraint:
case Relayout(source=source, target=target):
if source in non_splat and is_constant_splat(target):
return True
case _:
pass
return False
def saturate_distinct_from_splat(
constraint_system: ConstraintSystem,
) -> ConstraintSystem | Unsatisfiable:
"""Adds transitive NotOfType constraints for all non-splat variables.
Given `n` variables `l0`, ... `l{n-1}`, and a set of relayouts
`{ Relayout(l{i}, l{i+1}) : 0 <= i < n }`, if we also know that
`l{0}` is not splat, then we can automatically deduce that none of
`l0`, ..., `l{n-1}` are splat either.
This helps us quickly conclude that a system is unsatisfiable in cases where
a non-splat variable is transitively relaid out into a splat layout.
"""
non_splat = non_splat_variables(constraint_system.constraints)
new_constraints: list[Constraint] = []
new_non_splat_found = len(non_splat) > 0
while new_non_splat_found:
new_non_splat_found = False
for constraint in constraint_system.constraints:
match constraint:
case Relayout(source=source, target=target):
if (
isinstance(target, Variable)
and source in non_splat
and target not in non_splat
):
new_non_splat_found = True
non_splat.add(target)
new_constraints.append(NotOfType(target, fa.WGSplatFragLayout))
case _:
pass
return constraint_system & ConstraintSystem(constraints=new_constraints)
def compute_transitively_equal_vars(
system: ConstraintSystem,
) -> dict[Variable, list[Variable]]:
"""Computes all transitively equal variables in a constraint system.
The output dictionary maps each variable that appears in constraints in the
constraint system to all the variables it is transitively equal to.
"""
# The equality relations between variables form a graph where variables are
# nodes and a constraint `v1 == v2` forms an edge. All variables in a
# connected component are transitively equal. We use a Union-Find data
# structure with path compression to efficiently find these connected
# components (i.e., equivalence classes).
parent: dict[Variable, Variable] = {}
def find(v: Variable) -> Variable:
if v not in parent:
parent[v] = v
if parent[v] != v:
parent[v] = find(parent[v])
return parent[v]
def union(v1: Variable, v2: Variable):
root1 = find(v1)
root2 = find(v2)
if root1 != root2:
parent[root2] = root1
all_vars: set[Variable] = set()
for constraint in system.constraints:
match constraint:
case Equals(lhs=Variable() as lhs, rhs=Variable() as rhs):
assert isinstance(lhs, Variable) # make pytype happy
assert isinstance(rhs, Variable) # make pytype happy
all_vars.add(lhs)
all_vars.add(rhs)
union(lhs, rhs)
# Group variables by their component representative.
components: dict[Variable, list[Variable]] = {}
for v in sorted(all_vars, key=str):
root = find(v)
components.setdefault(root, []).append(v)
equal_vars: dict[Variable, list[Variable]] = {}
for component_vars in components.values():
for v in component_vars:
equal_vars[v] = [other for other in component_vars if other != v]
return equal_vars
def saturate_divides_constraints_for_equal_vars(
system: ConstraintSystem,
) -> ConstraintSystem:
"""Saturates Divides constraints between all transitively equal vars.
"""
equal_vars = compute_transitively_equal_vars(system)
new_constraints: list[Constraint] = []
for constraint in system.constraints:
new_constraints.append(constraint)
match constraint:
case Divides(expr=expr, tiling_multiple=tiling_multiple):
if isinstance(expr, Variable):
for equal_var in equal_vars.get(expr, []):
new_constraints.append(Divides(equal_var, tiling_multiple))
case _:
pass
new_constraints = merge_divides_constraints(new_constraints)
return dataclasses.replace(system, constraints=new_constraints)
# TODO(bchetioui): clean up API.
def merge_divides_constraints(constraints: Sequence[Constraint]) -> list[Constraint]:
"""Merges Divides constraints that can be merged."""
result: list[Constraint] = []
var_to_tiling_multiples : dict[Variable, tuple[int, ...]] = {}
for constraint in constraints:
match constraint:
case Divides(expr=Variable() as v, tiling_multiple=tiling_multiple):
assert isinstance(v, Variable) # make pytype happy
if (previous_tiling_multiple := var_to_tiling_multiples.get(v)) is None:
var_to_tiling_multiples[v] = tiling_multiple
continue
# If the two tuples are of different lengths, the larger tuple will
# be truncated (removing initial multiples) to the length of the
# smaller tuple. This preserves the semantics of the Divides constraints
# where a tiling's rank cannot exceed the size of tiling_multiple.
min_len = min(len(tiling_multiple), len(previous_tiling_multiple))
new_tiling_multiple = []
if min_len > 0:
for x, y in zip(tiling_multiple[-min_len:], previous_tiling_multiple[-min_len:], strict=True):
new_tiling_multiple.append(math.gcd(x, y))
var_to_tiling_multiples[v] = tuple(new_tiling_multiple)
case _:
result.append(constraint)
for expr, tiling_multiple in var_to_tiling_multiples.items():
result.append(Divides(expr, tiling_multiple))
return result
def _reduce_system_once(
constraint_system: ConstraintSystem,
) -> ConstraintSystem | Unsatisfiable | None:
"""Performs one reduction step over each constraint in a constraint system.
Returns:
- Unsatisfiable(): if the constraint system is unsatisfiable.
- A new constraint system if any constraint was reduced.
- None: if the constraint system is not known unsatisfiable, but hasn't been
reduced.
"""
assignments = constraint_system.assignments
constraints: list[Constraint] = []
changed = False
def try_assign(var: Variable, cst: Constant) -> bool:
if var in assignments and assignments[var] != cst:
return False
assignments[var] = cst
return True
for constraint in constraint_system.constraints:
match reduce_constraint(constraint, assignments):
case Unsatisfiable():
return Unsatisfiable()
case Equals(lhs=Variable() as var, rhs=Constant() as cst):
if not try_assign(var, cst):
return Unsatisfiable()
changed = True
case Equals(lhs=Constant() as cst, rhs=Variable() as var):
if not try_assign(var, cst):
return Unsatisfiable()
changed = True
case Tautological():
changed = True
case _ as new_constraint:
changed |= new_constraint != constraint
constraints.append(new_constraint)
new_constraints = merge_divides_constraints(constraints)
changed |= len(new_constraints) != len(constraints)
constraints = new_constraints
# Shortcut for a specific case of unsatisfiability. This shortcut
# drastically reduces the size of the search space.
if _has_relayout_of_non_splat_to_splat(constraints):
return Unsatisfiable()
if changed:
return ConstraintSystem(
assignments=assignments | constraint_system.assignments,
constraints=constraints,
)
return None
def reduce(
constraint_system: ConstraintSystem,
) -> ConstraintSystem | Unsatisfiable:
"""Reduces a constraint system until it can no longer be reduced.
Returns:
- Unsatisfiable(): if the constraint system is unsatisfiable.
- The maximally reduced constraint system otherwise.
"""
while True:
match _reduce_system_once(constraint_system):
case None:
break
case Unsatisfiable():
return Unsatisfiable()
case ConstraintSystem() as new_system:
constraint_system = new_system
case _ as never:
assert_never(never)
return constraint_system
|
Tautological
|
python
|
pandas-dev__pandas
|
pandas/io/sas/sas7bdat.py
|
{
"start": 3002,
"end": 27011
}
|
class ____(SASReader):
"""
Read SAS files in SAS7BDAT format.
Parameters
----------
path_or_buf : path name or buffer
Name of SAS file or file-like object pointing to SAS file
contents.
index : column identifier, defaults to None
Column to use as index.
convert_dates : bool, defaults to True
Attempt to convert dates to Pandas datetime values. Note that
some rarely used SAS date formats may be unsupported.
blank_missing : bool, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
chunksize : int, defaults to None
Return SAS7BDATReader object for iterations, returns chunks
with given number of lines.
encoding : str, 'infer', defaults to None
String encoding acc. to Python standard encodings,
encoding='infer' tries to detect the encoding from the file header,
encoding=None will leave the data in binary format.
convert_text : bool, defaults to True
If False, text variables are left as raw bytes.
convert_header_text : bool, defaults to True
If False, header text, including column names, are left as raw
bytes.
"""
_int_length: int
_cached_page: bytes | None
def __init__(
self,
path_or_buf: FilePath | ReadBuffer[bytes],
index=None,
convert_dates: bool = True,
blank_missing: bool = True,
chunksize: int | None = None,
encoding: str | None = None,
convert_text: bool = True,
convert_header_text: bool = True,
compression: CompressionOptions = "infer",
) -> None:
self.index = index
self.convert_dates = convert_dates
self.blank_missing = blank_missing
self.chunksize = chunksize
self.encoding = encoding
self.convert_text = convert_text
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
self.compression = b""
self.column_names_raw: list[bytes] = []
self.column_names: list[str | bytes] = []
self.column_formats: list[str | bytes] = []
self.columns: list[_Column] = []
self._current_page_data_subheader_pointers: list[tuple[int, int]] = []
self._cached_page = None
self._column_data_lengths: list[int] = []
self._column_data_offsets: list[int] = []
self._column_types: list[bytes] = []
self._current_row_in_file_index = 0
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
self.handles = get_handle(
path_or_buf, "rb", is_text=False, compression=compression
)
self._path_or_buf = self.handles.handle
# Same order as const.SASIndex
self._subheader_processors = [
self._process_rowsize_subheader,
self._process_columnsize_subheader,
self._process_subheader_counts,
self._process_columntext_subheader,
self._process_columnname_subheader,
self._process_columnattributes_subheader,
self._process_format_subheader,
self._process_columnlist_subheader,
None, # Data
]
try:
self._get_properties()
self._parse_metadata()
except Exception:
self.close()
raise
def column_data_lengths(self) -> np.ndarray:
"""Return a numpy int64 array of the column data lengths"""
return np.asarray(self._column_data_lengths, dtype=np.int64)
def column_data_offsets(self) -> np.ndarray:
"""Return a numpy int64 array of the column offsets"""
return np.asarray(self._column_data_offsets, dtype=np.int64)
def column_types(self) -> np.ndarray:
"""
Returns a numpy character array of the column types:
s (string) or d (double)
"""
return np.asarray(self._column_types, dtype=np.dtype("S1"))
def close(self) -> None:
self.handles.close()
def _get_properties(self) -> None:
# Check magic number
self._path_or_buf.seek(0)
self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0 : len(const.magic)] != const.magic:
raise ValueError("magic number mismatch (not a SAS file?)")
# Get alignment information
buf = self._read_bytes(const.align_1_offset, const.align_1_length)
if buf == const.u64_byte_checker_value:
self.U64 = True
self._int_length = 8
self._page_bit_offset = const.page_bit_offset_x64
self._subheader_pointer_length = const.subheader_pointer_length_x64
else:
self.U64 = False
self._page_bit_offset = const.page_bit_offset_x86
self._subheader_pointer_length = const.subheader_pointer_length_x86
self._int_length = 4
buf = self._read_bytes(const.align_2_offset, const.align_2_length)
if buf == const.align_1_checker_value:
align1 = const.align_2_value
else:
align1 = 0
# Get endianness information
buf = self._read_bytes(const.endianness_offset, const.endianness_length)
if buf == b"\x01":
self.byte_order = "<"
self.need_byteswap = sys.byteorder == "big"
else:
self.byte_order = ">"
self.need_byteswap = sys.byteorder == "little"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
if buf in const.encoding_names:
self.inferred_encoding = const.encoding_names[buf]
if self.encoding == "infer":
self.encoding = self.inferred_encoding
else:
self.inferred_encoding = f"unknown (code={buf})"
# Timestamp is epoch 01/01/1960
epoch = datetime(1960, 1, 1)
x = self._read_float(
const.date_created_offset + align1, const.date_created_length
)
self.date_created = epoch + pd.to_timedelta(x, unit="s")
x = self._read_float(
const.date_modified_offset + align1, const.date_modified_length
)
self.date_modified = epoch + pd.to_timedelta(x, unit="s")
self.header_length = self._read_uint(
const.header_size_offset + align1, const.header_size_length
)
# Read the rest of the header into cached_page.
buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
if len(self._cached_page) != self.header_length:
raise ValueError("The SAS7BDAT file appears to be truncated.")
self._page_length = self._read_uint(
const.page_size_offset + align1, const.page_size_length
)
def __next__(self) -> DataFrame:
da = self.read(nrows=self.chunksize or 1)
if da.empty:
self.close()
raise StopIteration
return da
# Read a single float of the given width (4 or 8).
def _read_float(self, offset: int, width: int) -> float:
assert self._cached_page is not None
if width == 4:
return read_float_with_byteswap(
self._cached_page, offset, self.need_byteswap
)
elif width == 8:
return read_double_with_byteswap(
self._cached_page, offset, self.need_byteswap
)
else:
self.close()
raise ValueError("invalid float width")
# Read a single unsigned integer of the given width (1, 2, 4 or 8).
def _read_uint(self, offset: int, width: int) -> int:
assert self._cached_page is not None
if width == 1:
return self._read_bytes(offset, 1)[0]
elif width == 2:
return read_uint16_with_byteswap(
self._cached_page, offset, self.need_byteswap
)
elif width == 4:
return read_uint32_with_byteswap(
self._cached_page, offset, self.need_byteswap
)
elif width == 8:
return read_uint64_with_byteswap(
self._cached_page, offset, self.need_byteswap
)
else:
self.close()
raise ValueError("invalid int width")
def _read_bytes(self, offset: int, length: int):
assert self._cached_page is not None
if offset + length > len(self._cached_page):
self.close()
raise ValueError("The cached page is too small.")
return self._cached_page[offset : offset + length]
def _parse_metadata(self) -> None:
done = False
while not done:
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
raise ValueError("Failed to read a meta data page from the SAS file.")
done = self._process_page_meta()
def _process_page_meta(self) -> bool:
self._read_page_header()
pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type]
if self._current_page_type in pt:
self._process_page_metadata()
is_data_page = self._current_page_type == const.page_data_type
is_mix_page = self._current_page_type == const.page_mix_type
return bool(
is_data_page
or is_mix_page
or self._current_page_data_subheader_pointers != []
)
def _read_page_header(self) -> None:
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = (
self._read_uint(tx, const.page_type_length) & const.page_type_mask2
)
tx = const.block_count_offset + bit_offset
self._current_page_block_count = self._read_uint(tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
self._current_page_subheaders_count = self._read_uint(
tx, const.subheader_count_length
)
def _process_page_metadata(self) -> None:
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
offset = const.subheader_pointers_offset + bit_offset
total_offset = offset + self._subheader_pointer_length * i
subheader_offset = self._read_uint(total_offset, self._int_length)
total_offset += self._int_length
subheader_length = self._read_uint(total_offset, self._int_length)
total_offset += self._int_length
subheader_compression = self._read_uint(total_offset, 1)
total_offset += 1
subheader_type = self._read_uint(total_offset, 1)
if (
subheader_length == 0
or subheader_compression == const.truncated_subheader_id
):
continue
subheader_signature = self._read_bytes(subheader_offset, self._int_length)
subheader_index = get_subheader_index(subheader_signature)
subheader_processor = self._subheader_processors[subheader_index]
if subheader_processor is None:
f1 = subheader_compression in (const.compressed_subheader_id, 0)
f2 = subheader_type == const.compressed_subheader_type
if self.compression and f1 and f2:
self._current_page_data_subheader_pointers.append(
(subheader_offset, subheader_length)
)
else:
self.close()
raise ValueError(
f"Unknown subheader signature {subheader_signature}"
)
else:
subheader_processor(subheader_offset, subheader_length)
def _process_rowsize_subheader(self, offset: int, length: int) -> None:
int_len = self._int_length
lcs_offset = offset
lcp_offset = offset
if self.U64:
lcs_offset += 682
lcp_offset += 706
else:
lcs_offset += 354
lcp_offset += 378
self.row_length = self._read_uint(
offset + const.row_length_offset_multiplier * int_len,
int_len,
)
self.row_count = self._read_uint(
offset + const.row_count_offset_multiplier * int_len,
int_len,
)
self.col_count_p1 = self._read_uint(
offset + const.col_count_p1_multiplier * int_len, int_len
)
self.col_count_p2 = self._read_uint(
offset + const.col_count_p2_multiplier * int_len, int_len
)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
self._mix_page_row_count = self._read_uint(offset + mx, int_len)
self._lcs = self._read_uint(lcs_offset, 2)
self._lcp = self._read_uint(lcp_offset, 2)
def _process_columnsize_subheader(self, offset: int, length: int) -> None:
int_len = self._int_length
offset += int_len
self.column_count = self._read_uint(offset, int_len)
if self.col_count_p1 + self.col_count_p2 != self.column_count:
print(
f"Warning: column count mismatch ({self.col_count_p1} + "
f"{self.col_count_p2} != {self.column_count})\n"
)
# Unknown purpose
def _process_subheader_counts(self, offset: int, length: int) -> None:
pass
def _process_columntext_subheader(self, offset: int, length: int) -> None:
offset += self._int_length
text_block_size = self._read_uint(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
self.column_names_raw.append(cname_raw)
if len(self.column_names_raw) == 1:
compression_literal = b""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
self.compression = compression_literal
offset -= self._int_length
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
if compression_literal == b"":
self._lcs = 0
offset1 = offset + 32
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0 : self._lcp]
elif compression_literal == const.rle_compression:
offset1 = offset + 40
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0 : self._lcp]
elif self._lcs > 0:
self._lcp = 0
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0 : self._lcp]
if hasattr(self, "creator_proc"):
self.creator_proc = self._convert_header_text(self.creator_proc) # pyright: ignore[reportArgumentType]
def _process_columnname_subheader(self, offset: int, length: int) -> None:
int_len = self._int_length
offset += int_len
column_name_pointers_count = (length - 2 * int_len - 12) // 8
for i in range(column_name_pointers_count):
text_subheader = (
offset
+ const.column_name_pointer_length * (i + 1)
+ const.column_name_text_subheader_offset
)
col_name_offset = (
offset
+ const.column_name_pointer_length * (i + 1)
+ const.column_name_offset_offset
)
col_name_length = (
offset
+ const.column_name_pointer_length * (i + 1)
+ const.column_name_length_offset
)
idx = self._read_uint(
text_subheader, const.column_name_text_subheader_length
)
col_offset = self._read_uint(
col_name_offset, const.column_name_offset_length
)
col_len = self._read_uint(col_name_length, const.column_name_length_length)
name_raw = self.column_names_raw[idx]
cname = name_raw[col_offset : col_offset + col_len]
self.column_names.append(self._convert_header_text(cname))
def _process_columnattributes_subheader(self, offset: int, length: int) -> None:
int_len = self._int_length
column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8)
for i in range(column_attributes_vectors_count):
col_data_offset = (
offset + int_len + const.column_data_offset_offset + i * (int_len + 8)
)
col_data_len = (
offset
+ 2 * int_len
+ const.column_data_length_offset
+ i * (int_len + 8)
)
col_types = (
offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)
)
x = self._read_uint(col_data_offset, int_len)
self._column_data_offsets.append(x)
x = self._read_uint(col_data_len, const.column_data_length_length)
self._column_data_lengths.append(x)
x = self._read_uint(col_types, const.column_type_length)
self._column_types.append(b"d" if x == 1 else b"s")
def _process_columnlist_subheader(self, offset: int, length: int) -> None:
# unknown purpose
pass
def _process_format_subheader(self, offset: int, length: int) -> None:
int_len = self._int_length
text_subheader_format = (
offset + const.column_format_text_subheader_index_offset + 3 * int_len
)
col_format_offset = offset + const.column_format_offset_offset + 3 * int_len
col_format_len = offset + const.column_format_length_offset + 3 * int_len
text_subheader_label = (
offset + const.column_label_text_subheader_index_offset + 3 * int_len
)
col_label_offset = offset + const.column_label_offset_offset + 3 * int_len
col_label_len = offset + const.column_label_length_offset + 3 * int_len
x = self._read_uint(
text_subheader_format, const.column_format_text_subheader_index_length
)
format_idx = min(x, len(self.column_names_raw) - 1)
format_start = self._read_uint(
col_format_offset, const.column_format_offset_length
)
format_len = self._read_uint(col_format_len, const.column_format_length_length)
label_idx = self._read_uint(
text_subheader_label, const.column_label_text_subheader_index_length
)
label_idx = min(label_idx, len(self.column_names_raw) - 1)
label_start = self._read_uint(
col_label_offset, const.column_label_offset_length
)
label_len = self._read_uint(col_label_len, const.column_label_length_length)
label_names = self.column_names_raw[label_idx]
column_label = self._convert_header_text(
label_names[label_start : label_start + label_len]
)
format_names = self.column_names_raw[format_idx]
column_format = self._convert_header_text(
format_names[format_start : format_start + format_len]
)
current_column_number = len(self.columns)
col = _Column(
current_column_number,
self.column_names[current_column_number],
column_label,
column_format,
self._column_types[current_column_number],
self._column_data_lengths[current_column_number],
)
self.column_formats.append(column_format)
self.columns.append(col)
def read(self, nrows: int | None = None) -> DataFrame:
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if len(self._column_types) == 0:
self.close()
raise EmptyDataError("No columns to parse from file")
if nrows > 0 and self._current_row_in_file_index >= self.row_count:
return DataFrame()
nrows = min(nrows, self.row_count - self._current_row_in_file_index)
nd = self._column_types.count(b"d")
ns = self._column_types.count(b"s")
self._string_chunk = np.empty((ns, nrows), dtype=object)
self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
self.close()
msg = (
"failed to read complete page from file (read "
f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
)
raise ValueError(msg)
self._read_page_header()
if self._current_page_type in const.page_meta_types:
self._process_page_metadata()
if self._current_page_type not in const.page_meta_types + [
const.page_data_type,
const.page_mix_type,
]:
return self._read_next_page()
return False
def _chunk_to_dataframe(self) -> DataFrame:
n = self._current_row_in_chunk_index
m = self._current_row_in_file_index
ix = range(m - n, m)
rslt = {}
js, jb = 0, 0
infer_string = using_string_dtype()
for j in range(self.column_count):
name = self.column_names[j]
if self._column_types[j] == b"d":
col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d")
rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix, copy=False)
if self.convert_dates:
if self.column_formats[j] in const.sas_date_formats:
rslt[name] = _convert_datetimes(rslt[name], "d")
elif self.column_formats[j] in const.sas_datetime_formats:
rslt[name] = _convert_datetimes(rslt[name], "s")
jb += 1
elif self._column_types[j] == b"s":
rslt[name] = pd.Series(self._string_chunk[js, :], index=ix, copy=False)
if self.convert_text and (self.encoding is not None):
rslt[name] = self._decode_string(rslt[name].str)
if infer_string:
rslt[name] = rslt[name].astype("str")
js += 1
else:
self.close()
raise ValueError(f"unknown column type {self._column_types[j]!r}")
df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False)
return df
def _decode_string(self, b):
return b.decode(self.encoding or self.default_encoding)
def _convert_header_text(self, b: bytes) -> str | bytes:
if self.convert_header_text:
return self._decode_string(b)
else:
return b
|
SAS7BDATReader
|
python
|
astropy__astropy
|
astropy/utils/masked/core.py
|
{
"start": 53015,
"end": 56026
}
|
class ____(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
info = MaskedRecarrayInfo()
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
# obscrure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError("can only get existing field from structured dtype.")
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError("can only set existing field from structured dtype.")
def __repr__(self):
cls_name = type(self).__name__
out = super().__repr__().splitlines()
prefix, _, rest = out[0].partition("(")
out0 = cls_name + "(" + rest
extra_space = (len(cls_name) - len(prefix)) * " "
return "\n".join([out0] + [extra_space + o for o in out[1:]])
def __getattr__(key):
"""Make commonly used Masked subclasses importable for ASDF support.
Registered types associated with ASDF converters must be importable by
their fully qualified name. Masked classes are dynamically created and have
apparent names like ``astropy.utils.masked.core.MaskedQuantity`` although
they aren't actually attributes of this module. Customize module attribute
lookup so that certain commonly used Masked classes are importable.
See:
- https://asdf.readthedocs.io/en/latest/asdf/extending/converters.html#entry-point-performance-considerations
- https://github.com/astropy/asdf-astropy/pull/253
"""
if key.startswith(Masked.__name__):
# TODO: avoid using a private attribute from table.
# Can we make this more beautiful?
from astropy.table.serialize import __construct_mixin_classes
base_class_name = key[len(Masked.__name__) :]
for base_class_qualname in __construct_mixin_classes:
module, _, name = base_class_qualname.rpartition(".")
if name == base_class_name:
base_class = getattr(importlib.import_module(module), name)
# Try creating the masked class
masked_class = Masked(base_class)
# But only return it if it is a standard one, not one
# where we just used the ndarray fallback.
if base_class in Masked._masked_classes:
return masked_class
raise AttributeError(f"module '{__name__}' has no attribute '{key}'")
|
MaskedRecarray
|
python
|
spyder-ide__spyder
|
spyder/plugins/ipythonconsole/utils/websocket_client.py
|
{
"start": 15089,
"end": 16956
}
|
class ____:
time_to_dead: float = 1.0
def __init__(
self,
websocket: aiohttp.ClientWebSocketResponse,
):
self._websocket = websocket
self._websocket._handle_ping_pong_exception = MethodType(
self._handle_heartbeat_exc(
self._websocket._handle_ping_pong_exception
),
self._websocket,
)
self._running = False
def start(self):
"""Start the channel."""
self._running = True
self._set_heartbeat()
def stop(self):
"""Stop the channel."""
self._running = False
self._unset_heartbeat()
def is_alive(self) -> bool:
"""Test whether the channel is alive."""
return self._running and not self._websocket.closed
def pause(self):
"""Pause the heartbeat channel."""
self._unset_heartbeat()
def unpause(self):
"""Unpause the heartbeat channel."""
self._set_heartbeat()
def is_beating(self) -> bool:
"""Test whether the channel is beating."""
return True
def _set_heartbeat(self):
"""Set the heartbeat for the channel."""
self._websocket._heartbeat = self.time_to_dead * 2
self._websocket._pong_heartbeat = self.time_to_dead
self._websocket._reset_heartbeat()
def _unset_heartbeat(self):
"""Unset the heartbeat for the channel."""
self._websocket._heartbeat = None
self._websocket._reset_heartbeat()
def _handle_heartbeat_exc(self, func):
@wraps(func)
def wrapper(ws: aiohttp.ClientWebSocketResponse, *args, **kwargs):
self.call_handlers(ws._loop.time() - ws._heartbeat_when)
return func(*args, **kwargs)
return wrapper
def call_handlers(self, since_last_heartbeat: float):
pass
|
_WebSocketHBChannel
|
python
|
walkccc__LeetCode
|
solutions/3529. Count Cells in Overlapping Horizontal and Vertical Substrings/3529.py
|
{
"start": 0,
"end": 1953
}
|
class ____:
def countCells(self, grid: list[list[str]], pattern: str) -> int:
BASE = 13
HASH = 1_000_000_007
m = len(grid)
n = len(grid[0])
def markMatchedCells(flattenedGrid: str, isHorizontal: bool) -> list[list[bool]]:
matchMatrix = [[False] * n for _ in range(m)]
matchPrefix = [0] * (len(flattenedGrid) + 1)
pows = [1] # pows[i] := BASE^i % HASH
patternHash = 0
runningHash = 0
for i in range(1, len(pattern)):
pows.append((pows[-1] * BASE) % HASH)
for c in pattern:
patternHash = (patternHash * BASE + (ord(c) - ord('a'))) % HASH
for i in range(len(flattenedGrid)):
runningHash = (
runningHash * BASE + (ord(flattenedGrid[i]) - ord('a'))) % HASH
if i >= len(pattern) - 1:
if runningHash == patternHash: # Match found.
matchPrefix[i - len(pattern) + 1] += 1
matchPrefix[i + 1] -= 1
# Remove the contribution of the oldest letter.
oldestLetterHash = (
pows[len(pattern) - 1] *
(ord(flattenedGrid[i - len(pattern) + 1]) - ord('a'))) % HASH
runningHash = (runningHash - oldestLetterHash + HASH) % HASH
for k in range(len(flattenedGrid)):
if k > 0:
matchPrefix[k] += matchPrefix[k - 1]
if matchPrefix[k] > 0:
i = k // n if isHorizontal else k % m
j = k % n if isHorizontal else k // m
matchMatrix[i][j] = True
return matchMatrix
# Find matching positions.
flattenedGridRow = ''.join(cell for row in grid for cell in row)
flattenedGridCol = ''.join(cell for col in zip(*grid) for cell in col)
horizontalMatches = markMatchedCells(flattenedGridRow, True)
verticalMatches = markMatchedCells(flattenedGridCol, False)
return sum(horizontalMatches[i][j] and verticalMatches[i][j]
for i in range(m)
for j in range(n))
|
Solution
|
python
|
davidhalter__parso
|
parso/python/tokenize.py
|
{
"start": 8263,
"end": 8651
}
|
class ____(NamedTuple):
type: PythonTokenTypes
string: str
start_pos: Tuple[int, int]
prefix: str
@property
def end_pos(self) -> Tuple[int, int]:
lines = split_lines(self.string)
if len(lines) > 1:
return self.start_pos[0] + len(lines) - 1, 0
else:
return self.start_pos[0], self.start_pos[1] + len(self.string)
|
Token
|
python
|
doocs__leetcode
|
solution/3100-3199/3144.Minimum Substring Partition of Equal Character Frequency/Solution3.py
|
{
"start": 0,
"end": 448
}
|
class ____:
def minimumSubstringsInPartition(self, s: str) -> int:
n = len(s)
f = [inf] * (n + 1)
f[0] = 0
for i in range(n):
cnt = defaultdict(int)
m = 0
for j in range(i, -1, -1):
cnt[s[j]] += 1
m = max(m, cnt[s[j]])
if i - j + 1 == len(cnt) * m:
f[i + 1] = min(f[i + 1], f[j] + 1)
return f[n]
|
Solution
|
python
|
coleifer__peewee
|
playhouse/reflection.py
|
{
"start": 11015,
"end": 12052
}
|
class ____(PostgresqlMetadata):
# CRDB treats INT the same as BIGINT, so we just map bigint type OIDs to
# regular IntegerField.
column_map = PostgresqlMetadata.column_map.copy()
column_map[20] = IntegerField
array_types = PostgresqlMetadata.array_types.copy()
array_types[1016] = IntegerField
extension_import = 'from playhouse.cockroachdb import *'
def __init__(self, database):
Metadata.__init__(self, database)
self.requires_extension = True
if postgres_ext is not None:
# Attempt to add JSON types.
cursor = self.execute('select oid, typname, format_type(oid, NULL)'
' from pg_type;')
results = cursor.fetchall()
for oid, typname, formatted_type in results:
if typname == 'jsonb':
self.column_map[oid] = postgres_ext.BinaryJSONField
for oid in self.array_types:
self.column_map[oid] = postgres_ext.ArrayField
|
CockroachDBMetadata
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/exceptions.py
|
{
"start": 20274,
"end": 21256
}
|
class ____(ConfigurationError):
"""When there are errors while loading a configuration file"""
def __init__(
self,
reason: str = "could not be loaded",
fname: Optional[str] = None,
error: Optional[configparser.Error] = None,
) -> None:
super().__init__(error)
self.reason = reason
self.fname = fname
self.error = error
def __str__(self) -> str:
if self.fname is not None:
message_part = f" in {self.fname}."
else:
assert self.error is not None
message_part = f".\n{self.error}\n"
return f"Configuration file {self.reason}{message_part}"
_DEFAULT_EXTERNALLY_MANAGED_ERROR = f"""\
The Python environment under {sys.prefix} is managed externally, and may not be
manipulated by the user. Please use specific tooling from the distributor of
the Python installation to interact with this environment instead.
"""
|
ConfigurationFileCouldNotBeLoaded
|
python
|
django__django
|
django/test/runner.py
|
{
"start": 4829,
"end": 5001
}
|
class ____:
"""
Dummy list class for faking storage of results in unittest.TestResult.
"""
__slots__ = ()
def append(self, item):
pass
|
DummyList
|
python
|
scikit-learn__scikit-learn
|
sklearn/ensemble/_voting.py
|
{
"start": 18275,
"end": 24898
}
|
class ____(RegressorMixin, _BaseVoting):
"""Prediction voting regressor for unfitted estimators.
A voting regressor is an ensemble meta-estimator that fits several base
regressors, each on the whole dataset. Then it averages the individual
predictions to form a final prediction.
For a detailed example, refer to
:ref:`sphx_glr_auto_examples_ensemble_plot_voting_regressor.py`.
Read more in the :ref:`User Guide <voting_regressor>`.
.. versionadded:: 0.21
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'`` using
:meth:`set_params`.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
weights : array-like of shape (n_regressors,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted values before averaging. Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
.. versionadded:: 0.23
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying regressor exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
VotingClassifier : Soft Voting/Majority Rule classifier.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.ensemble import VotingRegressor
>>> from sklearn.neighbors import KNeighborsRegressor
>>> r1 = LinearRegression()
>>> r2 = RandomForestRegressor(n_estimators=10, random_state=1)
>>> r3 = KNeighborsRegressor()
>>> X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])
>>> y = np.array([2, 6, 12, 20, 30, 42])
>>> er = VotingRegressor([('lr', r1), ('rf', r2), ('r3', r3)])
>>> print(er.fit(X, y).predict(X))
[ 6.8 8.4 12.5 17.8 26 34]
In the following example, we drop the `'lr'` estimator with
:meth:`~VotingRegressor.set_params` and fit the remaining two estimators:
>>> er = er.set_params(lr='drop')
>>> er = er.fit(X, y)
>>> len(er.estimators_)
2
"""
def __init__(self, estimators, *, weights=None, n_jobs=None, verbose=False):
super().__init__(estimators=estimators)
self.weights = weights
self.n_jobs = n_jobs
self.verbose = verbose
@_fit_context(
# estimators in VotingRegressor.estimators are not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y, **fit_params):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
y = column_or_1d(y, warn=True)
return super().fit(X, y, **fit_params)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
return np.average(self._predict(X), axis=1, weights=self._weights_not_none)
def transform(self, X):
"""Return predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
predictions : ndarray of shape (n_samples, n_classifiers)
Values predicted by each regressor.
"""
check_is_fitted(self)
return self._predict(X)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
_check_feature_names_in(self, input_features, generate_names=False)
class_name = self.__class__.__name__.lower()
return np.asarray(
[f"{class_name}_{name}" for name, est in self.estimators if est != "drop"],
dtype=object,
)
|
VotingRegressor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-mailchimp/unit_tests/test_config_datacenter_migration.py
|
{
"start": 3793,
"end": 4895
}
|
class ____:
"""Integration tests using actual config files."""
@pytest.mark.parametrize(
"config_path,expected_data_center",
[
("test_configs/test_config_api_key.json", "us10"),
("test_configs/test_config_oauth.json", "us10"),
],
ids=["api_key_config", "oauth_config"],
)
def test_config_file_integration(self, config_path, expected_data_center, requests_mock):
"""Test integration with actual config files."""
# Mock OAuth endpoint for OAuth config
requests_mock.get("https://login.mailchimp.com/oauth2/metadata", json={"dc": expected_data_center})
config_path = os.path.join(os.path.dirname(__file__), config_path)
config = load_config(config_path)
# Remove existing data_center to test extraction
if "data_center" in config:
del config["data_center"]
extractor = ExtractAndSetDataCenterConfigValue()
extractor.transform(config)
assert config.get("data_center") == expected_data_center
|
TestExtractAndSetDataCenterConfigValueIntegration
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.