language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 66970,
"end": 68054
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
reason = self.request.arguments.get("reason", [])
self.set_status(
int(self.get_argument("code")),
reason=to_unicode(reason[0]) if reason else None,
)
def get_http_client(self):
# simple_httpclient only: curl doesn't expose the reason string
return SimpleAsyncHTTPClient()
def test_status(self):
response = self.fetch("/?code=304")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Not Modified")
response = self.fetch("/?code=304&reason=Foo")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Foo")
response = self.fetch("/?code=682&reason=Bar")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Bar")
response = self.fetch("/?code=682")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Unknown")
| StatusReasonTest |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_automations/client.py | {
"start": 5561,
"end": 10931
} | class ____(BaseAsyncClient):
async def create_automation(self, automation: "AutomationCore") -> "UUID":
"""Creates an automation in Prefect Cloud."""
response = await self.request(
"POST",
"/automations/",
json=automation.model_dump(mode="json"),
)
from uuid import UUID
return UUID(response.json()["id"])
async def update_automation(
self, automation_id: "UUID", automation: "AutomationCore"
) -> None:
"""Updates an automation in Prefect Cloud."""
response = await self.request(
"PUT",
"/automations/{id}",
path_params={"id": automation_id},
json=automation.model_dump(mode="json", exclude_unset=True),
)
response.raise_for_status()
async def read_automations(self) -> list["Automation"]:
response = await self.request("POST", "/automations/filter")
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate_list(response.json())
async def find_automation(self, id_or_name: "str | UUID") -> "Automation | None":
from uuid import UUID
if isinstance(id_or_name, str):
name = id_or_name
try:
id = UUID(id_or_name)
except ValueError:
id = None
else:
id = id_or_name
name = str(id)
if id:
try:
automation = await self.read_automation(id)
return automation
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
automations = await self.read_automations()
# Look for it by an exact name
for automation in automations:
if automation.name == name:
return automation
# Look for it by a case-insensitive name
for automation in automations:
if automation.name.lower() == name.lower():
return automation
return None
async def read_automation(self, automation_id: "UUID | str") -> "Automation | None":
response = await self.request(
"GET", "/automations/{id}", path_params={"id": automation_id}
)
if response.status_code == 404:
return None
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate(response.json())
async def read_automations_by_name(self, name: str) -> list["Automation"]:
"""
Query the Prefect API for an automation by name. Only automations matching the provided name will be returned.
Args:
name: the name of the automation to query
Returns:
a list of Automation model representations of the automations
"""
from prefect.client.schemas.sorting import AutomationSort
from prefect.events.filters import (
AutomationFilter,
AutomationFilterName,
)
automation_filter = AutomationFilter(name=AutomationFilterName(any_=[name]))
response = await self.request(
"POST",
"/automations/filter",
json={
"sort": AutomationSort.UPDATED_DESC,
"automations": automation_filter.model_dump(mode="json")
if automation_filter
else None,
},
)
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate_list(response.json())
async def pause_automation(self, automation_id: "UUID") -> None:
response = await self.request(
"PATCH",
"/automations/{id}",
path_params={"id": automation_id},
json={"enabled": False},
)
response.raise_for_status()
async def resume_automation(self, automation_id: "UUID") -> None:
response = await self.request(
"PATCH",
"/automations/{id}",
path_params={"id": automation_id},
json={"enabled": True},
)
response.raise_for_status()
async def delete_automation(self, automation_id: "UUID") -> None:
response = await self.request(
"DELETE",
"/automations/{id}",
path_params={"id": automation_id},
)
if response.status_code == 404:
return
response.raise_for_status()
async def read_resource_related_automations(
self, resource_id: str
) -> list["Automation"]:
response = await self.request(
"GET",
"/automations/related-to/{resource_id}",
path_params={"resource_id": resource_id},
)
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate_list(response.json())
async def delete_resource_owned_automations(self, resource_id: str) -> None:
await self.request(
"DELETE",
"/automations/owned-by/{resource_id}",
path_params={"resource_id": resource_id},
)
| AutomationAsyncClient |
python | ray-project__ray | python/ray/tests/test_task_events_2.py | {
"start": 11278,
"end": 11587
} | class ____:
def children(self, pid_actor):
ray.get(pid_actor.report_pid.remote("children", os.getpid()))
ray.get(task_finish_child.options(name="task_finish_child").remote(pid_actor))
ray.get(task_sleep_child.options(name="task_sleep_child").remote(pid_actor))
@ray.remote
| ChildActor |
python | numpy__numpy | numpy/lib/tests/test_type_check.py | {
"start": 6991,
"end": 7847
} | class ____:
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isnan(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((1.,)) / 0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((-1.,)) / 0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array((0.,)) / 0.) == 1)
def test_integer(self):
assert_all(np.isnan(1) == 0)
def test_complex(self):
assert_all(np.isnan(1 + 1j) == 0)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array(0 + 0j) / 0.) == 1)
| TestIsnan |
python | mlflow__mlflow | mlflow/genai/labeling/stores.py | {
"start": 1438,
"end": 7491
} | class ____(metaclass=ABCMeta):
"""
Abstract class defining the interface for labeling store implementations.
This class defines the API interface for labeling operations that can be implemented
by different backend stores (e.g., MLflow tracking store, Databricks API).
"""
def __init__(self, tracking_uri: str | None = None) -> None:
"""
Initialize the labeling store.
Args:
tracking_uri: The tracking URI for the store.
"""
@abstractmethod
def get_labeling_session(self, run_id: str) -> LabelingSession:
"""
Get a labeling session by MLflow run ID.
Args:
run_id: The MLflow run ID of the labeling session.
Returns:
LabelingSession: The labeling session.
Raises:
mlflow.MlflowException: If labeling session is not found.
"""
@abstractmethod
def get_labeling_sessions(self, experiment_id: str | None = None) -> list[LabelingSession]:
"""
Get all labeling sessions for an experiment.
Args:
experiment_id: The experiment ID. If None, uses the currently active experiment.
Returns:
list[LabelingSession]: List of labeling sessions.
"""
@abstractmethod
def create_labeling_session(
self,
name: str,
*,
assigned_users: list[str] | None = None,
agent: str | None = None,
label_schemas: list[str] | None = None,
enable_multi_turn_chat: bool = False,
custom_inputs: dict[str, Any] | None = None,
experiment_id: str | None = None,
) -> LabelingSession:
"""
Create a new labeling session.
Args:
name: The name of the labeling session.
assigned_users: The users that will be assigned to label items in the session.
agent: The agent to be used to generate responses for the items in the session.
label_schemas: The label schemas to be used in the session.
enable_multi_turn_chat: Whether to enable multi-turn chat labeling for the session.
custom_inputs: Optional. Custom inputs to be used in the session.
experiment_id: The experiment ID. If None, uses the currently active experiment.
Returns:
LabelingSession: The created labeling session.
"""
@abstractmethod
def delete_labeling_session(self, labeling_session: LabelingSession) -> None:
"""
Delete a labeling session.
Args:
labeling_session: The labeling session to delete.
"""
@abstractmethod
def get_label_schema(self, name: str) -> LabelSchema:
"""
Get a label schema by name.
Args:
name: The name of the label schema.
Returns:
LabelSchema: The label schema.
Raises:
mlflow.MlflowException: If label schema is not found.
"""
@abstractmethod
def create_label_schema(
self,
name: str,
*,
type: str,
title: str,
input: Any,
instruction: str | None = None,
enable_comment: bool = False,
overwrite: bool = False,
) -> LabelSchema:
"""
Create a new label schema.
Args:
name: The name of the label schema. Must be unique across the review app.
type: The type of the label schema. Either "feedback" or "expectation".
title: The title of the label schema shown to stakeholders.
input: The input type of the label schema.
instruction: Optional. The instruction shown to stakeholders.
enable_comment: Optional. Whether to enable comments for the label schema.
overwrite: Optional. Whether to overwrite the existing label schema with the same name.
Returns:
LabelSchema: The created label schema.
"""
@abstractmethod
def delete_label_schema(self, name: str) -> None:
"""
Delete a label schema.
Args:
name: The name of the label schema to delete.
"""
@abstractmethod
def add_dataset_to_session(
self,
labeling_session: LabelingSession,
dataset_name: str,
record_ids: list[str] | None = None,
) -> LabelingSession:
"""
Add a dataset to a labeling session.
Args:
labeling_session: The labeling session to add the dataset to.
dataset_name: The name of the dataset.
record_ids: Optional. The individual record ids to be added to the session.
Returns:
LabelingSession: The updated labeling session.
"""
@abstractmethod
def add_traces_to_session(
self,
labeling_session: LabelingSession,
traces: list[Trace],
) -> LabelingSession:
"""
Add traces to a labeling session.
Args:
labeling_session: The labeling session to add traces to.
traces: List of Trace objects to add.
Returns:
LabelingSession: The updated labeling session.
"""
@abstractmethod
def sync_session_expectations(self, labeling_session: LabelingSession, dataset: str) -> None:
"""
Sync traces and expectations from a labeling session to a dataset.
Args:
labeling_session: The labeling session to sync.
dataset: The name of the dataset to sync traces and expectations to.
"""
@abstractmethod
def set_session_assigned_users(
self, labeling_session: LabelingSession, assigned_users: list[str]
) -> LabelingSession:
"""
Set the assigned users for a labeling session.
Args:
labeling_session: The labeling session to update.
assigned_users: The list of users to assign to the session.
Returns:
LabelingSession: The updated labeling session.
"""
| AbstractLabelingStore |
python | has2k1__plotnine | plotnine/scales/scale_xy.py | {
"start": 8391,
"end": 8570
} | class ____(scale_x_continuous):
"""
Continuous x position for timedelta data points
"""
trans: TransUser = "pd_timedelta"
@dataclass(kw_only=True)
| scale_x_timedelta |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 119836,
"end": 121403
} | class ____(ASTBase):
def __init__(self, type: ASTType, init: ASTInitializer) -> None:
self.type = type
self.init = init
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTypeWithInit):
return NotImplemented
return self.type == other.type and self.init == other.init
def __hash__(self) -> int:
return hash((self.type, self.init))
@property
def name(self) -> ASTNestedName:
return self.type.name
@property
def isPack(self) -> bool:
return self.type.isPack
def get_id(
self, version: int, objectType: str | None = None, symbol: Symbol | None = None
) -> str:
if objectType != 'member':
return self.type.get_id(version, objectType)
if version == 1:
return (
symbol.get_full_nested_name().get_id(version)
+ '__'
+ self.type.get_id(version)
)
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = [transform(self.type)]
if self.init:
res.append(transform(self.init))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
self.init.describe_signature(signode, mode, env, symbol)
| ASTTypeWithInit |
python | pydata__xarray | xarray/tests/arrays.py | {
"start": 1171,
"end": 4322
} | class ____(utils.NDArrayMixin):
"""Array-like that prevents casting to array.
Modeled after cupy."""
def __init__(self, array: np.ndarray):
self.array = array
def __getitem__(self, key):
return type(self)(self.array[key])
def to_numpy(self) -> np.ndarray:
"""Allow explicit conversions to numpy in `to_numpy`, but disallow np.asarray etc."""
return self.array
def __array__(
self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
) -> np.ndarray:
raise UnexpectedDataAccess("Tried accessing data")
def __array_namespace__(self):
"""Present to satisfy is_duck_array test."""
from xarray.tests import namespace
return namespace
CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS: dict[str, Callable] = {}
def implements(numpy_function):
"""Register an __array_function__ implementation for ConcatenatableArray objects."""
def decorator(func):
CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS[numpy_function] = func
return func
return decorator
@implements(np.concatenate)
def concatenate(
arrays: Iterable["ConcatenatableArray"], /, *, axis=0
) -> "ConcatenatableArray":
if any(not isinstance(arr, ConcatenatableArray) for arr in arrays):
raise TypeError
result = np.concatenate([arr._array for arr in arrays], axis=axis)
return ConcatenatableArray(result)
@implements(np.stack)
def stack(
arrays: Iterable["ConcatenatableArray"], /, *, axis=0
) -> "ConcatenatableArray":
if any(not isinstance(arr, ConcatenatableArray) for arr in arrays):
raise TypeError
result = np.stack([arr._array for arr in arrays], axis=axis)
return ConcatenatableArray(result)
@implements(np.result_type)
def result_type(*arrays_and_dtypes) -> np.dtype:
"""Called by xarray to ensure all arguments to concat have the same dtype."""
first_dtype, *other_dtypes = (np.dtype(obj) for obj in arrays_and_dtypes)
for other_dtype in other_dtypes:
if other_dtype != first_dtype:
raise ValueError("dtypes not all consistent")
return first_dtype
@implements(np.broadcast_to)
def broadcast_to(
x: "ConcatenatableArray", /, shape: tuple[int, ...]
) -> "ConcatenatableArray":
"""
Broadcasts an array to a specified shape, by either manipulating chunk keys or copying chunk manifest entries.
"""
if not isinstance(x, ConcatenatableArray):
raise TypeError
result = np.broadcast_to(x._array, shape=shape)
return ConcatenatableArray(result)
@implements(np.full_like)
def full_like(
x: "ConcatenatableArray", /, fill_value, **kwargs
) -> "ConcatenatableArray":
"""
Broadcasts an array to a specified shape, by either manipulating chunk keys or copying chunk manifest entries.
"""
if not isinstance(x, ConcatenatableArray):
raise TypeError
return ConcatenatableArray(np.full(x.shape, fill_value=fill_value, **kwargs))
@implements(np.all)
def numpy_all(x: "ConcatenatableArray", **kwargs) -> "ConcatenatableArray":
return type(x)(np.all(x._array, **kwargs))
| DuckArrayWrapper |
python | django__django | django/forms/fields.py | {
"start": 45624,
"end": 46291
} | class ____(CharField):
def __init__(self, *, protocol="both", unpack_ipv4=False, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(
protocol, unpack_ipv4
)
kwargs.setdefault("max_length", MAX_IPV6_ADDRESS_LENGTH)
super().__init__(**kwargs)
def to_python(self, value):
if value in self.empty_values:
return ""
value = value.strip()
if value and ":" in value:
return clean_ipv6_address(
value, self.unpack_ipv4, max_length=self.max_length
)
return value
| GenericIPAddressField |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/base.py | {
"start": 11755,
"end": 12989
} | class ____:
"""Tokenizer data class."""
chunk_overlap: int
"""Overlap in tokens between chunks"""
tokens_per_chunk: int
"""Maximum number of tokens per chunk"""
decode: Callable[[list[int]], str]
""" Function to decode a list of token IDs to a string"""
encode: Callable[[str], list[int]]
""" Function to encode a string to a list of token IDs"""
def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> list[str]:
"""Split incoming text and return chunks using tokenizer."""
splits: list[str] = []
input_ids = tokenizer.encode(text)
start_idx = 0
if tokenizer.tokens_per_chunk <= tokenizer.chunk_overlap:
msg = "tokens_per_chunk must be greater than chunk_overlap"
raise ValueError(msg)
while start_idx < len(input_ids):
cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
chunk_ids = input_ids[start_idx:cur_idx]
if not chunk_ids:
break
decoded = tokenizer.decode(chunk_ids)
if decoded:
splits.append(decoded)
if cur_idx == len(input_ids):
break
start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
return splits
| Tokenizer |
python | astropy__astropy | astropy/convolution/kernels.py | {
"start": 11936,
"end": 14039
} | class ____(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(
1.0 / (np.pi * (radius_out**2 - radius_in**2)), 0, 0, radius_in, width
)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self.normalize()
| Ring2DKernel |
python | getsentry__sentry | src/sentry/releases/endpoints/project_releases_token.py | {
"start": 1080,
"end": 2000
} | class ____(ProjectEndpoint):
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
"POST": ApiPublishStatus.UNKNOWN,
}
permission_classes = (StrictProjectPermission,)
def _regenerate_token(self, project):
token = uuid1().hex
ProjectOption.objects.set_value(project, "sentry:release-token", token)
return token
def get(self, request: Request, project) -> Response:
token = ProjectOption.objects.get_value(project, "sentry:release-token")
if token is None:
token = self._regenerate_token(project)
return Response({"token": token, "webhookUrl": _get_webhook_url(project, "builtin", token)})
def post(self, request: Request, project) -> Response:
token = self._regenerate_token(project)
return Response({"token": token, "webhookUrl": _get_webhook_url(project, "builtin", token)})
| ProjectReleasesTokenEndpoint |
python | joke2k__faker | faker/providers/address/de_AT/__init__.py | {
"start": 47,
"end": 6422
} | class ____(AddressProvider):
city_formats = ("{{city_name}}",)
city_with_postcode_formats = ("{{postcode}} {{city}}",)
street_name_formats = (
"{{first_name}}-{{last_name}}-{{street_suffix_long}}",
"{{last_name}}{{street_suffix_short}}",
)
street_address_formats = ("{{street_name}} {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("###", "##", "#", "#/#")
street_suffixes_long = (
"Gasse",
"Platz",
"Ring",
"Straße",
"Weg",
)
street_suffixes_short = (
"gasse",
"platz",
"ring",
"straße",
"str.",
"weg",
)
# https://en.wikipedia.org/wiki/List_of_postal_codes_in_Austria
postcode_formats = (
"1###",
"2###",
"3###",
"4###",
"5###",
"6###",
"7###",
"8###",
"9###",
)
# https://en.wikipedia.org/wiki/List_of_cities_and_towns_in_Austria
cities = (
"Allentsteig",
"Altheim",
"Althofen",
"Amstetten",
"Ansfelden",
"Attnang-Puchheim",
"Bad Aussee",
"Bad Hall",
"Bad Ischl",
"Bad Leonfelden",
"Bad Radkersburg",
"Bad Sankt Leonhard im Lavanttal",
"Bad Vöslau",
"Baden",
"Bärnbach",
"Berndorf",
"Bischofshofen",
"Bleiburg",
"Bludenz",
"Braunau am Inn",
"Bregenz",
"Bruck an der Leitha",
"Bruck an der Mur",
"Deutsch-Wagram",
"Deutschlandsberg",
"Dornbirn",
"Drosendorf-Zissersdorf 1",
"Dürnstein",
"Ebenfurth",
"Ebreichsdorf",
"Eferding",
"Eggenburg",
"Eisenerz",
"Eisenstadt",
"Enns",
"Fehring",
"Feldbach",
"Feldkirch",
"Feldkirchen",
"Ferlach",
"Fischamend",
"Frauenkirchen",
"Freistadt",
"Friedberg",
"Friesach",
"Frohnleiten",
"Fürstenfeld",
"Gallneukirchen",
"Gänserndorf",
"Geras",
"Gerasdorf bei Wien",
"Gföhl",
"Gleisdorf",
"Gloggnitz",
"Gmünd",
"Gmünd in Kärnten",
"Gmunden",
"Graz",
"Grein",
"Grieskirchen",
"Groß-Enzersdorf",
"Groß-Gerungs",
"Groß-Siegharts",
"Güssing",
"Haag",
"Hainburg an der Donau",
"Hainfeld",
"Hall in Tirol",
"Hallein",
"Hardegg",
"Hartberg",
"Heidenreichstein",
"Herzogenburg",
"Imst",
"Innsbruck",
"Jennersdorf",
"Judenburg",
"Kapfenberg",
"Kindberg",
"Klagenfurt",
"Klosterneuburg",
"Knittelfeld",
"Köflach",
"Korneuburg",
"Krems an der Donau",
"Kufstein",
"Laa an der Thaya",
"Laakirchen",
"Landeck",
"Langenlois",
"Leibnitz",
"Leoben",
"Lienz",
"Liezen",
"Lilienfeld",
"Linz",
"Litschau",
"Maissau",
"Mank",
"Mannersdorf am Leithagebirge",
"Marchegg",
"Marchtrenk",
"Mariazell",
"Mattersburg",
"Mattighofen",
"Mautern an der Donau",
"Melk",
"Mistelbach an der Zaya",
"Mödling",
"Murau",
"Mureck",
"Mürzzuschlag",
"Neulengbach",
"Neumarkt am Wallersee",
"Neunkirchen",
"Neusiedl am See",
"Oberndorf bei Salzburg",
"Oberpullendorf",
"Oberwart",
"Oberwälz",
"Perg",
"Peuerbach",
"Pinkafeld",
"Pöchlarn",
"Poysdorf",
"Pregarten",
"Pulkau",
"Purbach am Neusiedler See",
"Purkersdorf",
"Raabs an der Thaya",
"Radenthein",
"Radstadt",
"Rattenberg",
"Retz",
"Ried im Innkreis",
"Rohrbach in Oberösterreich",
"Rottenmann",
"Rust",
"Saalfelden am Steinernen Meer",
"Salzburg",
"Sankt Andrä im Lavanttal",
"Sankt Johann im Pongau",
"Sankt Pölten",
"Sankt Valentin",
"Sankt Veit an der Glan",
"Schärding",
"Scheibbs",
"Schladming",
"Schrattenthal",
"Schrems",
"Schwanenstadt",
"Schwaz",
"Schwechat",
"Spittal an der Drau",
"Stadtschlaining",
"Steyr",
"Steyregg",
"Stockerau",
"Straßburg",
"Ternitz",
"Traiskirchen",
"Traismauer",
"Traun",
"Trieben",
"Trofaiach",
"Tulln an der Donau",
"Villach",
"Vils",
"Vöcklabruck",
"Voitsberg",
"Völkermarkt",
"Waidhofen an der Thaya",
"Waidhofen an der Ybbs",
"Weitra",
"Weiz",
"Wels",
"Wien",
"Wiener Neustadt",
"Wieselburg",
"Wilhelmsburg",
"Wolfsberg",
"Wolkersdorf",
"Wörgl",
"Ybbs an der Donau",
"Zell am See",
"Zeltweg",
"Zistersdorf",
"Zwettl",
)
# https://en.wikipedia.org/wiki/States_of_Austria
states = (
"Wien",
"Steiermark",
"Burgenland",
"Tirol",
"Niederösterreich",
"Oberösterreich",
"Salzburg",
"Kärnten",
"Vorarlberg",
)
municipality_key_formats = (
"1####",
"2####",
"3####",
"4####",
"5####",
"6####",
"7####",
"8####",
"9####",
)
def street_suffix_short(self) -> str:
return self.random_element(self.street_suffixes_short)
def street_suffix_long(self) -> str:
return self.random_element(self.street_suffixes_long)
def city_name(self) -> str:
return self.random_element(self.cities)
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit
def city_with_postcode(self) -> str:
pattern: str = self.random_element(self.city_with_postcode_formats)
return self.generator.parse(pattern)
| Provider |
python | sanic-org__sanic | sanic/exceptions.py | {
"start": 22514,
"end": 23080
} | class ____(SanicException):
def __init__(
self,
file,
status_code: Optional[int] = None,
*,
quiet: Optional[bool] = None,
context: Optional[dict[str, Any]] = None,
extra: Optional[dict[str, Any]] = None,
headers: Optional[dict[str, Any]] = None,
):
super().__init__(
"could not execute config file %s" % file,
status_code=status_code,
quiet=quiet,
context=context,
extra=extra,
headers=headers,
)
| PyFileError |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py | {
"start": 3262,
"end": 5175
} | class ____(cfg.GraphVisitor):
"""CFG visitor that determines reaching definitions at statement level."""
def __init__(self, graph, definition_factory):
self._definition_factory = definition_factory
super(Analyzer, self).__init__(graph)
self.gen_map = {}
def init_state(self, _):
return _NodeState()
def visit_node(self, node):
prev_defs_out = self.out[node]
defs_in = _NodeState()
for n in node.prev:
defs_in |= self.out[n]
if anno.hasanno(node.ast_node, anno.Static.SCOPE):
node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
# The definition objects created by each node must be singletons because
# their ids are used in equality checks.
if node not in self.gen_map:
node_symbols = {}
# Every binding operation (assign, nonlocal, global, etc.) counts as a
# definition, with the exception of del, which only deletes without
# creating a new variable.
newly_defined = ((node_scope.bound | node_scope.globals) -
node_scope.deleted)
for s in newly_defined:
def_ = self._definition_factory()
node_symbols[s] = def_
# Every param receives a definition. Params are not necessarily
# considered as "modified".
for s, p in node_scope.params.items():
def_ = self._definition_factory()
def_.param_of = weakref.ref(p)
node_symbols[s] = def_
self.gen_map[node] = _NodeState(node_symbols)
gen = self.gen_map[node]
kill = node_scope.modified | node_scope.deleted
defs_out = gen | (defs_in - kill)
gen = self.gen_map[node]
defs_out = gen | (defs_in - kill)
else:
assert self.can_ignore(node), (node.ast_node, node)
defs_out = defs_in
self.in_[node] = defs_in
self.out[node] = defs_out
return prev_defs_out != defs_out
| Analyzer |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 22641,
"end": 25025
} | class ____(Operation):
def __init__(self, axis=-1, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.nn.softmax(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.softmax", "keras.ops.nn.softmax"])
def softmax(x, axis=-1):
"""Softmax activation function.
The elements of the output vector lie within the range `(0, 1)`, and their
total sum is exactly 1 (excluding the floating point rounding error).
Each vector is processed independently. The `axis` argument specifies the
axis along which the function is applied within the input.
It is defined as:
`f(x) = exp(x) / sum(exp(x))`
Args:
x: Input tensor.
axis: Integer, axis along which the softmax is applied.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_softmax = keras.ops.softmax(x)
>>> print(x_softmax)
array([0.09003057, 0.24472847, 0.66524096], shape=(3,), dtype=float64)
"""
# Don't use `backend.shape` since TensorFlow returns
# symbolic tensors for unknown shape which can trigger
# an error in TensorFlow graph execution.
if isinstance(axis, int) and x.shape[axis] == 1:
warnings.warn(
f"You are using a softmax over axis {axis} "
f"of a tensor of shape {x.shape}. This axis "
"has size 1. The softmax operation will always return "
"the value 1, which is likely not what you intended. "
"Did you mean to use a sigmoid instead?"
)
if any_symbolic_tensors((x,)):
return Softmax(axis).symbolic_call(x)
if isinstance(axis, tuple):
axis_to_keep = [v for v in range(len(x.shape)) if v not in axis]
x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis))
x_reshaped = backend.numpy.reshape(
x_transposed, (*[x.shape[v] for v in axis_to_keep], -1)
)
x = backend.nn.softmax(x_reshaped, axis=-1)
x = backend.numpy.reshape(x, x_transposed.shape)
x = backend.numpy.transpose(
x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis]))
)
return x
else:
return backend.nn.softmax(x, axis=axis)
| Softmax |
python | lazyprogrammer__machine_learning_examples | rl2/cartpole/td_lambda.py | {
"start": 663,
"end": 949
} | class ____:
def __init__(self, D):
self.w = np.random.randn(D) / np.sqrt(D)
def partial_fit(self, x, y, e, lr=1e-1):
self.w += lr*(y - x.dot(self.w))*e
def predict(self, X):
X = np.array(X)
return X.dot(self.w)
# Holds one SGDRegressor for each action
| SGDRegressor |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 57354,
"end": 57698
} | class ____(BaseModel):
usage: Optional["Usage"] = Field(default=None, description="")
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional["GroupsResult"] = Field(default=None, description="")
| InlineResponse20018 |
python | pydata__xarray | xarray/backends/h5netcdf_.py | {
"start": 1587,
"end": 3072
} | class ____(BaseNetCDF4Array):
def get_array(self, needs_lock=True):
ds = self.datastore._acquire(needs_lock)
return ds.variables[self.variable_name]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem
)
def _getitem(self, key):
with self.datastore.lock:
array = self.get_array(needs_lock=False)
return array[key]
def _read_attributes(h5netcdf_var):
# GH451
# to ensure conventions decoding works properly on Python 3, decode all
# bytes attributes to strings
attrs = {}
for k, v in h5netcdf_var.attrs.items():
if k not in ["_FillValue", "missing_value"] and isinstance(v, bytes):
try:
v = v.decode("utf-8")
except UnicodeDecodeError:
emit_user_level_warning(
f"'utf-8' codec can't decode bytes for attribute "
f"{k!r} of h5netcdf object {h5netcdf_var.name!r}, "
f"returning bytes undecoded.",
UnicodeWarning,
)
attrs[k] = v
return attrs
_extract_h5nc_encoding = functools.partial(
_extract_nc4_variable_encoding,
lsd_okay=False,
h5py_okay=True,
backend="h5netcdf",
unlimited_dims=None,
)
def _h5netcdf_create_group(dataset, name):
return dataset.create_group(name)
| H5NetCDFArrayWrapper |
python | encode__django-rest-framework | tests/test_utils.py | {
"start": 5471,
"end": 6028
} | class ____(TestCase):
"""
Internally, wrapped json functions should adhere to strict float handling
"""
def test_dumps(self):
with self.assertRaises(ValueError):
json.dumps(float('inf'))
with self.assertRaises(ValueError):
json.dumps(float('nan'))
def test_loads(self):
with self.assertRaises(ValueError):
json.loads("Infinity")
with self.assertRaises(ValueError):
json.loads("NaN")
@override_settings(REST_FRAMEWORK={'STRICT_JSON': False})
| JsonFloatTests |
python | dateutil__dateutil | src/dateutil/tz/tz.py | {
"start": 28028,
"end": 33828
} | class ____(tzrangebase):
"""
The ``tzrange`` object is a time zone specified by a set of offsets and
abbreviations, equivalent to the way the ``TZ`` variable can be specified
in POSIX-like systems, but using Python delta objects to specify DST
start, end and offsets.
:param stdabbr:
The abbreviation for standard time (e.g. ``'EST'``).
:param stdoffset:
An integer or :class:`datetime.timedelta` object or equivalent
specifying the base offset from UTC.
If unspecified, +00:00 is used.
:param dstabbr:
The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
If specified, with no other DST information, DST is assumed to occur
and the default behavior or ``dstoffset``, ``start`` and ``end`` is
used. If unspecified and no other DST information is specified, it
is assumed that this zone has no DST.
If this is unspecified and other DST information is *is* specified,
DST occurs in the zone but the time zone abbreviation is left
unchanged.
:param dstoffset:
A an integer or :class:`datetime.timedelta` object or equivalent
specifying the UTC offset during DST. If unspecified and any other DST
information is specified, it is assumed to be the STD offset +1 hour.
:param start:
A :class:`relativedelta.relativedelta` object or equivalent specifying
the time and time of year that daylight savings time starts. To
specify, for example, that DST starts at 2AM on the 2nd Sunday in
March, pass:
``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
If unspecified and any other DST information is specified, the default
value is 2 AM on the first Sunday in April.
:param end:
A :class:`relativedelta.relativedelta` object or equivalent
representing the time and time of year that daylight savings time
ends, with the same specification method as in ``start``. One note is
that this should point to the first time in the *standard* zone, so if
a transition occurs at 2AM in the DST zone and the clocks are set back
1 hour to 1AM, set the ``hours`` parameter to +1.
**Examples:**
.. testsetup:: tzrange
from dateutil.tz import tzrange, tzstr
.. doctest:: tzrange
>>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
True
>>> from dateutil.relativedelta import *
>>> range1 = tzrange("EST", -18000, "EDT")
>>> range2 = tzrange("EST", -18000, "EDT", -14400,
... relativedelta(hours=+2, month=4, day=1,
... weekday=SU(+1)),
... relativedelta(hours=+1, month=10, day=31,
... weekday=SU(-1)))
>>> tzstr('EST5EDT') == range1 == range2
True
"""
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
try:
stdoffset = stdoffset.total_seconds()
except (TypeError, AttributeError):
pass
try:
dstoffset = dstoffset.total_seconds()
except (TypeError, AttributeError):
pass
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = bool(self._start_delta)
def transitions(self, year):
"""
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
"""
if not self.hasdst:
return None
base_year = datetime.datetime(year, 1, 1)
start = base_year + self._start_delta
end = base_year + self._end_delta
return (start, end)
def __eq__(self, other):
if not isinstance(other, tzrange):
return NotImplemented
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
@property
def _dst_base_offset(self):
return self._dst_base_offset_
@six.add_metaclass(_TzStrFactory)
| tzrange |
python | google__jax | tests/debug_info_test.py | {
"start": 2915,
"end": 3740
} | class ____:
"""Use to inspect tracers.
We can `append` tracers from tracing contexts to this object. We collect
the tracer, along with the error message we get when we try to concretize
it. This is meant to simulate errors like concretization or leaking.
"""
tracers: list[tuple[core.Tracer, Exception]]
def __init__(self):
self.tracers = []
def append(self, t: Any) -> None:
if isinstance(t, core.Tracer):
try:
# We plan to do boolean conversion and catch the exception, but this works
# only for scalars
t_scalar = t
while t_scalar.shape:
t_scalar = t_scalar[0]
if t_scalar:
pass
assert False, t_scalar
except Exception as e:
self.tracers.append((t, e))
@jtu.with_config(jax_mutable_array_checks=True)
| TracerSpy |
python | eventlet__eventlet | tests/mock.py | {
"start": 35170,
"end": 52471
} | class ____:
attribute_name = None
_active_patches = set()
def __init__(
self, getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not None:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, ClassTypes):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
# don't use a with here (backwards compatibility with Python 2.4)
extra_args = []
entered_patchers = []
# can't use try...except...finally because of Python 2.4
# compatibility
exc_info = tuple()
try:
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# Pass the exception to __exit__
exc_info = sys.exc_info()
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__(*exc_info)
patched.patchings = [self]
if hasattr(func, 'func_code'):
# not in Python 3
patched.compat_co_firstlineno = getattr(
func, "compat_co_firstlineno",
func.func_code.co_firstlineno
)
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
# normalise False to None
if spec is False:
spec = None
if spec_set is False:
spec_set = None
if autospec is False:
autospec = None
if spec is not None and autospec is not None:
raise TypeError("Can't specify spec and autospec")
if ((spec is not None or autospec is not None) and
spec_set not in (True, None)):
raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
original, local = self.get_original()
if new is DEFAULT and autospec is None:
inherit = False
if spec is True:
# set spec to the object we are replacing
spec = original
if spec_set is True:
spec_set = original
spec = None
elif spec is not None:
if spec_set is True:
spec_set = spec
spec = None
elif spec_set is True:
spec_set = original
if spec is not None or spec_set is not None:
if original is DEFAULT:
raise TypeError("Can't use 'spec' with create=True")
if isinstance(original, ClassTypes):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif spec is not None or spec_set is not None:
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if _is_list(this_spec):
not_callable = '__call__' not in this_spec
else:
not_callable = not _callable(this_spec)
if not_callable:
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if (not _is_list(this_spec) and not
_instance_callable(this_spec)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not None:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
if original is DEFAULT:
raise TypeError("Can't use 'autospec' with create=True")
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and not hasattr(self.target, self.attribute):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.add(self)
return result
def stop(self):
"""Stop an active patch."""
self._active_patches.discard(self)
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, spec_set=None, autospec=None,
new_callable=None, **kwargs
):
"""
patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs)
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) in (unicode, str):
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock with be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
| _patch |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 225323,
"end": 225717
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
):
"""Airbyte Source for Whisky Hunter.
Documentation can be found at https://docs.airbyte.io/integrations/sources/whisky-hunter
Args:
name (str): The name of the destination.
"""
super().__init__("Whisky Hunter", name)
| WhiskyHunterSource |
python | gevent__gevent | src/gevent/tests/test__queue.py | {
"start": 15869,
"end": 15948
} | class ____(TestGetInterrupt):
kind = queue.LifoQueue
| TestGetInterruptLifoQueue |
python | Lightning-AI__lightning | tests/tests_fabric/plugins/precision/test_xla_integration.py | {
"start": 801,
"end": 2382
} | class ____(nn.Module):
def __init__(self, expected_dtype):
super().__init__()
self.expected_dtype = expected_dtype
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
# TODO: These should be float16/bfloat16
assert x.dtype == torch.float32
assert torch.tensor([0.0]).dtype == torch.float32
return self.layer(x)
def _run_xla_precision(fabric, expected_dtype):
with fabric.init_module():
model = BoringPrecisionModule(expected_dtype)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
model, optimizer = fabric.setup(model, optimizer)
batch = torch.rand(2, 32, device=fabric.device)
# TODO: This should be float16/bfloat16
assert model.layer.weight.dtype == model.layer.bias.dtype == torch.float32
assert batch.dtype == torch.float32
output = model(batch)
assert output.dtype == torch.float32
loss = torch.nn.functional.mse_loss(output, torch.ones_like(output))
fabric.backward(loss)
assert model.layer.weight.grad.dtype == torch.float32
optimizer.step()
optimizer.zero_grad()
@pytest.mark.parametrize(("precision", "expected_dtype"), [("16-true", torch.float16), ("bf16-true", torch.bfloat16)])
@RunIf(tpu=True, standalone=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_xla_precision(precision, expected_dtype):
fabric = Fabric(devices=1, precision=precision)
assert isinstance(fabric._precision, XLAPrecision)
fabric.launch(_run_xla_precision, expected_dtype=expected_dtype)
| BoringPrecisionModule |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/templates/render.py | {
"start": 1335,
"end": 7558
} | class ____:
column: str
title: str
formatter: Optional[Callable[[Any], str]] = None
def dataframe_to_table_html(df: pd.DataFrame, column_mapping: List[ColumnInfo]) -> str:
"""
Convert a dataframe to an HTML table.
"""
# convert true and false to checkmarks and x's
df.replace({True: "✅", False: "❌"}, inplace=True)
title_mapping = {column_info["column"]: column_info["title"] for column_info in column_mapping}
df.rename(columns=title_mapping, inplace=True)
html_formatters = {column_info["title"]: column_info["formatter"] for column_info in column_mapping if "formatter" in column_info}
columns = [column_info["title"] for column_info in column_mapping]
return df.to_html(
columns=columns,
justify="left",
index=False,
formatters=html_formatters,
escape=False,
classes="styled-table",
na_rep="❌",
render_links=True,
)
def value_to_emoji(value: Any) -> str:
if value is True:
return "✅"
elif value is False:
return "❌"
elif value is None or pd.isna(value):
return "❓"
else:
return str(value)
def calculated_report_columns(row: pd.Series) -> dict:
# Add a new column called past_runs
# This column will be a string of checkmarks and x's from oldest to newest "❌❌✅❓✅✅✅✅✅❌"
past_runs = "".join([value_to_emoji(value) for value in row])
# if there is only one build, then the second to last build status cannot be determined, and we will default to true
last_build_status = row.iloc[-1]
second_to_last_build_status = True if len(row) == 1 else row.iloc[-2]
only_failed_last_build = last_build_status == False and second_to_last_build_status == True
failed_last_build_two_builds = last_build_status == False and second_to_last_build_status == False
test_report_url = f"https://connectors.airbyte.com/files/generated_reports/test_summary/{row.name}/index.html"
return {
"past_runs": past_runs,
"last_build_status": last_build_status,
"only_failed_last_build": only_failed_last_build,
"failed_last_build_two_builds": failed_last_build_two_builds,
"test_report_url": test_report_url,
}
def enhance_nightly_report(nightly_report_df: pd.DataFrame) -> str:
nightly_report_df = nightly_report_df.reindex(sorted(nightly_report_df.columns), axis=1)
calculated_report_columns_df = nightly_report_df.apply(lambda row: calculated_report_columns(row), axis="columns", result_type="expand")
enhance_nightly_report_df = pd.concat([nightly_report_df, calculated_report_columns_df], axis="columns")
return enhance_nightly_report_df
def nightly_report_df_to_md(nightly_report_df: pd.DataFrame) -> str:
return nightly_report_df[["past_runs", "test_report_url"]].to_markdown(index=True)
def get_stats_for_connector_type(enhanced_nightly_report_df: pd.DataFrame, connector_type: str) -> str:
specific_connector_type_df = enhanced_nightly_report_df[enhanced_nightly_report_df.index.str.contains(connector_type)]
total = len(specific_connector_type_df)
tested = len(specific_connector_type_df[specific_connector_type_df["last_build_status"].notna()])
success = len(specific_connector_type_df[specific_connector_type_df["last_build_status"] == True])
failure = len(specific_connector_type_df[specific_connector_type_df["last_build_status"] == False])
# Safely calculate percentage and Handle the case where there are no tests, or divide by zero
success_percent = 0
if tested > 0:
success_percent = round(success / tested * 100, 2)
return {
"total": total,
"tested": tested,
"success": success,
"failure": failure,
"success_percent": success_percent,
}
def get_latest_nightly_report_df(nightly_report_complete_df: pd.DataFrame) -> pd.DataFrame:
nightly_report_complete_df = nightly_report_complete_df.sort_values(by=["parent_prefix"])
latest_run = nightly_report_complete_df.iloc[-1]
return latest_run
# Templates
def render_connector_registry_locations_html(destinations_table_html: str, sources_table_html: str) -> str:
env = Environment(loader=PackageLoader("orchestrator", "templates"))
template = env.get_template("connector_registry_locations.html")
return template.render(destinations_table_html=destinations_table_html, sources_table_html=sources_table_html)
@deep_copy_params
def render_connector_test_badge(test_summary: pd.DataFrame) -> str:
number_of_passes = len(test_summary[test_summary["success"] == True])
number_of_fails = len(test_summary[test_summary["success"] == False])
latest_test = test_summary.iloc[0]
logo_svg_string = '<svg version="1.0" xmlns="http://www.w3.org/2000/svg"\n width="32.000000pt" height="32.000000pt" viewBox="0 0 32.000000 32.000000"\n preserveAspectRatio="xMidYMid meet">\n\n<g transform="translate(0.000000,32.000000) scale(0.100000,-0.100000)"\nfill="#000000" stroke="none">\n<path d="M136 279 c-28 -22 -111 -157 -102 -166 8 -8 34 16 41 38 8 23 21 25\n29 3 3 -8 -6 -35 -20 -60 -18 -31 -22 -44 -12 -44 20 0 72 90 59 103 -6 6 -11\n27 -11 47 0 77 89 103 137 41 18 -23 16 -62 -5 -96 -66 -109 -74 -125 -59\n-125 24 0 97 140 97 185 0 78 -92 123 -154 74z"/>\n<path d="M168 219 c-22 -13 -23 -37 -2 -61 12 -12 14 -22 7 -30 -5 -7 -22 -34\n-37 -60 -20 -36 -23 -48 -12 -48 13 0 106 147 106 169 0 11 -28 41 -38 41 -4\n0 -15 -5 -24 -11z m32 -34 c0 -8 -4 -15 -10 -15 -5 0 -10 7 -10 15 0 8 5 15\n10 15 6 0 10 -7 10 -15z"/>\n</g>\n</svg>\n'
message = ""
color = "red"
if number_of_passes > 0:
message += f"✔ {number_of_passes}"
if number_of_passes > 0 and number_of_fails > 0:
color = "yellow"
message += " | "
if number_of_fails > 0:
message += f"✘ {number_of_fails}"
if latest_test["success"] == True:
color = "green"
badge_dict = {
"schemaVersion": 1,
"label": "",
"labelColor": "#c5c4ff",
"message": message,
"color": color,
"cacheSeconds": 300,
"logoSvg": logo_svg_string,
}
json_string = json.dumps(badge_dict)
return json_string
| ColumnInfo |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 4045,
"end": 7766
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor`):
The loss value.
decoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The predicted coordinates logits of the objects.
decoder_class_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`):
The predicted class of the objects.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The initial reference points.
intermediate_reference_points (`tuple[tuple[torch.FloatTensor]]`):
The intermediate reference points.
encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The predicted coordinates of the objects from the encoder.
encoder_class_logits (`tuple[torch.FloatTensor]`):
The predicted class of the objects from the encoder.
encoder_extracted_states (`torch.FloatTensor`):
The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the encoder.
decoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
decoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
encoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
encoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
classes_structure (`torch.LongTensor`, *optional*):
The number of queried classes for each image.
"""
loss: Optional[torch.FloatTensor] = None
decoder_coord_logits: Optional[torch.FloatTensor] = None
decoder_class_logits: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_coord_logits: Optional[torch.FloatTensor] = None
encoder_class_logits: Optional[tuple[torch.FloatTensor]] = None
encoder_extracted_states: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
classes_structure: Optional[torch.LongTensor] = None
@use_kernel_forward_from_hub("MultiScaleDeformableAttention")
# Copied from transformers.models.deformable_detr.modeling_deformable_detr.MultiScaleDeformableAttention
| OmDetTurboObjectDetectionOutput |
python | scrapy__scrapy | tests/test_feedexport.py | {
"start": 61637,
"end": 79888
} | class ____(TestFeedExportBase):
items = [{"foo": "bar"}]
expected = b"foo\r\nbar\r\n"
class MyPlugin1:
def __init__(self, file, feed_options):
self.file = file
self.feed_options = feed_options
self.char = self.feed_options.get("plugin1_char", b"")
def write(self, data):
written_count = self.file.write(data)
written_count += self.file.write(self.char)
return written_count
def close(self):
self.file.close()
def _named_tempfile(self, name) -> str:
return str(Path(self.temp_dir, name))
async def run_and_export(
self, spider_cls: type[Spider], settings: dict[str, Any]
) -> dict[str, bytes | None]:
"""Run spider with specified settings; return exported data with filename."""
FEEDS = settings.get("FEEDS") or {}
settings["FEEDS"] = {
printf_escape(path_to_url(file_path)): feed_options
for file_path, feed_options in FEEDS.items()
}
content: dict[str, bytes | None] = {}
try:
spider_cls.start_urls = [self.mockserver.url("/")]
crawler = get_crawler(spider_cls, settings)
await maybe_deferred_to_future(crawler.crawl())
for file_path in FEEDS:
content[str(file_path)] = (
Path(file_path).read_bytes() if Path(file_path).exists() else None
)
finally:
for file_path in FEEDS:
if not Path(file_path).exists():
continue
Path(file_path).unlink()
return content
def get_gzip_compressed(self, data, compresslevel=9, mtime=0, filename=""):
    """Gzip-compress *data* with deterministic header fields.

    ``mtime`` defaults to 0 and ``filename`` to "" so repeated calls with
    the same arguments produce byte-identical output, letting tests compare
    compressed feeds directly.
    """
    buffer = BytesIO()
    with gzip.GzipFile(
        fileobj=buffer,
        filename=filename,
        mtime=mtime,
        compresslevel=compresslevel,
        mode="wb",
    ) as archive:
        archive.write(data)
    buffer.seek(0)
    return buffer.read()
@deferred_f_from_coro_f
async def test_gzip_plugin(self):
    """The GzipPlugin post-processor must emit valid gzip data."""
    filename = self._named_tempfile("gzip_file")
    settings = {
        "FEEDS": {
            filename: {
                "format": "csv",
                "postprocessing": ["scrapy.extensions.postprocessing.GzipPlugin"],
            },
        },
    }
    data = await self.exported_data(self.items, settings)
    try:
        # gzip.decompress raises OSError (BadGzipFile) on malformed input.
        gzip.decompress(data[filename])
    except OSError:
        pytest.fail("Received invalid gzip data.")
@deferred_f_from_coro_f
async def test_gzip_plugin_compresslevel(self):
filename_to_compressed = {
self._named_tempfile("compresslevel_0"): self.get_gzip_compressed(
self.expected, compresslevel=0
),
self._named_tempfile("compresslevel_9"): self.get_gzip_compressed(
self.expected, compresslevel=9
),
}
settings = {
"FEEDS": {
self._named_tempfile("compresslevel_0"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.GzipPlugin"],
"gzip_compresslevel": 0,
"gzip_mtime": 0,
"gzip_filename": "",
},
self._named_tempfile("compresslevel_9"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.GzipPlugin"],
"gzip_compresslevel": 9,
"gzip_mtime": 0,
"gzip_filename": "",
},
},
}
data = await self.exported_data(self.items, settings)
for filename, compressed in filename_to_compressed.items():
result = gzip.decompress(data[filename])
assert compressed == data[filename]
assert result == self.expected
@deferred_f_from_coro_f
async def test_gzip_plugin_mtime(self):
filename_to_compressed = {
self._named_tempfile("mtime_123"): self.get_gzip_compressed(
self.expected, mtime=123
),
self._named_tempfile("mtime_123456789"): self.get_gzip_compressed(
self.expected, mtime=123456789
),
}
settings = {
"FEEDS": {
self._named_tempfile("mtime_123"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.GzipPlugin"],
"gzip_mtime": 123,
"gzip_filename": "",
},
self._named_tempfile("mtime_123456789"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.GzipPlugin"],
"gzip_mtime": 123456789,
"gzip_filename": "",
},
},
}
data = await self.exported_data(self.items, settings)
for filename, compressed in filename_to_compressed.items():
result = gzip.decompress(data[filename])
assert compressed == data[filename]
assert result == self.expected
@deferred_f_from_coro_f
async def test_gzip_plugin_filename(self):
filename_to_compressed = {
self._named_tempfile("filename_FILE1"): self.get_gzip_compressed(
self.expected, filename="FILE1"
),
self._named_tempfile("filename_FILE2"): self.get_gzip_compressed(
self.expected, filename="FILE2"
),
}
settings = {
"FEEDS": {
self._named_tempfile("filename_FILE1"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.GzipPlugin"],
"gzip_mtime": 0,
"gzip_filename": "FILE1",
},
self._named_tempfile("filename_FILE2"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.GzipPlugin"],
"gzip_mtime": 0,
"gzip_filename": "FILE2",
},
},
}
data = await self.exported_data(self.items, settings)
for filename, compressed in filename_to_compressed.items():
result = gzip.decompress(data[filename])
assert compressed == data[filename]
assert result == self.expected
@deferred_f_from_coro_f
async def test_lzma_plugin(self):
filename = self._named_tempfile("lzma_file")
settings = {
"FEEDS": {
filename: {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
},
},
}
data = await self.exported_data(self.items, settings)
try:
lzma.decompress(data[filename])
except lzma.LZMAError:
pytest.fail("Received invalid lzma data.")
@deferred_f_from_coro_f
async def test_lzma_plugin_format(self):
filename_to_compressed = {
self._named_tempfile("format_FORMAT_XZ"): lzma.compress(
self.expected, format=lzma.FORMAT_XZ
),
self._named_tempfile("format_FORMAT_ALONE"): lzma.compress(
self.expected, format=lzma.FORMAT_ALONE
),
}
settings = {
"FEEDS": {
self._named_tempfile("format_FORMAT_XZ"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
"lzma_format": lzma.FORMAT_XZ,
},
self._named_tempfile("format_FORMAT_ALONE"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
"lzma_format": lzma.FORMAT_ALONE,
},
},
}
data = await self.exported_data(self.items, settings)
for filename, compressed in filename_to_compressed.items():
result = lzma.decompress(data[filename])
assert compressed == data[filename]
assert result == self.expected
@deferred_f_from_coro_f
async def test_lzma_plugin_check(self):
filename_to_compressed = {
self._named_tempfile("check_CHECK_NONE"): lzma.compress(
self.expected, check=lzma.CHECK_NONE
),
self._named_tempfile("check_CHECK_CRC256"): lzma.compress(
self.expected, check=lzma.CHECK_SHA256
),
}
settings = {
"FEEDS": {
self._named_tempfile("check_CHECK_NONE"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
"lzma_check": lzma.CHECK_NONE,
},
self._named_tempfile("check_CHECK_CRC256"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
"lzma_check": lzma.CHECK_SHA256,
},
},
}
data = await self.exported_data(self.items, settings)
for filename, compressed in filename_to_compressed.items():
result = lzma.decompress(data[filename])
assert compressed == data[filename]
assert result == self.expected
@deferred_f_from_coro_f
async def test_lzma_plugin_preset(self):
filename_to_compressed = {
self._named_tempfile("preset_PRESET_0"): lzma.compress(
self.expected, preset=0
),
self._named_tempfile("preset_PRESET_9"): lzma.compress(
self.expected, preset=9
),
}
settings = {
"FEEDS": {
self._named_tempfile("preset_PRESET_0"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
"lzma_preset": 0,
},
self._named_tempfile("preset_PRESET_9"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
"lzma_preset": 9,
},
},
}
data = await self.exported_data(self.items, settings)
for filename, compressed in filename_to_compressed.items():
result = lzma.decompress(data[filename])
assert compressed == data[filename]
assert result == self.expected
@deferred_f_from_coro_f
async def test_lzma_plugin_filters(self):
if "PyPy" in sys.version:
# https://foss.heptapod.net/pypy/pypy/-/issues/3527
pytest.skip("lzma filters doesn't work in PyPy")
filters = [{"id": lzma.FILTER_LZMA2}]
compressed = lzma.compress(self.expected, filters=filters)
filename = self._named_tempfile("filters")
settings = {
"FEEDS": {
filename: {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.LZMAPlugin"],
"lzma_filters": filters,
},
},
}
data = await self.exported_data(self.items, settings)
assert compressed == data[filename]
result = lzma.decompress(data[filename])
assert result == self.expected
@deferred_f_from_coro_f
async def test_bz2_plugin(self):
filename = self._named_tempfile("bz2_file")
settings = {
"FEEDS": {
filename: {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.Bz2Plugin"],
},
},
}
data = await self.exported_data(self.items, settings)
try:
bz2.decompress(data[filename])
except OSError:
pytest.fail("Received invalid bz2 data.")
@deferred_f_from_coro_f
async def test_bz2_plugin_compresslevel(self):
filename_to_compressed = {
self._named_tempfile("compresslevel_1"): bz2.compress(
self.expected, compresslevel=1
),
self._named_tempfile("compresslevel_9"): bz2.compress(
self.expected, compresslevel=9
),
}
settings = {
"FEEDS": {
self._named_tempfile("compresslevel_1"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.Bz2Plugin"],
"bz2_compresslevel": 1,
},
self._named_tempfile("compresslevel_9"): {
"format": "csv",
"postprocessing": ["scrapy.extensions.postprocessing.Bz2Plugin"],
"bz2_compresslevel": 9,
},
},
}
data = await self.exported_data(self.items, settings)
for filename, compressed in filename_to_compressed.items():
result = bz2.decompress(data[filename])
assert compressed == data[filename]
assert result == self.expected
@deferred_f_from_coro_f
async def test_custom_plugin(self):
    """A pass-through custom plugin must leave the exported bytes intact."""
    filename = self._named_tempfile("csv_file")
    settings = {
        "FEEDS": {
            filename: {
                "format": "csv",
                # Plugin classes may be passed directly, not only as paths.
                "postprocessing": [self.MyPlugin1],
            },
        },
    }
    data = await self.exported_data(self.items, settings)
    assert data[filename] == self.expected
@deferred_f_from_coro_f
async def test_custom_plugin_with_parameter(self):
expected = b"foo\r\n\nbar\r\n\n"
filename = self._named_tempfile("newline")
settings = {
"FEEDS": {
filename: {
"format": "csv",
"postprocessing": [self.MyPlugin1],
"plugin1_char": b"\n",
},
},
}
data = await self.exported_data(self.items, settings)
assert data[filename] == expected
@deferred_f_from_coro_f
async def test_custom_plugin_with_compression(self):
expected = b"foo\r\n\nbar\r\n\n"
filename_to_decompressor = {
self._named_tempfile("bz2"): bz2.decompress,
self._named_tempfile("lzma"): lzma.decompress,
self._named_tempfile("gzip"): gzip.decompress,
}
settings = {
"FEEDS": {
self._named_tempfile("bz2"): {
"format": "csv",
"postprocessing": [
self.MyPlugin1,
"scrapy.extensions.postprocessing.Bz2Plugin",
],
"plugin1_char": b"\n",
},
self._named_tempfile("lzma"): {
"format": "csv",
"postprocessing": [
self.MyPlugin1,
"scrapy.extensions.postprocessing.LZMAPlugin",
],
"plugin1_char": b"\n",
},
self._named_tempfile("gzip"): {
"format": "csv",
"postprocessing": [
self.MyPlugin1,
"scrapy.extensions.postprocessing.GzipPlugin",
],
"plugin1_char": b"\n",
},
},
}
data = await self.exported_data(self.items, settings)
for filename, decompressor in filename_to_decompressor.items():
result = decompressor(data[filename])
assert result == expected
@deferred_f_from_coro_f
async def test_exports_compatibility_with_postproc(self):
filename_to_expected = {
self._named_tempfile("csv"): b"foo\r\nbar\r\n",
self._named_tempfile("json"): b'[\n{"foo": "bar"}\n]',
self._named_tempfile("jsonlines"): b'{"foo": "bar"}\n',
self._named_tempfile("xml"): b'<?xml version="1.0" encoding="utf-8"?>\n'
b"<items>\n<item><foo>bar</foo></item>\n</items>",
}
settings = {
"FEEDS": {
self._named_tempfile("csv"): {
"format": "csv",
"postprocessing": [self.MyPlugin1],
# empty plugin to activate postprocessing.PostProcessingManager
},
self._named_tempfile("json"): {
"format": "json",
"postprocessing": [self.MyPlugin1],
},
self._named_tempfile("jsonlines"): {
"format": "jsonlines",
"postprocessing": [self.MyPlugin1],
},
self._named_tempfile("xml"): {
"format": "xml",
"postprocessing": [self.MyPlugin1],
},
self._named_tempfile("marshal"): {
"format": "marshal",
"postprocessing": [self.MyPlugin1],
},
self._named_tempfile("pickle"): {
"format": "pickle",
"postprocessing": [self.MyPlugin1],
},
},
}
data = await self.exported_data(self.items, settings)
for filename, result in data.items():
if "pickle" in filename:
expected, result = self.items[0], pickle.loads(result)
elif "marshal" in filename:
expected, result = self.items[0], marshal.loads(result)
else:
expected = filename_to_expected[filename]
assert result == expected
| TestFeedPostProcessedExports |
python | scikit-learn__scikit-learn | sklearn/utils/deprecation.py | {
"start": 171,
"end": 3531
} | class ____:
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty of parentheses:
Examples
--------
>>> from sklearn.utils import deprecated
>>> deprecated()
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : str, default=''
To be added to the deprecation messages.
"""
# Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=""):
    # Free-form text appended to the deprecation message and docstring.
    self.extra = extra
def __call__(self, obj):
    """Decorate *obj*, dispatching on whether it is a class, a property,
    or a plain function/method.

    Parameters
    ----------
    obj : object
        The object to mark as deprecated.
    """
    if isinstance(obj, type):
        return self._decorate_class(obj)
    if isinstance(obj, property):
        # Only triggered properly when `deprecated` is applied *above*
        # `property` in the decorator stack:
        #
        #     @deprecated(msg)
        #     @property
        #     def deprecated_attribute_(self):
        #         ...
        return self._decorate_property(obj)
    return self._decorate_fun(obj)
def _decorate_class(self, cls):
    """Patch ``cls.__new__`` so instantiating the class emits a FutureWarning."""
    msg = "Class %s is deprecated" % cls.__name__
    if self.extra:
        msg += "; %s" % self.extra
    # Capture the original __new__ and constructor signature *before*
    # patching, so they can be restored/inspected later.
    new = cls.__new__
    sig = signature(cls)
    def wrapped(cls, *args, **kwargs):
        warnings.warn(msg, category=FutureWarning)
        # object.__new__ accepts no extra arguments; forward them only to
        # a custom __new__.
        if new is object.__new__:
            return object.__new__(cls)
        return new(cls, *args, **kwargs)
    cls.__new__ = wrapped
    wrapped.__name__ = "__new__"
    # Keep a handle on the original so tooling can detect the wrapping.
    wrapped.deprecated_original = new
    # Restore the original signature, see PEP 362.
    cls.__signature__ = sig
    return cls
def _decorate_fun(self, fun):
    """Wrap *fun* so that calling it emits a FutureWarning first."""
    msg = f"Function {fun.__name__} is deprecated"
    if self.extra:
        msg += f"; {self.extra}"

    @functools.wraps(fun)
    def wrapped(*args, **kwargs):
        warnings.warn(msg, category=FutureWarning)
        return fun(*args, **kwargs)

    # Keep a reference to the wrapped function so that function arguments
    # can be introspected (historically for Python 2 compatibility).
    wrapped.__wrapped__ = fun
    return wrapped
def _decorate_property(self, prop):
    """Wrap a property's getter so attribute access emits a FutureWarning."""
    # NOTE(review): unlike the class/function paths, the message here is
    # only ``self.extra`` — there is no "Attribute X is deprecated" prefix,
    # and it is empty when no extra text was given. Confirm intended.
    msg = self.extra
    @property
    @functools.wraps(prop.fget)
    def wrapped(*args, **kwargs):
        warnings.warn(msg, category=FutureWarning)
        return prop.fget(*args, **kwargs)
    # NOTE(review): only the getter is preserved; any setter/deleter on the
    # original property is dropped by this wrapping.
    return wrapped
def _is_deprecated(func):
    """Helper to check if func is wrapped by our deprecated decorator"""
    cells = getattr(func, "__closure__", None) or ()
    # The decorator's closure captures the warning message, which always
    # contains the word "deprecated".
    captured_text = "".join(
        c.cell_contents for c in cells if isinstance(c.cell_contents, str)
    )
    return "deprecated" in captured_text
| deprecated |
python | matplotlib__matplotlib | doc/sphinxext/redirect_from.py | {
"start": 2523,
"end": 4301
} | class ____(SphinxDirective):
required_arguments = 1
def run(self):
    """Record the current document as the redirect target for the page
    named in the directive argument.

    Raises
    ------
    ValueError
        If the named page is already registered as redirecting to a
        different document.
    """
    redirected_doc, = self.arguments
    domain = self.env.get_domain('redirect_from')
    current_doc = self.env.path2doc(self.state.document.current_source)
    # Resolve the argument relative to the current document's location.
    redirected_reldoc, _ = self.env.relfn2path(redirected_doc, current_doc)
    if (
        redirected_reldoc in domain.redirects
        and domain.redirects[redirected_reldoc] != current_doc
    ):
        raise ValueError(
            f"{redirected_reldoc} is already noted as redirecting to "
            f"{domain.redirects[redirected_reldoc]}\n"
            f"Cannot also redirect it to {current_doc}"
        )
    domain.redirects[redirected_reldoc] = current_doc
    # A redirect directive produces no visible output nodes.
    return []
def _generate_redirects(app, exception):
    """Write an HTML refresh stub for every registered redirect.

    Intended for the ``build-finished`` event; does nothing for non-HTML
    builders or when the build raised an exception.
    """
    builder = app.builder
    if builder.name != "html" or exception:
        return
    for k, v in app.env.get_domain('redirect_from').redirects.items():
        p = Path(app.outdir, k + builder.out_suffix)
        html = HTML_TEMPLATE.format(v=builder.get_relative_uri(k, v))
        if p.is_file():
            # Never clobber an existing page; a differing file usually means
            # a stale build directory.
            if p.read_text() != html:
                logger.warning('A redirect-from directive is trying to '
                               'create %s, but that file already exists '
                               '(perhaps you need to run "make clean")', p)
        else:
            logger.info('making refresh html file: %s redirect to %s', k, v)
            p.parent.mkdir(parents=True, exist_ok=True)
            p.write_text(html, encoding='utf-8')
def _clear_redirects(app):
    """Drop any redirect mappings cached from a previous build."""
    redirects = app.env.get_domain('redirect_from').redirects
    if redirects:
        logger.info('clearing cached redirects')
        redirects.clear()
| RedirectFrom |
python | rq__rq | rq/dependency.py | {
"start": 136,
"end": 1000
} | class ____:
@classmethod
def get_jobs_with_met_dependencies(cls, jobs: Iterable['Job'], pipeline: Pipeline):
    """Partition *jobs* into (met, unmet) dependency lists.

    For each job, its dependency keys are WATCHed and the registration plus
    dependency check run under optimistic locking; if a watched key changes
    concurrently, ``pipeline.execute()`` raises ``WatchError`` and the job
    is retried.

    Fix: the classification is now recorded only *after* a successful
    ``execute()``. Previously the job was appended to a result list before
    ``execute()``, so a ``WatchError`` retry appended the same job twice.
    """
    jobs_with_met_dependencies = []
    jobs_with_unmet_dependencies = []
    for job in jobs:
        while True:
            try:
                # Watch every dependency key so a concurrent status change
                # invalidates this transaction.
                pipeline.watch(*[Job.key_for(dependency_id) for dependency_id in job._dependency_ids])
                job.register_dependency(pipeline=pipeline)
                dependencies_met = job.dependencies_are_met(pipeline=pipeline)
                pipeline.execute()
            except WatchError:
                # A watched dependency changed under us — retry this job.
                continue
            # Only record the result once the transaction committed, so a
            # retried job is never counted twice.
            if dependencies_met:
                jobs_with_met_dependencies.append(job)
            else:
                jobs_with_unmet_dependencies.append(job)
            break
    return jobs_with_met_dependencies, jobs_with_unmet_dependencies
| Dependency |
python | pytorch__pytorch | test/onnx/test_models_quantized_onnxruntime.py | {
"start": 988,
"end": 1600
} | class ____(nn.Module):
def __init__(self, base_model):
    """Wrap *base_model* so the forward pass returns only the top-1 class id."""
    super().__init__()
    self.base_model = base_model
def forward(self, x):
    """Run the base model and return the index of its highest-scoring class."""
    logits = self.base_model(x)
    return torch.topk(logits[0], 1).indices
# TODO: All torchvision quantized model test can be written as single parameterized test case,
# after per-parameter test decoration is supported via #79979, or after they are all enabled,
# whichever is first.
@parameterized.parameterized_class(
("is_script",),
[(True,), (False,)],
class_name_func=onnx_test_common.parameterize_class_name,
)
| _TopPredictor |
python | fluentpython__example-code-2e | 23-descriptor/bulkfood/bulkfood_v3.py | {
"start": 780,
"end": 1279
} | class ____: # <1>
def __init__(self, storage_name):
    # Name of the managed attribute in the instance's __dict__.
    self.storage_name = storage_name  # <2>
def __set__(self, instance, value):  # <3>
    """Store *value* in the instance ``__dict__``; reject non-positive values."""
    if value > 0:
        instance.__dict__[self.storage_name] = value  # <4>
    else:
        raise ValueError(f'{self.storage_name} must be > 0')
def __get__(self, instance, owner):  # <5>
    """Return the stored value; on class-level access return the descriptor.

    Without the ``instance is None`` guard, accessing the attribute on the
    class itself (e.g. ``LineItem.weight``) would raise an AttributeError
    from ``None.__dict__``; returning ``self`` is the conventional
    descriptor behavior.
    """
    if instance is None:
        return self
    return instance.__dict__[self.storage_name]
# end::LINEITEM_QUANTITY_V3[]
# tag::LINEITEM_V3[]
| Quantity |
python | joke2k__faker | faker/providers/job/cs_CZ/__init__.py | {
"start": 41,
"end": 15462
} | class ____(JobProvider):
"""Translated from Super class"""
jobs = (
"Administrátor, umění",
"Administrátor, státní služba",
"Advokát",
"Advokát pro ochranné známky",
"Akademický knihovník",
"Akupunkturista",
"Analytický chemik",
"Analytik finančního rizika",
"Angličtina jako lektorka cizího jazyka",
"Angličtina jako učitel druhého jazyka",
"Animátor",
"Arborista",
"Archeológ",
"Architekt",
"Architektonický technológ",
"Archivář",
"Arteterapeut",
"Asistent politika",
"Astronóm",
"Audiologický vědec",
"Automobilový inženýr",
"Autorizovaný likvidátor ztrát",
"Autorizovaný účetní",
"Autorizovaný účetní v oblasti veřejných financí",
"Bankéř",
"Báňský inženýr",
"Barista",
"Biochemik, klinický",
"Biomedicínsky inženýr",
"Biomedicínsky vědec",
"Bylinkář",
"Bytový manažér / referent",
"Charitatívní úředník",
"Chemický inženýr",
"Chemik, analytický",
"Chiropraktik",
"Chirurg",
"Copywriter, reklama",
"Cytogenetik",
"Daňový poradce",
"Dětská sestra",
"Dětský psychoterapeut",
"Diagnostický rádiograf",
"Dietológ",
"Dyzajnér, foukané sklo / vitráž",
"Dyzajnér, grafik",
"Dyzajnér, interiér / protor",
"Dyzajnér, keramika / hrčířství",
"Dyzajnér, multimédiá",
"Dyzajnér, móda / oblečení",
"Dyzajnér, nábytek",
"Dyzajnér, průmyslový / produkt",
"Dyzajnér, televíize / film",
"Dyzajnér, textil",
"Dyzajnér, výstava / výstava",
"Dyzajnér, šperky",
"Docent",
"Dodávateľ",
"Dospělý poradentský pracovník",
"Dozorce",
"Dramatický terapeut",
"Důstojník obchodního námořníctví",
"Důstojník pro ochranu přírody",
"Důstojník pro výcvik a vzdělávní ozbrojených síl",
"Editor funkcie časopisu",
"Ekológ",
"Ekonom",
"Elektroinženýr",
"Embryológ, klinický",
"Energetický inženýr",
"Energetický manažér",
"Environmentálny manažér",
"Ergonóm",
"Barevný technológ",
"Farmaceut Spoločenstva",
"Farmakológ",
"Filmový / video editor",
"Finanční kontrolor",
"Finanční manažér",
"Finanční obchodník",
"Finanční plánovač",
"Finanční poradce",
"Finanční ředitel",
"Firemní sekretářka",
"Fotograf",
"Fytoterapeut",
"Fyzik zdraví",
"Fyzik, lékař",
"Fyziologický vědec",
"Fyziológ cvičení",
"Fyzioterapeut",
"Foukač akla / dyzajnér",
"Genetik, molekulárny",
"Geochemik",
"Geodet minerálu",
"Geodet pojištění rizika",
"Geofyzik / terénní seismológ",
"Geológ, strojař",
"Geológ",
"Geovedec",
"Grafický dyzajnér",
"Grafik",
"Hasič",
"Hematológ",
"Herec",
"Herpetológ",
"Hlavní marketingový ředitel",
"Homeopat",
"Hotelový manažér",
"Hudebník",
"Hudební lektor",
"Hudební terapeut",
"Hutník",
"Hydrogeológ",
"Hydrografický geodet",
"Hydrológ",
"Hygienik práce",
"IT konzultant",
"Ilustrátor",
"Imunológ",
"Informační úředník",
"Investiční analytik",
"Investiční bankář, funkční",
"Investiční bankář, podnikový",
"Inspektor / hodnotitel reklamací",
"Inspektor historických budov / referent památkové ochrany",
"Inspektor plánovaní a rozvoje",
"Inspektor zdraví a bezpečnosti",
"Inženýr budov",
"Inženýr elektroniky",
"Inženyr kontroly a přístrojového vybavení",
"Inženýr zemědělství",
"Inženýr pro automobilový průmysl",
"Inženýr výrobních systémovů",
"Inženýr, bankovnictví",
"Inženýr, biomedicíny",
"Inženýr, chemický",
"Inženýr, elektronika",
"Inženýr, elektrotechnik",
"Inženýr, energie",
"Inženýr, komunikace",
"Inženýr, letecký",
"Inženýr, materiály",
"Inženýr, pozemky",
"Inženýr, zemědělství",
"Inženýr, řízení a přístrojové vybavení",
"Inženýr, ropa",
"Inženýr, statik",
"Inženýr, stavební služby",
"Inženýr, stavební (smluvní)",
"Inženýr, stavební inženier (poradenství)",
"Inženýr, technický prodej",
"Inženýr, voda",
"Inženýr, vysílání (provoz)",
"Inženýr, výroba",
"Inženýr, výroba",
"Inženýr, výrobní systémy",
"Inženýr, vrtaní",
"Inženýr, web",
"Inženýr, údržba",
"Inženýr, údržba (IT)",
"Inženýrský geológ",
"Kameraman",
"Kariérní informační úředník",
"Kariérní poradce",
"Kariérní poradce pro vysokoškolské vzdělání",
"Kartograf",
"Klinický biochemik",
"Klinický cytogenetik",
"Klinický embryológ",
"Klinický molekulárny genetik",
"Klinický psychológ",
"Klinický vědec, histokompatibilita a imunogenetika",
"Knihovník",
"Knihovník, veřejný",
"Kníhkupec",
"Komerční / rezidenční geodet",
"Komerční záhradník",
"Komunikační inženýr",
"Komunitní umělecký pracovník",
"Jednatel spoločnosti",
"Kontrolór",
"Konzervátor / restauratér nábytku",
"Konzervátor muzea / galérie",
"Konzervátor, muzeum / galéria",
"Konzervátor, nábytku",
"Konzultant pro důchody",
"Konzultace se stavebním inženýrem",
"Koordinátor dobrovolnictví",
"Kupující, maloobchod",
"Kurátor",
"Kurátor muzea / galérie",
"Lektor dalšího vzdělávání",
"Lektor, vysokoškolské vzdělání",
"Lektor, další vzdělání",
"Lékař všeobecného lekařství",
"Lékař, nemocnice",
"Lékař, všeobecná praxe",
"Lékárnik, komunita",
"Lékárnik, nemocnice",
"Lékářsky fyzik",
"Lékářsky ilustrátor",
"Lékářsky obchodní zástupca",
"Lékářsky sekretář",
"Lékářsky technický pracovník",
"Letecký dispečer",
"Letecký inženýr",
"Letecký sprostředkovateľ",
"Lexikograf",
"Licencovaný dopravce",
"Lobista",
"Logistika / podpora / administratívní důstojník ozbrojených síl",
"Manažér call centra",
"Manažér cestovní kanceláře",
"Manažér divadelní scény",
"Manažér farmy",
"Manažér fitnescentra",
"Manažér informačných systému",
"Manažér komerční umělecké galérie",
"Manažér logistiky a distribuce",
"Manažér stravování",
"Manažér umělecké galérie",
"Manažér zařízení",
"Manažér zábavného parku",
"Manžérsky konzultant",
"Marketingový manažér",
"Materiálový inženýr",
"Mediální plánovač",
"Meteorológ",
"Mikrobiológ",
"Moderátor, vysílání",
"Mořský vědec",
"Multimediální programy",
"Módní návrhář",
"Nemocniční lékař",
"Nemocniční lekárniík",
"Neurochirurg",
"Novinář novín",
"Novinář časopisu",
"Novinář, noviny",
"Novinář, vysílání",
"Novinář, časopis",
"Nákupčí médií",
"Nákupčí, průmyslu",
"Námořní architekt",
"Návrhář interiérů a prostor",
"Návrhář nábytku",
"Návrhář výstavy",
"Návrhář šperkov",
"Návrhářka keramiky",
"Obchodník s akciemi",
"Obchodník s dluhopisy",
"Obchodník s futures",
"Oceánograf",
"Ochranář, historické budovy",
"Odborník na životní prostředí",
"Oděvní / textilní technológ",
"Onkológ",
"Operatívní výzkumník",
"Operační důstojních diplomatických služeb",
"Operačn důstojník ozbrojených síl",
"Optik, výdej",
"Optometrista",
"Ortoptista",
"Osobní asistent",
"Osteopat",
"Oční lékař",
"Palubní průvodce",
"Patent attorney",
"Patológ",
"Pedagogický psychológ",
"Pedikér",
"Personalista",
"Pilot letecké společnosti",
"Plánovač dopravy",
"Plánovač reklamního účtu",
"Plánovač tisku",
"Podnikový investiční bankéř",
"Podnikový pokladník",
"Pojistný matematik",
"Pojišťovací makléř",
"Pojišťovák",
"Police officer",
"Poradce pro zdraví a bezpečnosť",
"Poradce pro životní prostředí",
"Poradenský pracovník",
"Poradenský psychológ",
"Potravinářsky technológ",
"Zemědělský konzultant",
"Pracovník medzinárodní pomoci / rozvoje",
"Pracovník pomoci",
"Pracovník rozvoje komunity",
"Pracovník s mládeží",
"Pracovní psychológ",
"Pracovní terapeut",
"Predejce",
"Překladateľ",
"Prevozovatel televizní kamery",
"Provozní geológ",
"Provozní investiční bankéř",
"Provozní ředitel",
"Průmyslový / produktový dizajnér",
"Průmyslový kupující",
"Průzkumník trhu",
"Probační úředník",
"Producent, rádio",
"Producent, televize / film / video",
"Production assistant, radio",
"Production assistant, televize",
"Production designer, theatre/television/film",
"Production engineer",
"Production manager",
"Produktový dizajnér",
"Produktový manažér",
"Professor Emeritus",
"Programátor, applikací",
"Programátor, multimedia",
"Programátor, systems",
"Korektor",
"Právnik",
"Právní tajemník",
"Psychiatrická sestra",
"Psychitr",
"Psycholog, klinický",
"Psycholog, poradenství",
"Psycholog, vzdělání",
"Psycholog, forézní",
"Psycholog, pracovní",
"Psycholog, vězeňské a probační služby",
"Psycholog, sport a cvičení",
"Psychoterapeut tanečního pohybu",
"Psychoterapeut",
"Porodní asistentka",
"Manažér kvality",
"Poradce",
"Realitní makléř",
"Redaktor, uvedení do provozu",
"Redakční asistent",
"Referent cestovního ruchu",
"Referent environmentální výchovy",
"Referent geografických informačných systému",
"Referent komunitního vzdělávání",
"Referent múzejního vzdělávání",
"Referent obchodních norem",
"Referent ochrany přírody",
"Referent odborné přípravy a rozvoje",
"Referent odborového výzkumu",
"Referent zemědělských pokusu",
"Referent pro nouzové plánování / řízení",
"Referent pro výstavy v muzeich / galeriich",
"Referent rozvoje umění",
"Referent technické podpory IT",
"Referent výstavy, muzeum / galérii",
"Referent lidských zdrojů",
"Školní referent, komunita",
"Školení referent, muzeum",
"Regulátor ztrát, objednaný",
"Reklamní textař",
"Reklamní umělecký ředitel",
"Ředitel pro stretegii",
"Ropný inženýr",
"Rozvojový pracovník, komunita",
"Rozvojový pracovník, mezinárodní pomoc",
"Sanitka",
"Sestra pro dospělé",
"Sestra pro duševní zdraví",
"Sestra s poruchami učení",
"Sestra, dětská",
"Sestra, dospělý",
"Sestra, porucha učení",
"Sietový inženýr",
"Spisovateľ",
"Spolupracovník pro klinický výzkum",
"Spracovatel geofyzikálnych údajů",
"Spravodajský analytik",
"Správce",
"Správce databázy",
"Správce dědictví",
"Správce duchodového systému",
"Správce lesů",
"Správce nemovitostí / pozemkový agent",
"Správce pojisných účtu",
"Správce polohy",
"Správce zpracovaní údajů",
"Správce umění",
"Správce zákazníckého centra",
"Správce školní",
"Správce státní služby",
"Správce, charitatívní / dobrovolnické organizáce",
"Správce, místní samospráva",
"Správce, vzdělávání",
"Správce, sport",
"Stavební geodet",
"Stavební inženýr, poradenství",
"Stavební inženýr, uzavírání smluv",
"Střihač, film / video",
"Strojní inženýr",
"Strážce",
"Osvětlovací technik, vysílání / film / video",
"Soudce psychológ",
"Soudní vědec",
"Soukromý učitel hudby",
"Tanečnice",
"Technický důstojník ozbrojených síl",
"Technik údržby",
"Technológ pro zvířata",
"Technológ vaření piva",
"Terapeut, drama",
"Terapeut, hudba",
"Terapeut, záhradnícký",
"Terapeut, sport",
"Terénní seismológ",
"Tlumočník",
"Toxikológ",
"Umělec",
"Učicí se mentor",
"Učitel, angličtina jako cizí jazyk",
"Učitel, hudba",
"Učitel, vzdělání dospělých",
"Učitel, základní škola",
"Učitel na základní škole",
"Vědec pro kvalitu vody",
"Vědec vývoj produktů / procesů",
"Vědecký pracovník lékařské laboratoře",
"Vedoucí kanceláře",
"Vedoucí konferenčního centra",
"Vedoucí osobní dopravy",
"Vedoucí outdoorových aktivít / vzdělávání",
"Vedoucí reklamního účtu",
"Vedoucí restaurace rychlého občerstvení",
"Vedoucí rybí farmy",
"Vedoucí skladu",
"Vedoucí střediska volného času",
"Vedoucí turistického informačného centra",
"Vedoucí ubytování",
"Vedoucí zdravotní služby",
"Vedoucí úseku",
"Veterinární chirurg",
"Video editor",
"Vizuální obchodník",
"Vládní úředník pro sociální výzkum",
"Vodní inženýr",
"Vrtný inženýr",
"Zprostředkovatel pojistných událostí",
"Vysokoškolský lektor",
"Výkonný ředitel",
"Výkonný technický ředitel",
"Výrobní inženýr",
"Výtvarný umělec",
"Vývojář aplikací",
"Vývojář her",
"Vývojář počítačových her",
"Vývojář systémů",
"Výživový poradca pro zvířata",
"Výživový terapeut",
"Web designer",
"Zaměstnanec imigračního úřadu",
"Zdravotní sestra, duševní zdraví",
"Zeměměřič / geomatik",
"Zmluvní stavební inženýr",
"Zubař",
"Záchranář",
"Záhradnícký konzultant",
"Záhradnícký terapeut",
"Záhradník, komerční",
"Záhradní architekt",
"Úředník místní samosprávy",
"Úřadník pro rybolov",
"Účetní, autorizované veřejné finance",
"Účetní, autorizovaný",
"Účetní, autorizovaný / certifikovaný",
"Účetní technik",
"Specialista na multimédiá",
"Specialista na podporu zdraví",
"Dopravce",
"Šlechtitel rostlin / genetik",
)
def job(self) -> str:
    """Return a randomly chosen Czech job title from ``jobs``."""
    return self.random_element(self.jobs)
| Provider |
python | mwaskom__seaborn | tests/test_categorical.py | {
"start": 26809,
"end": 40413
} | class ____(SharedAxesLevelTests, SharedPatchArtistTests):
func = staticmethod(boxplot)
@pytest.fixture
def common_kws(self):
return {"saturation": 1}
def get_last_color(self, ax):
    """Return the face color of the most recently added boxes as RGBA."""
    colors = [b.get_facecolor() for b in ax.containers[-1].boxes]
    unique_colors = np.unique(colors, axis=0)
    # All boxes in one container are expected to share a single color.
    assert len(unique_colors) == 1
    return to_rgba(unique_colors.squeeze())
def get_box_verts(self, box):
    """Return the visible (x, y) vertices of a box patch as a 2 x n array."""
    path = box.get_path()
    # Keep only drawing vertices (MOVETO/LINETO); other codes (e.g.
    # CLOSEPOLY) carry placeholder coordinates.
    visible_codes = [mpl.path.Path.MOVETO, mpl.path.Path.LINETO]
    visible = np.isin(path.codes, visible_codes)
    return path.vertices[visible].T
def check_box(self, bxp, data, orient, pos, width=0.8):
pos_idx, val_idx = self.orient_indices(orient)
p25, p50, p75 = np.percentile(data, [25, 50, 75])
box = self.get_box_verts(bxp.box)
assert box[val_idx].min() == approx(p25, 1e-3)
assert box[val_idx].max() == approx(p75, 1e-3)
assert box[pos_idx].min() == approx(pos - width / 2)
assert box[pos_idx].max() == approx(pos + width / 2)
med = bxp.median.get_xydata().T
assert np.allclose(med[val_idx], (p50, p50), rtol=1e-3)
assert np.allclose(med[pos_idx], (pos - width / 2, pos + width / 2))
def check_whiskers(self, bxp, data, orient, pos, capsize=0.4, whis=1.5):
pos_idx, val_idx = self.orient_indices(orient)
whis_lo = bxp.whiskers[0].get_xydata().T
whis_hi = bxp.whiskers[1].get_xydata().T
caps_lo = bxp.caps[0].get_xydata().T
caps_hi = bxp.caps[1].get_xydata().T
fliers = bxp.fliers.get_xydata().T
p25, p75 = np.percentile(data, [25, 75])
iqr = p75 - p25
adj_lo = data[data >= (p25 - iqr * whis)].min()
adj_hi = data[data <= (p75 + iqr * whis)].max()
assert whis_lo[val_idx].max() == approx(p25, 1e-3)
assert whis_lo[val_idx].min() == approx(adj_lo)
assert np.allclose(whis_lo[pos_idx], (pos, pos))
assert np.allclose(caps_lo[val_idx], (adj_lo, adj_lo))
assert np.allclose(caps_lo[pos_idx], (pos - capsize / 2, pos + capsize / 2))
assert whis_hi[val_idx].min() == approx(p75, 1e-3)
assert whis_hi[val_idx].max() == approx(adj_hi)
assert np.allclose(whis_hi[pos_idx], (pos, pos))
assert np.allclose(caps_hi[val_idx], (adj_hi, adj_hi))
assert np.allclose(caps_hi[pos_idx], (pos - capsize / 2, pos + capsize / 2))
flier_data = data[(data < adj_lo) | (data > adj_hi)]
assert sorted(fliers[val_idx]) == sorted(flier_data)
assert np.allclose(fliers[pos_idx], pos)
@pytest.mark.parametrize("orient,col", [("x", "y"), ("y", "z")])
def test_single_var(self, long_df, orient, col):
var = {"x": "y", "y": "x"}[orient]
ax = boxplot(long_df, **{var: col})
bxp = ax.containers[0][0]
self.check_box(bxp, long_df[col], orient, 0)
self.check_whiskers(bxp, long_df[col], orient, 0)
@pytest.mark.parametrize("orient,col", [(None, "x"), ("x", "y"), ("y", "z")])
def test_vector_data(self, long_df, orient, col):
ax = boxplot(long_df[col], orient=orient)
orient = "x" if orient is None else orient
bxp = ax.containers[0][0]
self.check_box(bxp, long_df[col], orient, 0)
self.check_whiskers(bxp, long_df[col], orient, 0)
@pytest.mark.parametrize("orient", ["h", "v"])
def test_wide_data(self, wide_df, orient):
orient = {"h": "y", "v": "x"}[orient]
ax = boxplot(wide_df, orient=orient, color="C0")
for i, bxp in enumerate(ax.containers):
col = wide_df.columns[i]
self.check_box(bxp[i], wide_df[col], orient, i)
self.check_whiskers(bxp[i], wide_df[col], orient, i)
@pytest.mark.parametrize("orient", ["x", "y"])
def test_grouped(self, long_df, orient):
value = {"x": "y", "y": "x"}[orient]
ax = boxplot(long_df, **{orient: "a", value: "z"})
bxp, = ax.containers
levels = categorical_order(long_df["a"])
for i, level in enumerate(levels):
data = long_df.loc[long_df["a"] == level, "z"]
self.check_box(bxp[i], data, orient, i)
self.check_whiskers(bxp[i], data, orient, i)
@pytest.mark.parametrize("orient", ["x", "y"])
def test_hue_grouped(self, long_df, orient):
value = {"x": "y", "y": "x"}[orient]
ax = boxplot(long_df, hue="c", **{orient: "a", value: "z"})
for i, hue_level in enumerate(categorical_order(long_df["c"])):
bxp = ax.containers[i]
for j, level in enumerate(categorical_order(long_df["a"])):
rows = (long_df["a"] == level) & (long_df["c"] == hue_level)
data = long_df.loc[rows, "z"]
pos = j + [-.2, +.2][i]
width, capsize = 0.4, 0.2
self.check_box(bxp[j], data, orient, pos, width)
self.check_whiskers(bxp[j], data, orient, pos, capsize)
def test_hue_not_dodged(self, long_df):
levels = categorical_order(long_df["b"])
hue = long_df["b"].isin(levels[:2])
ax = boxplot(long_df, x="b", y="z", hue=hue)
bxps = ax.containers
for i, level in enumerate(levels):
idx = int(i < 2)
data = long_df.loc[long_df["b"] == level, "z"]
self.check_box(bxps[idx][i % 2], data, "x", i)
self.check_whiskers(bxps[idx][i % 2], data, "x", i)
def test_dodge_native_scale(self, long_df):
centers = categorical_order(long_df["s"])
hue_levels = categorical_order(long_df["c"])
spacing = min(np.diff(centers))
width = 0.8 * spacing / len(hue_levels)
offset = width / len(hue_levels)
ax = boxplot(long_df, x="s", y="z", hue="c", native_scale=True)
for i, hue_level in enumerate(hue_levels):
bxp = ax.containers[i]
for j, center in enumerate(centers):
rows = (long_df["s"] == center) & (long_df["c"] == hue_level)
data = long_df.loc[rows, "z"]
pos = center + [-offset, +offset][i]
self.check_box(bxp[j], data, "x", pos, width)
self.check_whiskers(bxp[j], data, "x", pos, width / 2)
def test_dodge_native_scale_log(self, long_df):
pos = 10 ** long_df["s"]
ax = mpl.figure.Figure().subplots()
ax.set_xscale("log")
boxplot(long_df, x=pos, y="z", hue="c", native_scale=True, ax=ax)
widths = []
for bxp in ax.containers:
for box in bxp.boxes:
coords = np.log10(box.get_path().vertices.T[0])
widths.append(np.ptp(coords))
assert np.std(widths) == approx(0)
def test_dodge_without_hue(self, long_df):
ax = boxplot(long_df, x="a", y="y", dodge=True)
bxp, = ax.containers
levels = categorical_order(long_df["a"])
for i, level in enumerate(levels):
data = long_df.loc[long_df["a"] == level, "y"]
self.check_box(bxp[i], data, "x", i)
self.check_whiskers(bxp[i], data, "x", i)
@pytest.mark.parametrize("orient", ["x", "y"])
def test_log_data_scale(self, long_df, orient):
var = {"x": "y", "y": "x"}[orient]
s = long_df["z"]
ax = mpl.figure.Figure().subplots()
getattr(ax, f"set_{var}scale")("log")
boxplot(**{var: s}, whis=np.inf, ax=ax)
bxp = ax.containers[0][0]
self.check_box(bxp, s, orient, 0)
self.check_whiskers(bxp, s, orient, 0, whis=np.inf)
def test_color(self, long_df):
color = "#123456"
ax = boxplot(long_df, x="a", y="y", color=color, saturation=1)
for box in ax.containers[0].boxes:
assert same_color(box.get_facecolor(), color)
def test_wide_data_multicolored(self, wide_df):
ax = boxplot(wide_df)
assert len(ax.containers) == wide_df.shape[1]
def test_wide_data_single_color(self, wide_df):
ax = boxplot(wide_df, color="C1", saturation=1)
assert len(ax.containers) == 1
for box in ax.containers[0].boxes:
assert same_color(box.get_facecolor(), "C1")
def test_hue_colors(self, long_df):
ax = boxplot(long_df, x="a", y="y", hue="b", saturation=1)
for i, bxp in enumerate(ax.containers):
for box in bxp.boxes:
assert same_color(box.get_facecolor(), f"C{i}")
def test_linecolor(self, long_df):
color = "#778815"
ax = boxplot(long_df, x="a", y="y", linecolor=color)
bxp = ax.containers[0]
for line in [*bxp.medians, *bxp.whiskers, *bxp.caps]:
assert same_color(line.get_color(), color)
for box in bxp.boxes:
assert same_color(box.get_edgecolor(), color)
for flier in bxp.fliers:
assert same_color(flier.get_markeredgecolor(), color)
def test_linecolor_gray_warning(self, long_df):
with pytest.warns(FutureWarning, match="Use \"auto\" to set automatic"):
boxplot(long_df, x="y", linecolor="gray")
def test_saturation(self, long_df):
color = "#8912b0"
ax = boxplot(long_df["x"], color=color, saturation=.5)
box = ax.containers[0].boxes[0]
assert np.allclose(box.get_facecolor()[:3], desaturate(color, 0.5))
def test_linewidth(self, long_df):
width = 5
ax = boxplot(long_df, x="a", y="y", linewidth=width)
bxp = ax.containers[0]
for line in [*bxp.boxes, *bxp.medians, *bxp.whiskers, *bxp.caps]:
assert line.get_linewidth() == width
def test_fill(self, long_df):
color = "#459900"
ax = boxplot(x=long_df["z"], fill=False, color=color)
bxp = ax.containers[0]
assert isinstance(bxp.boxes[0], mpl.lines.Line2D)
for line in [*bxp.boxes, *bxp.medians, *bxp.whiskers, *bxp.caps]:
assert same_color(line.get_color(), color)
@pytest.mark.parametrize("notch_param", ["notch", "shownotches"])
def test_notch(self, long_df, notch_param):
ax = boxplot(x=long_df["z"], **{notch_param: True})
verts = ax.containers[0].boxes[0].get_path().vertices
assert len(verts) == 12
def test_whis(self, long_df):
data = long_df["z"]
ax = boxplot(x=data, whis=2)
bxp = ax.containers[0][0]
self.check_whiskers(bxp, data, "y", 0, whis=2)
def test_gap(self, long_df):
ax = boxplot(long_df, x="a", y="z", hue="c", gap=.1)
for i, hue_level in enumerate(categorical_order(long_df["c"])):
bxp = ax.containers[i]
for j, level in enumerate(categorical_order(long_df["a"])):
rows = (long_df["a"] == level) & (long_df["c"] == hue_level)
data = long_df.loc[rows, "z"]
pos = j + [-.2, +.2][i]
width = 0.9 * 0.4
self.check_box(bxp[j], data, "x", pos, width)
def test_prop_dicts(self, long_df):
prop_dicts = dict(
boxprops=dict(linewidth=3),
medianprops=dict(color=".1"),
whiskerprops=dict(linestyle="--"),
capprops=dict(solid_capstyle="butt"),
flierprops=dict(marker="s"),
)
attr_map = dict(box="boxes", flier="fliers")
ax = boxplot(long_df, x="a", y="z", hue="c", **prop_dicts)
for bxp in ax.containers:
for element in ["box", "median", "whisker", "cap", "flier"]:
attr = attr_map.get(element, f"{element}s")
for artist in getattr(bxp, attr):
for k, v in prop_dicts[f"{element}props"].items():
assert plt.getp(artist, k) == v
def test_showfliers(self, long_df):
ax = boxplot(long_df["x"], showfliers=False)
assert not ax.containers[0].fliers
@pytest.mark.parametrize(
"kwargs",
[
dict(data="wide"),
dict(data="wide", orient="h"),
dict(data="flat"),
dict(data="long", x="a", y="y"),
dict(data=None, x="a", y="y"),
dict(data="long", x="a", y="y", hue="a"),
dict(data=None, x="a", y="y", hue="a"),
dict(data="long", x="a", y="y", hue="b"),
dict(data=None, x="s", y="y", hue="a"),
dict(data="long", x="a", y="y", hue="s"),
dict(data="null", x="a", y="y", hue="a"),
dict(data="long", x="s", y="y", hue="a", native_scale=True),
dict(data="long", x="d", y="y", hue="a", native_scale=True),
dict(data="null", x="a", y="y", hue="b", fill=False, gap=.2),
dict(data="null", x="a", y="y", whis=1, showfliers=False),
dict(data="null", x="a", y="y", linecolor="r", linewidth=5),
dict(data="null", x="a", y="y", shownotches=True, showcaps=False),
]
)
def test_vs_catplot(self, long_df, wide_df, null_df, flat_series, kwargs):
if kwargs["data"] == "long":
kwargs["data"] = long_df
elif kwargs["data"] == "wide":
kwargs["data"] = wide_df
elif kwargs["data"] == "flat":
kwargs["data"] = flat_series
elif kwargs["data"] == "null":
kwargs["data"] = null_df
elif kwargs["data"] is None:
for var in ["x", "y", "hue"]:
if var in kwargs:
kwargs[var] = long_df[kwargs[var]]
ax = boxplot(**kwargs)
g = catplot(**kwargs, kind="box")
assert_plots_equal(ax, g.ax)
| TestBoxPlot |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {
"start": 3443,
"end": 9964
} | class ____(str, Enum):
"""The types of events that may be yielded by op and job execution."""
STEP_OUTPUT = "STEP_OUTPUT"
STEP_INPUT = "STEP_INPUT"
STEP_FAILURE = "STEP_FAILURE"
STEP_START = "STEP_START"
STEP_SUCCESS = "STEP_SUCCESS"
STEP_SKIPPED = "STEP_SKIPPED"
# The process carrying out step execution is starting/started. Shown as a
# marker start/end in the Dagster UI.
STEP_WORKER_STARTING = "STEP_WORKER_STARTING"
STEP_WORKER_STARTED = "STEP_WORKER_STARTED"
# Resource initialization for execution has started/succeede/failed. Shown
# as a marker start/end in the Dagster UI.
RESOURCE_INIT_STARTED = "RESOURCE_INIT_STARTED"
RESOURCE_INIT_SUCCESS = "RESOURCE_INIT_SUCCESS"
RESOURCE_INIT_FAILURE = "RESOURCE_INIT_FAILURE"
STEP_UP_FOR_RETRY = "STEP_UP_FOR_RETRY" # "failed" but want to retry
STEP_RESTARTED = "STEP_RESTARTED"
ASSET_MATERIALIZATION = "ASSET_MATERIALIZATION"
ASSET_MATERIALIZATION_PLANNED = "ASSET_MATERIALIZATION_PLANNED"
ASSET_FAILED_TO_MATERIALIZE = "ASSET_FAILED_TO_MATERIALIZE"
ASSET_OBSERVATION = "ASSET_OBSERVATION"
STEP_EXPECTATION_RESULT = "STEP_EXPECTATION_RESULT"
ASSET_CHECK_EVALUATION_PLANNED = "ASSET_CHECK_EVALUATION_PLANNED"
ASSET_CHECK_EVALUATION = "ASSET_CHECK_EVALUATION"
ASSET_HEALTH_CHANGED = "ASSET_HEALTH_CHANGED"
ASSET_WIPED = "ASSET_WIPED"
# We want to display RUN_* events in the Dagster UI and in our LogManager output, but in order to
# support backcompat for our storage layer, we need to keep the persisted value to be strings
# of the form "PIPELINE_*". We may have user code that pass in the DagsterEventType
# enum values into storage APIs (like get_event_records, which takes in an EventRecordsFilter).
RUN_ENQUEUED = "PIPELINE_ENQUEUED"
RUN_DEQUEUED = "PIPELINE_DEQUEUED"
RUN_STARTING = "PIPELINE_STARTING" # Launch is happening, execution hasn't started yet
RUN_START = "PIPELINE_START" # Execution has started
RUN_SUCCESS = "PIPELINE_SUCCESS"
RUN_FAILURE = "PIPELINE_FAILURE"
RUN_CANCELING = "PIPELINE_CANCELING"
RUN_CANCELED = "PIPELINE_CANCELED"
# Keep these legacy enum values around, to keep back-compatability for user code that might be
# using these constants to filter event records
PIPELINE_ENQUEUED = RUN_ENQUEUED
PIPELINE_DEQUEUED = RUN_DEQUEUED
PIPELINE_STARTING = RUN_STARTING
PIPELINE_START = RUN_START
PIPELINE_SUCCESS = RUN_SUCCESS
PIPELINE_FAILURE = RUN_FAILURE
PIPELINE_CANCELING = RUN_CANCELING
PIPELINE_CANCELED = RUN_CANCELED
OBJECT_STORE_OPERATION = "OBJECT_STORE_OPERATION"
ASSET_STORE_OPERATION = "ASSET_STORE_OPERATION"
LOADED_INPUT = "LOADED_INPUT"
HANDLED_OUTPUT = "HANDLED_OUTPUT"
ENGINE_EVENT = "ENGINE_EVENT"
HOOK_COMPLETED = "HOOK_COMPLETED"
HOOK_ERRORED = "HOOK_ERRORED"
HOOK_SKIPPED = "HOOK_SKIPPED"
ALERT_START = "ALERT_START"
ALERT_SUCCESS = "ALERT_SUCCESS"
ALERT_FAILURE = "ALERT_FAILURE"
LOGS_CAPTURED = "LOGS_CAPTURED"
FRESHNESS_STATE_EVALUATION = "FRESHNESS_STATE_EVALUATION"
FRESHNESS_STATE_CHANGE = "FRESHNESS_STATE_CHANGE"
EVENT_TYPE_TO_DISPLAY_STRING = {
DagsterEventType.PIPELINE_ENQUEUED: "RUN_ENQUEUED",
DagsterEventType.PIPELINE_DEQUEUED: "RUN_DEQUEUED",
DagsterEventType.PIPELINE_STARTING: "RUN_STARTING",
DagsterEventType.PIPELINE_START: "RUN_START",
DagsterEventType.PIPELINE_SUCCESS: "RUN_SUCCESS",
DagsterEventType.PIPELINE_FAILURE: "RUN_FAILURE",
DagsterEventType.PIPELINE_CANCELING: "RUN_CANCELING",
DagsterEventType.PIPELINE_CANCELED: "RUN_CANCELED",
}
STEP_EVENTS = {
DagsterEventType.STEP_INPUT,
DagsterEventType.STEP_START,
DagsterEventType.STEP_OUTPUT,
DagsterEventType.STEP_FAILURE,
DagsterEventType.STEP_SUCCESS,
DagsterEventType.STEP_SKIPPED,
DagsterEventType.ASSET_MATERIALIZATION,
DagsterEventType.ASSET_OBSERVATION,
DagsterEventType.STEP_EXPECTATION_RESULT,
DagsterEventType.ASSET_CHECK_EVALUATION,
DagsterEventType.OBJECT_STORE_OPERATION,
DagsterEventType.HANDLED_OUTPUT,
DagsterEventType.LOADED_INPUT,
DagsterEventType.STEP_RESTARTED,
DagsterEventType.STEP_UP_FOR_RETRY,
}
FAILURE_EVENTS = {
DagsterEventType.RUN_FAILURE,
DagsterEventType.STEP_FAILURE,
DagsterEventType.RUN_CANCELED,
}
PIPELINE_EVENTS = {
DagsterEventType.RUN_ENQUEUED,
DagsterEventType.RUN_DEQUEUED,
DagsterEventType.RUN_STARTING,
DagsterEventType.RUN_START,
DagsterEventType.RUN_SUCCESS,
DagsterEventType.RUN_FAILURE,
DagsterEventType.RUN_CANCELING,
DagsterEventType.RUN_CANCELED,
}
HOOK_EVENTS = {
DagsterEventType.HOOK_COMPLETED,
DagsterEventType.HOOK_ERRORED,
DagsterEventType.HOOK_SKIPPED,
}
ALERT_EVENTS = {
DagsterEventType.ALERT_START,
DagsterEventType.ALERT_SUCCESS,
DagsterEventType.ALERT_FAILURE,
}
MARKER_EVENTS = {
DagsterEventType.ENGINE_EVENT,
DagsterEventType.STEP_WORKER_STARTING,
DagsterEventType.STEP_WORKER_STARTED,
DagsterEventType.RESOURCE_INIT_STARTED,
DagsterEventType.RESOURCE_INIT_SUCCESS,
DagsterEventType.RESOURCE_INIT_FAILURE,
}
EVENT_TYPE_TO_PIPELINE_RUN_STATUS = {
DagsterEventType.RUN_START: DagsterRunStatus.STARTED,
DagsterEventType.RUN_SUCCESS: DagsterRunStatus.SUCCESS,
DagsterEventType.RUN_FAILURE: DagsterRunStatus.FAILURE,
DagsterEventType.RUN_ENQUEUED: DagsterRunStatus.QUEUED,
DagsterEventType.RUN_STARTING: DagsterRunStatus.STARTING,
DagsterEventType.RUN_CANCELING: DagsterRunStatus.CANCELING,
DagsterEventType.RUN_CANCELED: DagsterRunStatus.CANCELED,
}
PIPELINE_RUN_STATUS_TO_EVENT_TYPE = {v: k for k, v in EVENT_TYPE_TO_PIPELINE_RUN_STATUS.items()}
# These are the only events currently supported in `EventLogStorage.store_event_batch`
BATCH_WRITABLE_EVENTS = {
DagsterEventType.ASSET_MATERIALIZATION,
DagsterEventType.ASSET_OBSERVATION,
DagsterEventType.ASSET_FAILED_TO_MATERIALIZE,
DagsterEventType.ASSET_MATERIALIZATION_PLANNED,
DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED,
}
ASSET_EVENTS = {
DagsterEventType.ASSET_MATERIALIZATION,
DagsterEventType.ASSET_OBSERVATION,
DagsterEventType.ASSET_MATERIALIZATION_PLANNED,
DagsterEventType.ASSET_FAILED_TO_MATERIALIZE,
DagsterEventType.FRESHNESS_STATE_CHANGE,
}
ASSET_CHECK_EVENTS = {
DagsterEventType.ASSET_CHECK_EVALUATION,
DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED,
}
| DagsterEventType |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modular_glm4v_moe.py | {
"start": 2241,
"end": 11217
} | class ____(Glm4MoeConfig, RotaryEmbeddingConfigMixin):
r"""
This is the configuration class to store the configuration of a [`Glm4vMoeModel`]. It is used to instantiate a
GLM-4.5V model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of
GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151424):
Vocabulary size of the Glm4vMoe model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Glm4vMoeModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 10944):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 46):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 96):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 65536):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `True`, *optional*, defaults to `True`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
moe_intermediate_size (`int`, *optional*, defaults to 1408):
Intermediate size of the routed expert.
num_experts_per_tok (`int`, *optional*, defaults to 8):
number of experts per token.
n_shared_experts (`int`, *optional*, defaults to 1):
Number of shared experts.
n_routed_experts (`int`, *optional*, defaults to 128):
Number of routed experts.
routed_scaling_factor (`float`, *optional*, defaults to 1.0):
Scaling factor or routed experts.
n_group (`int`, *optional*, defaults to 1):
Number of groups for routed experts.
topk_group (`int`, *optional*, defaults to 1):
Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
first_k_dense_replace (`int`, *optional*, defaults to 1):
Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
\--k dense layers--/
norm_topk_prob (`bool`, *optional*, defaults to `True`):
Whether to normalize the topk probabilities.
router_aux_loss_coef (`float`, *optional*, defaults to 0.0001):
The aux loss factor for the loss.
```python
>>> from transformers import Glm4vMoeTextModel, Glm4vMoeConfig
>>> # Initializing a GLM-4.5V style configuration
>>> configuration = Glm4vMoeConfig()
>>> # Initializing a model from the GLM-4.5V style configuration
>>> model = Glm4vMoeTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "glm4v_moe_text"
base_config_key = "text_config"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Glm4vMoe`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 151424,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 10944,
num_hidden_layers: Optional[int] = 46,
num_attention_heads: Optional[int] = 96,
num_key_value_heads: Optional[int] = 8,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 65536,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = True,
attention_dropout: Optional[float] = 0.0,
moe_intermediate_size: Optional[int] = 1408,
num_experts_per_tok: Optional[int] = 8,
n_shared_experts: Optional[int] = 1,
n_routed_experts: Optional[int] = 128,
routed_scaling_factor: Optional[float] = 1.0,
n_group: Optional[int] = 1,
topk_group: Optional[int] = 1,
first_k_dense_replace: Optional[int] = 1,
norm_topk_prob: Optional[bool] = True,
router_aux_loss_coef: Optional[float] = 0.0001,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC
# MoE arguments
self.moe_intermediate_size = moe_intermediate_size
self.num_experts_per_tok = num_experts_per_tok
self.n_group = n_group
self.topk_group = topk_group
self.n_shared_experts = n_shared_experts
self.n_routed_experts = n_routed_experts
self.routed_scaling_factor = routed_scaling_factor
self.first_k_dense_replace = first_k_dense_replace
self.norm_topk_prob = norm_topk_prob
self.router_aux_loss_coef = router_aux_loss_coef
PreTrainedConfig.__init__(
self, tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs
)
| Glm4vMoeTextConfig |
python | PyCQA__pylint | tests/functional/s/slots_checks.py | {
"start": 1669,
"end": 1729
} | class ____: # [invalid-slots]
__slots__ = None
| EleventhBad |
python | doocs__leetcode | solution/1500-1599/1558.Minimum Numbers of Function Calls to Make Target Array/Solution.py | {
"start": 0,
"end": 154
} | class ____:
def minOperations(self, nums: List[int]) -> int:
return sum(v.bit_count() for v in nums) + max(0, max(nums).bit_length() - 1)
| Solution |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 7152,
"end": 7565
} | class ____(SearchField):
field_type = "string"
def __init__(self, **kwargs):
if kwargs.get("facet_class") is None:
kwargs["facet_class"] = FacetCharField
super().__init__(**kwargs)
def prepare(self, obj):
return self.convert(super().prepare(obj))
def convert(self, value):
if value is None:
return None
return str(value)
| CharField |
python | falconry__falcon | tests/test_alias.py | {
"start": 241,
"end": 1314
} | class ____:
def on_get(self, req, resp):
resp.set_cookie('foo', 'bar')
@pytest.fixture
def alias_client():
with pytest.warns(DeprecatedWarning, match='API class will be removed'):
api = falcon.API()
api.add_route('/get-cookie', CookieResource())
return testing.TestClient(api)
@pytest.fixture
def app_client():
app = falcon.App()
app.add_route('/get-cookie', CookieResource())
return testing.TestClient(app)
def test_cookies(alias_client, app_client):
alias_result = alias_client.simulate_get('/get-cookie')
alias_cookie = alias_result.cookies['foo']
assert alias_cookie.name == 'foo'
assert alias_cookie.value == 'bar'
app_client_result = app_client.simulate_get('/get-cookie')
app_cookie = app_client_result.cookies['foo']
assert app_cookie.name == 'foo'
assert app_cookie.value == 'bar'
def test_alias_equals_to_app(alias_client):
with pytest.warns(DeprecatedWarning, match='API class will be removed'):
api = falcon.API()
assert isinstance(api, falcon.API)
| CookieResource |
python | PrefectHQ__prefect | src/integrations/prefect-snowflake/tests/test_database.py | {
"start": 2538,
"end": 3095
} | class ____:
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def execute_async(self, query, params):
query_id = "1234"
self.result = {query_id: [(query, params)]}
return {"queryId": query_id}
def get_results_from_sfqid(self, query_id):
self.query_result = self.result[query_id]
def fetchall(self):
return self.query_result
def execute(self, query, params=None):
self.query_result = [(query, params, "sync")]
return self
| SnowflakeCursor |
python | kamyu104__LeetCode-Solutions | Python/minimum-moves-to-spread-stones-over-grid.py | {
"start": 3444,
"end": 4302
} | class ____(object):
def minimumMoves(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def dist(a, b):
return abs(a[0]-b[0])+abs(a[1]-b[1])
def backtracking(curr):
if curr == len(zero):
return 0
result = float("inf")
i, j = zero[curr]
for ni in xrange(len(grid)):
for nj in xrange(len(grid[0])):
if not (grid[ni][nj] >= 2):
continue
grid[ni][nj] -= 1
result = min(result, dist((i, j), (ni, nj))+backtracking(curr+1))
grid[ni][nj] += 1
return result
zero = [(i, j) for i in xrange(len(grid)) for j in xrange(len(grid[0])) if grid[i][j] == 0]
return backtracking(0)
| Solution3 |
python | django__django | tests/staticfiles_tests/test_storage.py | {
"start": 33463,
"end": 36663
} | class ____(CollectionTestCase):
command_params = {
"interactive": False,
"verbosity": 0,
"ignore_patterns": ["*.ignoreme"],
}
def setUp(self):
self.umask = 0o027
old_umask = os.umask(self.umask)
self.addCleanup(os.umask, old_umask)
super().setUp()
# Don't run collectstatic command in this test class.
def run_collectstatic(self, **kwargs):
pass
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
)
def test_collect_static_files_permissions(self):
call_command("collectstatic", **self.command_params)
static_root = Path(settings.STATIC_ROOT)
test_file = static_root / "test.txt"
file_mode = test_file.stat().st_mode & 0o777
self.assertEqual(file_mode, 0o655)
tests = [
static_root / "subdir",
static_root / "nested",
static_root / "nested" / "css",
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(
FILE_UPLOAD_PERMISSIONS=None,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=None,
)
def test_collect_static_files_default_permissions(self):
call_command("collectstatic", **self.command_params)
static_root = Path(settings.STATIC_ROOT)
test_file = static_root / "test.txt"
file_mode = test_file.stat().st_mode & 0o777
self.assertEqual(file_mode, 0o666 & ~self.umask)
tests = [
static_root / "subdir",
static_root / "nested",
static_root / "nested" / "css",
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
STORAGES={
**settings.STORAGES,
STATICFILES_STORAGE_ALIAS: {
"BACKEND": "staticfiles_tests.test_storage.CustomStaticFilesStorage",
},
},
)
def test_collect_static_files_subclass_of_static_storage(self):
call_command("collectstatic", **self.command_params)
static_root = Path(settings.STATIC_ROOT)
test_file = static_root / "test.txt"
file_mode = test_file.stat().st_mode & 0o777
self.assertEqual(file_mode, 0o640)
tests = [
static_root / "subdir",
static_root / "nested",
static_root / "nested" / "css",
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o740)
@override_settings(
STORAGES={
**settings.STORAGES,
STATICFILES_STORAGE_ALIAS: {
"BACKEND": "django.contrib.staticfiles.storage.ManifestStaticFilesStorage",
},
}
)
| TestStaticFilePermissions |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 39379,
"end": 40910
} | class ____(Response):
"""
Response of models.create endpoint.
:param id: ID of the model
:type id: str
:param created: Was the model created
:type created: bool
"""
_service = "models"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"created": {
"description": "Was the model created",
"type": ["boolean", "null"],
},
"id": {"description": "ID of the model", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, id: Optional[str] = None, created: Optional[bool] = None, **kwargs: Any) -> None:
super(CreateResponse, self).__init__(**kwargs)
self.id = id
self.created = created
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("created")
def created(self) -> Optional[bool]:
return self._property_created
@created.setter
def created(self, value: Optional[bool]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", (bool,))
self._property_created = value
| CreateResponse |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-instagram/unit_tests/integration/test_api.py | {
"start": 1469,
"end": 4088
} | class ____(TestCase):
@staticmethod
def _read(config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return read_output(
config_builder=config_,
stream_name=_STREAM_NAME,
sync_mode=SyncMode.full_refresh,
expecting_exception=expecting_exception,
)
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
get_account_request().build(),
get_account_response(),
)
output = self._read(config_=config())
assert len(output.records) == 1
@HttpMocker()
def test_accounts_with_no_instagram_business_account_field(self, http_mocker: HttpMocker) -> None:
test = "not_instagram_business_account"
mocked_response = json.dumps(find_template(f"api_for_{test}", __file__))
http_mocker.get(get_account_request().build(), HttpResponse(mocked_response, 200))
original_records = json.loads(mocked_response)
output = self._read(config_=config())
# accounts which are not business will be filtered
assert len(original_records["data"]) > len(output.records)
assert len(output.records) == 13
for single_record in output.records:
assert "id" in single_record.record.data
assert "account" in single_record.record.data
assert "business_account_id" in single_record.record.data["account"]
assert "page_id" in single_record.record.data["account"]
@HttpMocker()
def test_given_multiple_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
get_account_request().build(),
_get_response().with_pagination().with_record(_record()).build(),
)
next_account_url = get_account_request().with_next_page_token(NEXT_PAGE_TOKEN).build()
http_mocker.get(
next_account_url,
_get_response().with_record(_record()).with_record(_record()).build(),
)
output = self._read(config_=config())
assert len(output.records) == 3
@HttpMocker()
def test_exception_on_accounts_request(self, http_mocker: HttpMocker) -> None:
get_account_request().build()
http_mocker.get(get_account_request().build(), build_response(status_code=HTTPStatus.FORBIDDEN, body={}))
is_successful, error = get_source(config={}).check_connection(logger=logger, config={})
assert not is_successful
assert "Forbidden" in error
| TestFullRefresh |
python | ray-project__ray | python/ray/data/_internal/execution/interfaces/physical_operator.py | {
"start": 9817,
"end": 10669
} | class ____(OpTask):
"""Represents an OpTask that only handles metadata, instead of Block data."""
def __init__(
self,
task_index: int,
object_ref: ray.ObjectRef,
task_done_callback: Callable[[], None],
task_resource_bundle: Optional[ExecutionResources] = None,
):
"""
Args:
object_ref: The ObjectRef of the task.
task_done_callback: The callback to call when the task is done.
"""
super().__init__(task_index, task_resource_bundle)
self._object_ref = object_ref
self._task_done_callback = task_done_callback
def get_waitable(self) -> ray.ObjectRef:
return self._object_ref
def on_task_finished(self):
"""Callback when the task is finished."""
self._task_done_callback()
@dataclass
| MetadataOpTask |
python | apache__airflow | airflow-core/tests/unit/utils/test_module_loading.py | {
"start": 895,
"end": 1371
} | class ____:
def test_import_string(self):
cls = import_string("airflow.utils.module_loading.import_string")
assert cls == import_string
# Test exceptions raised
with pytest.raises(ImportError):
import_string("no_dots_in_path")
msg = 'Module "airflow.utils" does not define a "nonexistent" attribute'
with pytest.raises(ImportError, match=msg):
import_string("airflow.utils.nonexistent")
| TestModuleImport |
python | joke2k__faker | faker/providers/address/vi_VN/__init__.py | {
"start": 203,
"end": 7391
} | class ____(AddressProvider):
"""Provider for generating Vietnamese addresses.
Sources:
# https://vi.wikipedia.org/wiki/B%E1%BA%A3n_m%E1%BA%ABu:K%C3%BD_hi%E1%BB%87u_quy_%C6%B0%E1%BB%9Bc_c%C3%A1c_t%E1%BB%89nh_th%C3%A0nh_Vi%E1%BB%87t_Nam
"""
city_prefixes = ("Thành phố", "Quận", "Huyện", "Thị xã")
city_suffixes = (
"Thành phố",
"Quận",
"Huyện",
"Thị xã",
"Xã",
"Phường",
)
building_number_formats = ("###", "##", "#")
street_suffixes = (
"Đường",
"Ngõ",
"Hẻm",
"Làng",
"Khu",
"Tổ",
"Số",
"Dãy",
)
postcode_formats = ("######",)
provinces = (
"An Giang",
"Bà Rịa – Vũng Tàu",
"Bạc Liêu",
"Bắc Kạn",
"Bắc Giang",
"Bắc Ninh",
"Bến Tre",
"Bình Dương",
"Bình Định",
"Bình Phước",
"Bình Thuận",
"Cà Mau",
"Cao Bằng",
"Cần Thơ",
"Đà Nẵng",
"Đắk Lắk",
"Đắk Nông",
"Điện Biên",
"Đồng Nai",
"Đồng Tháp",
"Gia Lai",
"Hà Giang",
"Hà Nam",
"Hà Nội",
"Hà Tĩnh",
"Hải Dương",
"Hải Phòng",
"Hậu Giang",
"Hòa Bình",
"Thành phố Hồ Chí Minh",
"Hưng Yên",
"Khánh Hòa",
"Kiên Giang",
"Kon Tum",
"Lai Châu",
"Lạng Sơn",
"Lào Cai",
"Lâm Đồng",
"Long An",
"Nam Định",
"Nghệ An",
"Ninh Bình",
"Ninh Thuận",
"Phú Thọ",
"Phú Yên",
"Quảng Bình",
"Quảng Nam",
"Quảng Ngãi",
"Quảng Ninh",
"Quảng Trị",
"Sóc Trăng",
"Sơn La",
"Tây Ninh",
"Thái Bình",
"Thái Nguyên",
"Thanh Hóa",
"Thừa Thiên Huế",
"Tiền Giang",
"Trà Vinh",
"Tuyên Quang",
"Vĩnh Long",
"Vĩnh Phúc",
"Yên Bái",
)
provinces_abbr = (
"AG",
"BV",
"BL",
"BK",
"BG",
"BN",
"BT",
"BD",
"BĐ",
"BP",
"BTh",
"CM",
"CB",
"CT",
"ĐNa",
"ĐL",
"ĐNo",
"ĐB",
"ĐN",
"ĐT",
"GL",
"HG",
"HNa",
"HN",
"HT",
"HD",
"HP",
"HGi",
"HB",
"SG",
"HY",
"KH",
"KG",
"KT",
"LC",
"LS",
"LCa",
"LĐ",
"LA",
"NĐ",
"NA",
"NB",
"NT",
"PT",
"PY",
"QB",
"QNa",
"QNg",
"QN",
"QT",
"ST",
"SL",
"TN",
"TB",
"TNg",
"TH",
"TTH",
"TG",
"TV",
"TQ",
"VL",
"VP",
"YB",
)
provinces_postcode = {
"AG": (88000, 88999),
"BV": (79000, 79999),
"BL": (96000, 96999),
"BK": (26000, 26999),
"BG": (23000, 23999),
"BN": (22000, 22999),
"BT": (93000, 93999),
"BD": (82000, 82999),
"BĐ": (59000, 59999),
"BP": (83000, 83999),
"BTh": (80000, 80999),
"CM": (97000, 97999),
"CB": (27000, 27999),
"CT": (92000, 92999),
"ĐNa": (55000, 55999),
"ĐL": (63000, 63999),
"ĐNo": (64000, 64999),
"ĐB": (38000, 38999),
"ĐN": (81000, 81999),
"ĐT": (87000, 87999),
"GL": (60000, 60999),
"HG": (31000, 31999),
"HNa": (40000, 40999),
"HN": (10000, 15999),
"HT": (48000, 48999),
"HD": (17000, 17999),
"HP": (18000, 18999),
"HGi": (91000, 91999),
"HB": (35000, 35999),
"SG": (70000, 76999),
"HY": (16000, 16999),
"KH": (65000, 65999),
"KG": (92000, 92999),
"KT": (58000, 58999),
"LC": (39000, 39999),
"LS": (24000, 24999),
"LCa": (33000, 33999),
"LĐ": (67000, 67999),
"LA": (85000, 85999),
"NĐ": (42000, 42999),
"NA": (46000, 47999),
"NB": (43000, 43999),
"NT": (66000, 66999),
"PT": (29000, 29999),
"PY": (62000, 62999),
"QB": (51000, 51999),
"QNa": (56000, 56999),
"QNg": (57000, 57999),
"QN": (20000, 20999),
"QT": (52000, 52999),
"ST": (95000, 95999),
"SL": (36000, 36999),
"TN": (84000, 84999),
"TB": (41000, 41999),
"TNg": (25000, 25999),
"TH": (44000, 45999),
"TTH": (53000, 53999),
"TG": (86000, 86999),
"TV": (94000, 94999),
"TQ": (30000, 30999),
"VL": (89000, 89999),
"VP": (28000, 28999),
"YB": (32000, 32999),
}
address_formats = OrderedDict(
(
("{{street_address}}\n{{city}}, {{postcode}}", 25.0),
("{{city}}\n{{street_address}}, {{postcode}}", 1.0),
)
)
city_formats = (
"{{city_prefix}} {{first_name}}{{city_suffix}}",
"{{first_name}}{{city_suffix}}",
)
street_name_formats = (
"{{first_name}} {{street_suffix}}",
"{{last_name}} {{street_suffix}}",
)
street_address_formats = ("{{building_number}} {{street_name}}",)
def city_prefix(self) -> str:
"""Returns a random city prefix."""
return self.random_element(self.city_prefixes)
def administrative_unit(self) -> str:
"""Returns a random administrative unit (province)."""
return self.random_element(self.provinces)
state = administrative_unit
def state_abbr(self) -> str:
"""
Returns a random two-letter abbreviation for Vietnamese provinces.
"""
abbreviations: Tuple[str, ...] = self.provinces_abbr
return self.random_element(abbreviations)
def postcode(self) -> str:
"""Returns a random postcode."""
return f"{self.generator.random.randint(100000, 999999):06d}"
def postcode_in_state(self, state_abbr: Optional[str] = None) -> str:
"""
Returns a random postcode within the provided province abbreviation.
:param state_abbr: A province abbreviation.
:returns: A random postcode within the provided province abbreviation.
"""
if state_abbr is None:
state_abbr = self.random_element(self.provinces_abbr)
if state_abbr in self.provinces_abbr:
postcode = str(
self.generator.random.randint(
self.provinces_postcode[state_abbr][0], self.provinces_postcode[state_abbr][1]
)
)
# zero left pad up until desired length (length is 6)
target_postcode_len = 6
current_postcode_len = len(postcode)
if current_postcode_len < target_postcode_len:
pad = target_postcode_len - current_postcode_len
postcode = f"{'0' * pad}{postcode}"
return postcode
raise ValueError("Province Abbreviation not found in list")
| Provider |
python | ray-project__ray | python/ray/util/multiprocessing/pool.py | {
"start": 12770,
"end": 14564
} | class ____:
"""An asynchronous interface to task results.
This should not be constructed directly.
"""
def __init__(
self, chunk_object_refs, callback=None, error_callback=None, single_result=False
):
self._single_result = single_result
self._result_thread = ResultThread(
chunk_object_refs, single_result, callback, error_callback
)
self._result_thread.start()
def wait(self, timeout=None):
"""
Returns once the result is ready or the timeout expires (does not
raise TimeoutError).
Args:
timeout: timeout in milliseconds.
"""
self._result_thread.join(timeout)
def get(self, timeout=None):
self.wait(timeout)
if self._result_thread.is_alive():
raise TimeoutError
results = []
for batch in self._result_thread.results():
for result in batch:
if isinstance(result, PoolTaskError):
raise result.underlying
elif isinstance(result, Exception):
raise result
results.extend(batch)
if self._single_result:
return results[0]
return results
def ready(self):
"""
Returns true if the result is ready, else false if the tasks are still
running.
"""
return not self._result_thread.is_alive()
def successful(self):
"""
Returns true if none of the submitted tasks errored, else false. Should
only be called once the result is ready (can be checked using `ready`).
"""
if not self.ready():
raise ValueError(f"{self!r} not ready")
return not self._result_thread.got_error()
| AsyncResult |
python | tensorflow__tensorflow | tensorflow/python/data/ops/scan_op.py | {
"start": 1358,
"end": 7068
} | class ____(dataset_ops.UnaryDataset):
"""A dataset that scans a function across its input."""
def __init__(self,
input_dataset,
initial_state,
scan_func,
use_default_device=None,
name=None):
"""See `scan()` for details."""
self._input_dataset = input_dataset
self._initial_state = structure.normalize_element(initial_state)
# Compute initial values for the state classes, shapes and types based on
# the initial state. The shapes may be refined by running `tf_scan_func` one
# or more times below.
self._state_structure = structure.type_spec_from_value(self._initial_state)
# Iteratively rerun the scan function until reaching a fixed point on
# `self._state_shapes`.
need_to_rerun = True
while need_to_rerun:
wrapped_func = structured_function.StructuredFunctionWrapper(
scan_func,
self._transformation_name(),
input_structure=(self._state_structure, input_dataset.element_spec),
add_to_graph=False)
if not (isinstance(wrapped_func.output_types, collections_abc.Sequence)
and len(wrapped_func.output_types) == 2):
raise TypeError(f"Invalid `scan_func`. `scan_func` should return a "
f"pair consisting of new state and the output value "
f"but its return type is "
f"{wrapped_func.output_structure}.")
new_state_classes, self._output_classes = wrapped_func.output_classes
# Extract and validate class information from the returned values.
new_state_classes, output_classes = wrapped_func.output_classes
old_state_classes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._state_structure)
for new_state_class, old_state_class in zip(
nest.flatten(new_state_classes), nest.flatten(old_state_classes)):
if not issubclass(new_state_class, old_state_class):
raise TypeError(f"Invalid `scan_func`. The element classes for the "
f"new state must match the initial state. Expected "
f"{old_state_classes}, got {new_state_classes}.")
# Extract and validate type information from the returned values.
new_state_types, output_types = wrapped_func.output_types
old_state_types = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._state_structure)
for new_state_type, old_state_type in zip(
nest.flatten(new_state_types), nest.flatten(old_state_types)):
if new_state_type != old_state_type:
raise TypeError(f"Invalid `scan_func`. The element types for the "
f"new state must match the initial state. Expected "
f"{old_state_types}, got {new_state_types}.")
# Extract shape information from the returned values.
new_state_shapes, output_shapes = wrapped_func.output_shapes
old_state_shapes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._state_structure)
self._element_spec = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
flat_state_shapes = nest.flatten(old_state_shapes)
flat_new_state_shapes = nest.flatten(new_state_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
# TODO(b/110122868): Support a "most specific compatible structure"
# method for combining structures, to avoid using legacy structures
# in this method.
self._state_structure = structure.convert_legacy_structure(
old_state_types,
nest.pack_sequence_as(old_state_shapes, weakened_state_shapes),
old_state_classes)
self._scan_func = wrapped_func
self._scan_func.function.add_to_graph(ops.get_default_graph())
self._name = name
# pylint: disable=protected-access
if use_default_device is not None:
variant_tensor = ged_ops.scan_dataset(
self._input_dataset._variant_tensor,
structure.to_tensor_list(self._state_structure, self._initial_state),
self._scan_func.function.captured_inputs,
f=self._scan_func.function,
preserve_cardinality=True,
use_default_device=use_default_device,
**self._common_args)
else:
variant_tensor = ged_ops.scan_dataset(
self._input_dataset._variant_tensor,
structure.to_tensor_list(self._state_structure, self._initial_state),
self._scan_func.function.captured_inputs,
f=self._scan_func.function,
preserve_cardinality=True,
**self._common_args)
super().__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._scan_func]
@property
def element_spec(self):
return self._element_spec
def _transformation_name(self):
return "Dataset.scan()"
| _ScanDataset |
python | keras-team__keras | keras/src/layers/core/embedding_test.py | {
"start": 365,
"end": 21555
} | class ____(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_embedding_basics(self):
self.run_layer_test(
layers.Embedding,
{"input_dim": 4, "output_dim": 3},
input_shape=(2,),
input_dtype="int32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.Embedding,
{"input_dim": 5, "output_dim": 4, "mask_zero": True},
input_shape=(2, 3),
input_dtype="int64",
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse(self):
self.run_layer_test(
layers.Embedding,
{"input_dim": 5, "output_dim": 4},
input_shape=(2, 3),
input_dtype="int32",
input_sparse=True,
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
@pytest.mark.skipif(
not backend.SUPPORTS_RAGGED_TENSORS,
reason="Backend does not support ragged tensors.",
)
def test_ragged(self):
self.run_layer_test(
layers.Embedding,
{"input_dim": 5, "output_dim": 4},
input_shape=(2, 3),
input_dtype="int32",
input_ragged=True,
expected_output_shape=(2, None, 4),
expected_output_ragged=True,
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
# run_training_check=False,
)
def test_correctness(self):
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array([2, 1, 0]))
self.assertAllClose(out, np.array([[3.0, 3.0], [2.0, 2.0], [0.0, 0.0]]))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_correctness_sparse(self):
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
if backend.backend() == "tensorflow":
import tensorflow as tf
x = tf.SparseTensor([[0, 0], [1, 2]], [2, 1], (2, 3))
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
x = jax_sparse.BCOO(([2, 1], [[0, 0], [1, 2]]), shape=(2, 3))
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
self.assertAllClose(
layer(x),
np.array(
[
[[3.0, 3.0], [0.0, 0.0], [0.0, 0.0]],
[[0.0, 0.0], [0.0, 0.0], [2.0, 2.0]],
]
),
)
def test_masking(self):
layer = layers.Embedding(input_dim=3, output_dim=2, mask_zero=True)
layer.build()
out = layer.compute_mask(np.array(([2, 1, 0])))
self.assertAllClose(out, np.array([True, True, False]))
def test_compute_mask_no_masking(self):
layer = layers.Embedding(input_dim=3, output_dim=2, mask_zero=False)
input_data = np.array([2, 1, 0])
mask = layer.compute_mask(input_data)
self.assertIsNone(mask)
def test_embedding_constraints(self):
layer = layers.Embedding(3, 2, embeddings_constraint="non_neg")
layer.build((None, 2))
self.assertIsInstance(layer.embeddings.constraint, constraints.NonNeg)
def test_weights_constructor_arg(self):
layer = layers.Embedding(3, 4, weights=np.ones((3, 4)))
self.assertAllClose(layer.embeddings.numpy(), np.ones((3, 4)))
layer = layers.Embedding(3, 4, weights=[np.ones((3, 4))])
self.assertAllClose(layer.embeddings.numpy(), np.ones((3, 4)))
@pytest.mark.requires_trainable_backend
def test_enable_lora(self):
layer = layers.Embedding(10, 16)
layer.build()
layer.enable_lora(4)
self.assertLen(layer.trainable_weights, 2)
self.assertLen(layer.non_trainable_weights, 1)
if backend.backend() == "torch":
self.assertLen(layer.torch_params, 3)
# Try eager call
x = np.random.randint(0, 9, size=(64, 3))
y = np.random.random((64, 3, 16))
_ = layer(x[:2])
init_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
init_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
# Try calling fit()
model = models.Sequential(
[
layer,
]
)
model.compile(optimizer="sgd", loss="mse")
model.fit(x, y)
final_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
final_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
diff_a = np.max(
np.abs(init_lora_a_embeddings_value - final_lora_a_embeddings_value)
)
diff_b = np.max(
np.abs(init_lora_b_embeddings_value - final_lora_b_embeddings_value)
)
self.assertGreater(diff_a, 0.0)
self.assertGreater(diff_b, 0.0)
# Try saving and reloading the model
temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertTrue(new_model.layers[0].lora_enabled)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try saving and reloading the model's weights only
temp_filepath = os.path.join(
self.get_temp_dir(), "lora_model.weights.h5"
)
model.save_weights(temp_filepath)
# Load the file into a fresh, non-lora model
new_model = models.Sequential(
[
layers.Input((3,), dtype="int32"),
layers.Embedding(10, 16),
]
)
new_model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try loading a normal checkpoint into a lora model
new_model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
@pytest.mark.requires_trainable_backend
def test_enable_lora_with_alpha(self):
# Create an `Embedding` layer without specifying `lora_rank`
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build((None,)) # Build the layer
# Set the base embeddings to known values.
base_emb = np.array(
[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype=np.float32
)
layer.embeddings.assign(base_emb)
# Enable LoRA with a custom alpha: `rank`=2, `lora_alpha`=3.0.
layer.enable_lora(2, lora_alpha=3.0)
self.assertEqual(layer.lora_rank, 2)
self.assertEqual(layer.lora_alpha, 3.0)
# Manually assign known values to lora weights.
a_val = np.array([[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], dtype=np.float32)
b_val = np.array([[0.5, 0.5], [0.6, 0.6]], dtype=np.float32)
layer.lora_embeddings_a.assign(a_val)
layer.lora_embeddings_b.assign(b_val)
# Compute the expected delta.
# Scaling factor: (3.0 / 2) = 1.5
effective_delta = 1.5 * np.matmul(a_val, b_val)
expected_embeddings = base_emb + effective_delta
# Verify that the effective embeddings match expectation.
actual_embeddings = ops.convert_to_numpy(layer.embeddings)
self.assertAllClose(actual_embeddings, expected_embeddings)
@pytest.mark.requires_trainable_backend
def test_lora_rank_argument(self):
self.run_layer_test(
layers.Embedding,
init_kwargs={"input_dim": 5, "output_dim": 4, "lora_rank": 2},
input_shape=(2, 3),
input_dtype="int32",
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_enable_lora_with_embeddings_constraint(self):
layer = layers.Embedding(
input_dim=10, output_dim=16, embeddings_constraint="max_norm"
)
with self.assertRaisesRegex(
ValueError, "incompatible with embedding constraints"
):
layer.enable_lora(rank=2)
def test_enable_lora_when_already_enabled(self):
layer = layers.Embedding(input_dim=10, output_dim=16)
layer.build()
layer.enable_lora(rank=2)
with self.assertRaisesRegex(ValueError, "lora is already enabled"):
layer.enable_lora(rank=2)
# Test quantization-related methods.
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
)
def test_quantize_int(self, mode):
layer = layers.Embedding(10, 16)
layer.build()
x = np.random.randint(0, 9, size=(64, 3))
y_float = layer(x)
layer.quantize(mode)
# Verify the dtype of the weights.
# The embeddings's dtype is int8, despite the int4 quantization, because
# we pack the int4 values into int8.
self.assertEqual(
backend.standardize_dtype(layer._embeddings.dtype), "int8"
)
self.assertEqual(
backend.standardize_dtype(layer.embeddings_scale.dtype),
layer.variable_dtype,
)
# Verify the unpacked embeddings for int4 quantization.
if mode == "int4":
self.assertAllClose(
layer.embeddings,
quantizers.unpack_int4(
layer._embeddings, layer.output_dim, axis=-1
),
)
# Verify the correctness of the outputs.
y_quantized = layer(x)
mse = ops.mean(ops.square(y_float - y_quantized))
self.assertLess(mse, 1e-3) # A weak correctness test
# Check model save / load round-trip.
model = models.Sequential([layer])
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_model.keras"
)
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Check weights-only save / load round-trip.
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_model.weights.h5"
)
model.save_weights(temp_filepath)
new_model = models.Sequential([layers.Embedding(10, 16)])
new_model.build((None, 3))
new_model.quantize(mode)
new_model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
)
def test_quantize_on_unbuilt_layer(self, mode):
layer = layers.Embedding(10, 16)
with self.assertRaisesRegex(
ValueError, "Cannot quantize a layer that isn't yet built."
):
layer.quantize(mode)
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
)
def test_quantize_on_subclass(self, mode):
class MyEmbedding(layers.Embedding):
pass
layer = MyEmbedding(10, 16)
layer.build()
with self.assertRaises(NotImplementedError):
layer.quantize(mode)
layer.quantize(mode, type_check=False) # No error
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
)
def test_quantize_when_already_quantized(self, mode):
layer = layers.Embedding(10, 16)
layer.build()
layer.quantize(mode)
for m in ("int8", "int4"):
with self.assertRaisesRegex(
ValueError, "is already quantized with dtype_policy="
):
layer.quantize(m)
layer = layers.Embedding(10, 16, dtype=f"{mode}_from_float32")
layer.build()
for m in ("int8", "int4"):
with self.assertRaisesRegex(
ValueError, "is already quantized with dtype_policy="
):
layer.quantize(m)
@parameterized.named_parameters(
("int8", "int8_from_float32", 2),
("int4", "int4_from_float32", 2),
)
def test_quantize_by_setting_dtype_policy(
self, policy, expected_num_variables
):
layer = layers.Embedding(10, 16)
layer.build()
layer.dtype_policy = policy
self.assertLen(layer.variables, expected_num_variables)
@parameterized.named_parameters(
("int7", "int7"),
("float7", "float7"),
)
def test_quantize_invalid_mode(self, mode):
layer = layers.Embedding(10, 16)
layer.build()
x = np.random.randint(0, 9, size=(1, 3))
# dtype_policy should not be altered by failed quantization
original_dtype_policy = layer.dtype_policy
# Test quantize
with self.assertRaisesRegex(ValueError, "Invalid quantization mode."):
layer.quantize(mode)
self.assertEqual(layer.dtype_policy, original_dtype_policy)
# Test quantized_build
with self.assertRaisesRegex(
NotImplementedError, "Invalid quantization mode."
):
layer.quantized_build((None, 2), mode)
self.assertEqual(layer.dtype_policy, original_dtype_policy)
# Test quantized_call
with self.assertRaisesRegex(
NotImplementedError, "Invalid quantization mode."
):
# Explicitly set quantization_mode
layer._dtype_policy._quantization_mode = mode
layer.quantized_call(x)
self.assertEqual(layer.dtype_policy, original_dtype_policy)
@parameterized.named_parameters(
("int8", "int8_from_mixed_bfloat16", 0, 2),
("int4", "int4_from_mixed_bfloat16", 0, 2),
)
@pytest.mark.requires_trainable_backend
def test_quantize_dtype_argument(
self, dtype, num_trainable_weights, num_non_trainable_weights
):
self.run_layer_test(
layers.Embedding,
{"input_dim": 4, "output_dim": 3, "dtype": dtype},
input_shape=(2,),
input_dtype="int32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=num_trainable_weights,
expected_num_non_trainable_weights=num_non_trainable_weights,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.Embedding,
{
"input_dim": 5,
"output_dim": 4,
"mask_zero": True,
"dtype": dtype,
},
input_shape=(2, 3),
input_dtype="int64",
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=num_trainable_weights,
expected_num_non_trainable_weights=num_non_trainable_weights,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@parameterized.named_parameters(
("int8", "int8", 2, 2, 4),
("int4", "int4", 2, 2, 4),
)
@pytest.mark.requires_trainable_backend
def test_quantize_lora_integration(
self,
mode,
num_trainable_weights,
num_non_trainable_weights,
num_torch_params,
):
layer = layers.Embedding(10, 16)
layer.build()
layer.enable_lora(4)
layer.quantize(mode)
self.assertLen(layer.trainable_weights, num_trainable_weights)
self.assertLen(layer.non_trainable_weights, num_non_trainable_weights)
if backend.backend() == "torch":
self.assertLen(layer.torch_params, num_torch_params)
# Try calling fit()
init_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
init_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
x = np.random.randint(0, 9, size=(64, 3))
y = np.random.random((64, 3, 16))
model = models.Sequential([layer])
model.compile(optimizer="sgd", loss="mse")
model.fit(x, y)
final_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
final_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
diff_a = np.max(
np.abs(init_lora_a_embeddings_value - final_lora_a_embeddings_value)
)
diff_b = np.max(
np.abs(init_lora_b_embeddings_value - final_lora_b_embeddings_value)
)
self.assertGreater(diff_a, 0.0)
self.assertGreater(diff_b, 0.0)
# Try saving and reloading the model
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_lora_model.keras"
)
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertTrue(new_model.layers[0].lora_enabled)
self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
# Try saving and reloading the model's weights only
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_lora_model.weights.h5"
)
model.save_weights(temp_filepath)
new_model = models.Sequential(
[layers.Input((3,), dtype="int32"), layers.Embedding(10, 16)]
)
new_model.quantize(mode)
new_model.load_weights(temp_filepath)
self.assertFalse(new_model.layers[0].lora_enabled)
self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
# Try loading a normal checkpoint into a lora model
new_model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
# Test export and TFSMLayer reloading when using tensorflow backend
if backend.backend() == "tensorflow":
import tensorflow as tf
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
ref_input = tf.random.normal((32, 3))
ref_output = model(ref_input)
model.export(temp_filepath, format="tf_saved_model")
reloaded_layer = export.TFSMLayer(temp_filepath)
self.assertAllClose(
reloaded_layer(ref_input), ref_output, atol=1e-7
)
self.assertLen(reloaded_layer.weights, len(model.weights))
self.assertLen(
reloaded_layer.trainable_weights, len(model.trainable_weights)
)
self.assertLen(
reloaded_layer.non_trainable_weights,
len(model.non_trainable_weights),
)
def test_legacy_load_own_variables(self):
# In previous versions, `load_own_variables` accepted a store with
# numeric keys.
float32_store = {
"0": np.random.random((10, 16)).astype("float32"),
}
int8_store = {
"0": np.random.randint(-128, 127, size=(10, 16), dtype="int8"),
"1": np.random.random((10,)).astype("float32"),
}
int4_store = {
"0": np.random.randint(-128, 127, size=(10, 8), dtype="int8"),
"1": np.random.random((10,)).astype("float32"),
}
# Test float32 layer.
layer = layers.Embedding(10, 16)
layer.build()
layer.load_own_variables(float32_store)
self.assertAllClose(layer._embeddings, float32_store["0"])
# Test int8-quantized layer.
layer = layers.Embedding(10, 16, dtype="int8_from_float32")
layer.build()
layer.load_own_variables(int8_store)
self.assertAllClose(layer._embeddings, int8_store["0"])
self.assertAllClose(layer.embeddings_scale, int8_store["1"])
# Test int4-quantized layer.
layer = layers.Embedding(10, 16, dtype="int4_from_float32")
layer.build()
layer.load_own_variables(int4_store)
self.assertAllClose(layer._embeddings, int4_store["0"])
self.assertAllClose(layer.embeddings_scale, int4_store["1"])
| EmbeddingTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI019_0.py | {
"start": 4226,
"end": 4364
} | class ____:
def m[_T](self: _T) -> _T:
x = cast('list[_\x54]', self)
return x
| ButStrangeStringizedReferencesCannotBeFixed |
python | walkccc__LeetCode | solutions/718. Maximum Length of Repeated Subarray/718-2.py | {
"start": 0,
"end": 314
} | class ____:
def findLength(self, nums1: list[int], nums2: list[int]) -> int:
ans = 0
dp = [0] * (len(nums2) + 1)
for a in reversed(nums1):
for j, b in enumerate(nums2): # The order is important.
dp[j] = dp[j + 1] + 1 if a == b else 0
ans = max(ans, dp[j])
return ans
| Solution |
python | weaviate__weaviate-python-client | weaviate/users/async_.py | {
"start": 305,
"end": 400
} | class ____(_UsersOIDCExecutor[ConnectionAsync]):
pass
@executor.wrap("async")
| _UsersOIDCAsync |
python | Farama-Foundation__Gymnasium | gymnasium/envs/phys2d/cartpole.py | {
"start": 1233,
"end": 8130
} | class ____(
FuncEnv[StateType, jax.Array, int, float, bool, RenderStateType, CartPoleParams]
):
"""Cartpole but in jax and functional."""
observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(4,), dtype=np.float32)
action_space = gym.spaces.Discrete(2)
def initial(
self, rng: PRNGKeyType, params: CartPoleParams = CartPoleParams
) -> StateType:
"""Initial state generation."""
return jax.random.uniform(
key=rng, minval=-params.x_init, maxval=params.x_init, shape=(4,)
)
def transition(
self,
state: StateType,
action: int | jax.Array,
rng: None = None,
params: CartPoleParams = CartPoleParams,
) -> StateType:
"""Cartpole transition."""
x, x_dot, theta, theta_dot = state
force = jnp.sign(action - 0.5) * params.force_mag
costheta = jnp.cos(theta)
sintheta = jnp.sin(theta)
# For the interested reader:
# https://coneural.org/florian/papers/05_cart_pole.pdf
temp = (
force + params.polemass_length * theta_dot**2 * sintheta
) / params.total_mass
thetaacc = (params.gravity * sintheta - costheta * temp) / (
params.length
* (4.0 / 3.0 - params.masspole * costheta**2 / params.total_mass)
)
xacc = temp - params.polemass_length * thetaacc * costheta / params.total_mass
x = x + params.tau * x_dot
x_dot = x_dot + params.tau * xacc
theta = theta + params.tau * theta_dot
theta_dot = theta_dot + params.tau * thetaacc
state = jnp.array((x, x_dot, theta, theta_dot), dtype=jnp.float32)
return state
def observation(
self, state: StateType, rng: Any, params: CartPoleParams = CartPoleParams
) -> jax.Array:
"""Cartpole observation."""
return state
def terminal(
self, state: StateType, rng: Any, params: CartPoleParams = CartPoleParams
) -> jax.Array:
"""Checks if the state is terminal."""
x, _, theta, _ = state
terminated = (
(x < -params.x_threshold)
| (x > params.x_threshold)
| (theta < -params.theta_threshold_radians)
| (theta > params.theta_threshold_radians)
)
return terminated
def reward(
self,
state: StateType,
action: ActType,
next_state: StateType,
rng: Any,
params: CartPoleParams = CartPoleParams,
) -> jax.Array:
"""Computes the reward for the state transition using the action."""
x, _, theta, _ = state
terminated = (
(x < -params.x_threshold)
| (x > params.x_threshold)
| (theta < -params.theta_threshold_radians)
| (theta > params.theta_threshold_radians)
)
reward = jax.lax.cond(
params.sutton_barto_reward,
lambda: jax.lax.cond(terminated, lambda: -1.0, lambda: 0.0),
lambda: 1.0,
)
return reward
def render_image(
self,
state: StateType,
render_state: RenderStateType,
params: CartPoleParams = CartPoleParams,
) -> tuple[RenderStateType, np.ndarray]:
"""Renders an image of the state using the render state."""
try:
import pygame
from pygame import gfxdraw
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
screen, clock = render_state
world_width = params.x_threshold * 2
scale = params.screen_width / world_width
polewidth = 10.0
polelen = scale * (2 * params.length)
cartwidth = 50.0
cartheight = 30.0
x = state
surf = pygame.Surface((params.screen_width, params.screen_height))
surf.fill((255, 255, 255))
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cartx = x[0] * scale + params.screen_width / 2.0 # MIDDLE OF CART
carty = 100 # TOP OF CART
cart_coords = [(l, b), (l, t), (r, t), (r, b)]
cart_coords = [(c[0] + cartx, c[1] + carty) for c in cart_coords]
gfxdraw.aapolygon(surf, cart_coords, (0, 0, 0))
gfxdraw.filled_polygon(surf, cart_coords, (0, 0, 0))
l, r, t, b = (
-polewidth / 2,
polewidth / 2,
polelen - polewidth / 2,
-polewidth / 2,
)
pole_coords = []
for coord in [(l, b), (l, t), (r, t), (r, b)]:
coord = pygame.math.Vector2(coord).rotate_rad(-x[2])
coord = (coord[0] + cartx, coord[1] + carty + axleoffset)
pole_coords.append(coord)
gfxdraw.aapolygon(surf, pole_coords, (202, 152, 101))
gfxdraw.filled_polygon(surf, pole_coords, (202, 152, 101))
gfxdraw.aacircle(
surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.filled_circle(
surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.hline(surf, 0, params.screen_width, carty, (0, 0, 0))
surf = pygame.transform.flip(surf, False, True)
screen.blit(surf, (0, 0))
return (screen, clock), np.transpose(
np.array(pygame.surfarray.pixels3d(screen)), axes=(1, 0, 2)
)
def render_init(
self,
params: CartPoleParams = CartPoleParams,
screen_width: int = 600,
screen_height: int = 400,
) -> RenderStateType:
"""Initialises the render state for a screen width and height."""
try:
import pygame
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
pygame.init()
screen = pygame.Surface((screen_width, screen_height))
clock = pygame.time.Clock()
return screen, clock
def render_close(
self, render_state: RenderStateType, params: CartPoleParams = CartPoleParams
) -> None:
"""Closes the render state."""
try:
import pygame
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
pygame.display.quit()
pygame.quit()
def get_default_params(self, **kwargs) -> CartPoleParams:
"""Returns the default parameters for the environment."""
return CartPoleParams(**kwargs)
| CartPoleFunctional |
python | Netflix__metaflow | test/core/metaflow_test/__init__.py | {
"start": 2290,
"end": 3870
} | class ____(MetaflowException):
headline = "Testing retry"
def __init__(self):
super(TestRetry, self).__init__("This is not an error. " "Testing retry...")
def get_card_container(id=None):
"""
Safetly try to load the card_container object.
"""
try:
return get_cards(current.pathspec, id=id)
except CardNotPresentException:
return None
def is_resumed():
return current.origin_run_id is not None
def origin_run_id_for_resume():
return current.origin_run_id
def assert_equals(expected, got):
if expected != got:
raise ExpectationFailed(expected, got)
def assert_equals_metadata(expected, got, exclude_keys=None):
# Check if the keys match
exclude_keys = set(exclude_keys if exclude_keys is not None else [])
k1_set = set(expected.keys()).difference(exclude_keys)
k2_set = set(got.keys()).difference(exclude_keys)
sym_diff = k1_set.symmetric_difference(k2_set)
if len(sym_diff) > 0:
raise ExpectationFailed("keys: %s" % str(k1_set), "keys: %s" % str(k2_set))
# At this point, we compare the metadata values, types and dates.
for k in k1_set:
if expected[k] != got[k]:
raise ExpectationFailed(
"[%s]: %s" % (k, str(expected[k])), "[%s]: %s" % (k, str(got[k]))
)
def assert_exception(func, exception):
try:
func()
except exception:
return
except Exception as ex:
raise ExpectationFailed(exception, ex)
else:
raise ExpectationFailed(exception, "no exception")
| TestRetry |
python | xlwings__xlwings | xlwings/_xlmac.py | {
"start": 50049,
"end": 53483
} | class ____(base_classes.Chart):
def __init__(self, parent, key):
self._parent = parent
if isinstance(parent, Sheet):
self.xl_obj = parent.xl.chart_objects[key]
self.xl = self.xl_obj.chart
else:
self.xl_obj = None
self.xl = self.charts[key]
@property
def parent(self):
return self._parent
@property
def api(self):
return self.xl_obj, self.xl
def set_source_data(self, rng):
self.xl.set_source_data(source=rng.xl)
@property
def name(self):
if self.xl_obj is not None:
return self.xl_obj.name.get()
else:
return self.xl.name.get()
@name.setter
def name(self, value):
if self.xl_obj is not None:
self.xl_obj.name.set(value)
else:
self.xl.name.get(value)
@property
def chart_type(self):
return chart_types_k2s[self.xl.chart_type.get()]
@chart_type.setter
def chart_type(self, value):
self.xl.chart_type.set(chart_types_s2k[value])
@property
def left(self):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
return self.xl_obj.left_position.get()
@left.setter
def left(self, value):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
self.xl_obj.left_position.set(value)
@property
def top(self):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
return self.xl_obj.top.get()
@top.setter
def top(self, value):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
self.xl_obj.top.set(value)
@property
def width(self):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
return self.xl_obj.width.get()
@width.setter
def width(self, value):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
self.xl_obj.width.set(value)
@property
def height(self):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
return self.xl_obj.height.get()
@height.setter
def height(self, value):
if self.xl_obj is None:
raise Exception("This chart is not embedded.")
self.xl_obj.height.set(value)
def delete(self):
self.xl_obj.delete()
def to_png(self, path):
raise xlwings.XlwingsError("Chart.to_png() isn't supported on macOS.")
# Both versions should work, but seem to be broken with Excel 2016
#
# Version 1
# import uuid
# temp_path = posix_to_hfs_path(os.path.expanduser("~")
# + f"/Library/Containers/com.microsoft.Excel/"
# f"Data/{uuid.uuid4()}.png")
# self.xl.save_as(filename=temp_path)
# shutil.copy2(temp_path, path)
# try:
# os.unlink(temp_path)
# except:
# pass
#
# Version 2
# self.xl_obj.save_as_picture(file_name=posix_to_hfs_path('...'),
# picture_type=kw.save_as_PNG_file)
def to_pdf(self, path, quality=None):
raise xlwings.XlwingsError("Chart.to_pdf() isn't supported on macOS.")
| Chart |
python | davidhalter__jedi | jedi/inference/value/function.py | {
"start": 6257,
"end": 6961
} | class ____(FunctionValue):
def __init__(self, inference_state, class_context, *args, **kwargs):
super().__init__(inference_state, *args, **kwargs)
self.class_context = class_context
def get_default_param_context(self):
return self.class_context
def get_qualified_names(self):
# Need to implement this, because the parent value of a method
# value is not the class value but the module.
names = self.class_context.get_qualified_names()
if names is None:
return None
return names + (self.py__name__(),)
@property
def name(self):
return FunctionNameInClass(self.class_context, super().name)
| MethodValue |
python | huggingface__transformers | src/transformers/models/zamba2/modeling_zamba2.py | {
"start": 61249,
"end": 70704
} | class ____(Zamba2PreTrainedModel):
"""
Model consisting of *config.num_hidden_layers* layers.
Args:
config: Zamba2Config
"""
def __init__(self, config: Zamba2Config):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
blocks = [Zamba2AttentionDecoderLayer(config, block_id=k) for k in range(config.num_mem_blocks)]
mamba_layers = []
linear_layers = []
self.layers_block_type = config.layers_block_type
for i in range(config.num_hidden_layers):
if config.layers_block_type[i] == "mamba":
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
elif config.layers_block_type[i] == "hybrid":
linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
mamba_layers = iter(mamba_layers)
linear_layers = iter(linear_layers)
blocks = cycle(blocks)
layers = self.get_layers(blocks, linear_layers, mamba_layers)
self.layers = nn.ModuleList(layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
if config.use_mem_rope:
if config.use_long_context:
logger.warning_once(
"`use_long_context` set to `True`: using rescaled `rope_theta` and extended `max_position_embeddings`."
)
self.rotary_emb = Zamba2RotaryEmbedding(config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
)
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
original_hidden_states = torch.clone(inputs_embeds)
# original_hidden_states: word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer
if use_cache and past_key_values is None:
batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
past_key_values = Zamba2HybridDynamicCache(self.config, batch_size, dtype=self.dtype, device=self.device)
if cache_position is None:
past_seen_tokens = (
past_key_values.get_seq_length(layer_idx=self.first_transformer_layer_id)
if past_key_values is not None
else 0
)
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for layer_idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = layer(
hidden_states,
original_hidden_states,
layer_idx,
attention_mask,
causal_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
position_embeddings=position_embeddings,
position_ids=position_ids,
)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
# append attentions only of attention layers. Mamba layers return `None` as the attention weights
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values is not None and not past_key_values.has_previous_state:
past_key_values.has_previous_state = True
output = BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
return output if return_dict else output.to_tuple()
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
dtype, device = input_tensor.dtype, input_tensor.device
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
if cache_position is None:
target_length = sequence_length
else:
target_length = cache_position[-1] + 1
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
if cache_position is not None:
causal_mask *= (torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)).to(torch.bool)
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.dim() == 2:
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
def get_layers(self, blocks, linear_layers, mamba_layers):
layers = []
self._tied_weights_keys = {}
self.first_transformer_layer_id = 0
for layer_id, layer_type in enumerate(self.layers_block_type):
if layer_type == "hybrid":
block = next(blocks)
if self.config.num_mem_blocks * len(self.config.hybrid_layer_ids) > 1:
prefix_pattern = f"layers.{layer_id}.shared_transformer"
self._tied_weights_keys.update({prefix_pattern: "layers.0.shared_transformer"})
layers.append(Zamba2HybridLayer(block, next(linear_layers), next(mamba_layers)))
else:
layers.append(next(mamba_layers))
return layers
# Adapted from transformers.models.jamba.modeling_jamba.JambaForCausalLM with Jamba->Zamba2, JAMBA->ZAMBA2
| Zamba2Model |
python | apache__airflow | scripts/in_container/verify_providers.py | {
"start": 2082,
"end": 32296
} | class ____(NamedTuple):
all_entities: set[str]
wrong_entities: list[tuple[type, str]]
ENTITY_NAMES = {
EntityType.Operators: "Operators",
EntityType.Transfers: "Transfer Operators",
EntityType.Sensors: "Sensors",
EntityType.Hooks: "Hooks",
EntityType.Secrets: "Secrets",
EntityType.Trigger: "Trigger",
EntityType.Notification: "Notification",
}
TOTALS: dict[EntityType, int] = {
EntityType.Operators: 0,
EntityType.Hooks: 0,
EntityType.Sensors: 0,
EntityType.Transfers: 0,
EntityType.Secrets: 0,
EntityType.Trigger: 0,
EntityType.Notification: 0,
}
OPERATORS_PATTERN = r".*Operator$"
SENSORS_PATTERN = r".*Sensor$"
HOOKS_PATTERN = r".*Hook$"
SECRETS_PATTERN = r".*Backend$"
TRANSFERS_PATTERN = r".*To[A-Z0-9].*Operator$"
WRONG_TRANSFERS_PATTERN = r".*Transfer$|.*TransferOperator$"
TRIGGER_PATTERN = r".*Trigger$"
NOTIFICATION_PATTERN = r".*Notifier|.*send_.*_notification$"
ALL_PATTERNS = {
OPERATORS_PATTERN,
SENSORS_PATTERN,
HOOKS_PATTERN,
SECRETS_PATTERN,
TRANSFERS_PATTERN,
WRONG_TRANSFERS_PATTERN,
TRIGGER_PATTERN,
NOTIFICATION_PATTERN,
}
EXPECTED_SUFFIXES: dict[EntityType, str] = {
EntityType.Operators: "Operator",
EntityType.Hooks: "Hook",
EntityType.Sensors: "Sensor",
EntityType.Secrets: "Backend",
EntityType.Transfers: "Operator",
EntityType.Trigger: "Trigger",
EntityType.Notification: "Notifier",
}
def get_all_providers() -> list[str]:
return list(ALL_DEPENDENCIES.keys())
def import_all_classes(
walkable_paths_and_prefixes: dict[str, str],
prefix: str,
provider_ids: list[str] | None = None,
print_imports: bool = False,
print_skips: bool = False,
) -> tuple[list[str], list[str]]:
"""Imports all classes in providers packages.
This method loads and imports all the classes found in providers, so that we
can find all the subclasses of operators/sensors etc.
:param walkable_paths_and_prefixes: dict of paths with accompanying prefixes
to look the provider distributions in
:param prefix: prefix to add
:param provider_ids - provider ids that should be loaded.
:param print_imports - if imported class should also be printed in output
:param print_skips - if skipped classes should also be printed in output
:return: tuple of list of all imported classes and
"""
console.print()
console.print(f"Walking all package with prefixes in {walkable_paths_and_prefixes}")
console.print()
imported_classes = []
classes_with_potential_circular_import = []
tracebacks: list[tuple[str, str]] = []
printed_packages: set[str] = set()
def mk_prefix(provider_id):
return f"{prefix}{provider_id}"
if provider_ids:
provider_prefixes = tuple(mk_prefix(provider_id) for provider_id in provider_ids)
else:
provider_prefixes = (prefix,)
def onerror(_):
nonlocal tracebacks
exception_string = traceback.format_exc()
for provider_prefix in provider_prefixes:
if provider_prefix in exception_string:
start_index = exception_string.find(provider_prefix)
end_index = exception_string.find("\n", start_index + len(provider_prefix))
package = exception_string[start_index:end_index]
tracebacks.append((package, exception_string))
break
for path, prefix in walkable_paths_and_prefixes.items():
for modinfo in pkgutil.walk_packages(path=[path], prefix=prefix, onerror=onerror):
if not modinfo.name.startswith(provider_prefixes):
if print_skips:
console.print(f"Skipping module: {modinfo.name}")
continue
if print_imports:
package_to_print = modinfo.name.rpartition(".")[0]
if package_to_print not in printed_packages:
printed_packages.add(package_to_print)
console.print(f"Importing package: {package_to_print}")
try:
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always", category=DeprecationWarning)
_module = importlib.import_module(modinfo.name)
for attribute_name in dir(_module):
class_name = modinfo.name + "." + attribute_name
attribute = getattr(_module, attribute_name)
# Skip
# - parameterized generics like Sequence[tuple[str, str]]
# - builtins like str, int, etc.
# - non-class objects
if (
isinstance(attribute, GenericAlias)
or (hasattr(attribute, "__module__") and attribute.__module__ == "builtins")
or not isclass(attribute)
):
continue
imported_classes.append(class_name)
# Handle circular imports for specific subclasses
if issubclass(attribute, logging.Handler) or (
isabstract(attribute) and issubclass(attribute, BaseSecretsBackend)
):
classes_with_potential_circular_import.append(class_name)
except AirflowOptionalProviderFeatureException:
# We ignore optional features
...
except Exception as e:
# skip the check as we are temporary vendoring in the google ads client with wrong package
# skip alembic.context which is only available when alembic command is executed from a folder
# containing the alembic.ini file
if "No module named 'google.ads.googleads.v12'" not in str(
e
) and "module 'alembic.context' has no attribute 'config'" not in str(e):
exception_str = traceback.format_exc()
tracebacks.append((modinfo.name, exception_str))
if tracebacks:
if IS_AIRFLOW_VERSION_PROVIDED:
console.print(
f"""
[red]ERROR: There were some import errors[/]
[yellow]Detected that this job is about installing providers in {USE_AIRFLOW_VERSION}[/],
[yellow]most likely you are using features that are not available in Airflow {USE_AIRFLOW_VERSION}[/]
[yellow]and you must implement them in backwards-compatible way![/]
""",
)
console.print("[red]----------------------------------------[/]")
for package, trace in tracebacks:
console.print(f"Exception when importing: {package}\n\n")
console.print(trace)
console.print("[red]----------------------------------------[/]")
sys.exit(1)
else:
return imported_classes, classes_with_potential_circular_import
def is_imported_from_same_module(the_class: str, imported_name: str) -> bool:
"""Is the class imported from another module?
:param the_class: the class object itself
:param imported_name: name of the imported class
:return: true if the class was imported from another module
"""
return imported_name.rpartition(":")[0] == the_class.__module__
def is_example_dag(imported_name: str) -> bool:
"""Is the class an example_dag class?
:param imported_name: name where the class is imported from
:return: true if it is an example_dags class
"""
return ".example_dags." in imported_name
def is_from_the_expected_base_package(the_class: type, expected_package: str) -> bool:
"""Returns true if the class is from the package expected.
:param the_class: the class object
:param expected_package: package expected for the class
"""
return the_class.__module__.startswith(expected_package)
def inherits_from(the_class: type, expected_ancestor: type | None = None) -> bool:
"""Returns true if the class inherits (directly or indirectly) from the class specified.
:param the_class: The class to check
:param expected_ancestor: expected class to inherit from
:return: true is the class inherits from the class expected
"""
if expected_ancestor is None:
return False
import inspect
mro = inspect.getmro(the_class)
return the_class is not expected_ancestor and expected_ancestor in mro
def is_class(the_class: type) -> bool:
"""Returns true if the object passed is a class.
:param the_class: the class to pass
:return: true if it is a class
"""
import inspect
return inspect.isclass(the_class)
def package_name_matches(the_class: type, expected_pattern: str | None = None) -> bool:
"""In case expected_pattern is set, it checks if the package name matches the pattern.
:param the_class: imported class
:param expected_pattern: the pattern that should match the package
:return: true if the expected_pattern is None or the pattern matches the package
"""
return expected_pattern is None or re.match(expected_pattern, the_class.__module__) is not None
def convert_classes_to_table(entity_type: EntityType, entities: list[str], full_package_name: str) -> str:
"""Converts new entities to a Markdown table.
:param entity_type: entity type to convert to markup
:param entities: list of entities
:param full_package_name: name of the provider package
:return: table of new classes
"""
from tabulate import tabulate
headers = [f"New Airflow 2.0 {entity_type.value.lower()}: `{full_package_name}` package"]
table = [(get_class_code_link(full_package_name, class_name, "main"),) for class_name in entities]
return tabulate(table, headers=headers, tablefmt="pipe")
def get_details_about_classes(
entity_type: EntityType,
entities: set[str],
wrong_entities: list[tuple[type, str]],
full_package_name: str,
) -> EntityTypeSummary:
"""Get details about entities.
:param entity_type: type of entity (Operators, Hooks etc.)
:param entities: set of entities found
:param wrong_entities: wrong entities found for that type
:param full_package_name: full package name
"""
all_entities = sorted(entities)
TOTALS[entity_type] += len(all_entities)
return EntityTypeSummary(
entities=all_entities,
new_entities_table=convert_classes_to_table(
entity_type=entity_type,
entities=all_entities,
full_package_name=full_package_name,
),
wrong_entities=wrong_entities,
)
def strip_package_from_class(base_package: str, class_name: str) -> str:
"""Strips base package name from the class (if it starts with the package name)."""
if class_name.startswith(base_package):
return class_name[len(base_package) + 1 :]
return class_name
def convert_class_name_to_url(base_url: str, class_name) -> str:
"""Converts the class name to URL that the class can be reached.
:param base_url: base URL to use
:param class_name: name of the class
:return: URL to the class
"""
return base_url + class_name.rpartition(".")[0].replace(".", "/") + ".py"
def get_class_code_link(base_package: str, class_name: str, git_tag: str) -> str:
"""Provides a Markdown link for the class passed as parameter.
:param base_package: base package to strip from most names
:param class_name: name of the class
:param git_tag: tag to use for the URL link
:return: URL to the class
"""
url_prefix = f"https://github.com/apache/airflow/blob/{git_tag}/"
return (
f"[{strip_package_from_class(base_package, class_name)}]"
f"({convert_class_name_to_url(url_prefix, class_name)})"
)
def print_wrong_naming(entity_type: EntityType, wrong_classes: list[tuple[type, str]]):
"""Prints wrong entities of a given entity type if there are any.
:param entity_type: type of the class to print
:param wrong_classes: list of wrong entities
"""
if wrong_classes:
console.print(f"\n[red]There are wrongly named entities of type {entity_type}:[/]\n")
for wrong_entity_type, message in wrong_classes:
console.print(f"{wrong_entity_type}: {message}")
def find_all_entities(
imported_classes: list[str],
base_package: str,
ancestor_match: type,
sub_package_pattern_match: str,
expected_class_name_pattern: str,
unexpected_class_name_patterns: set[str],
exclude_class_type: type | None = None,
false_positive_class_names: set[str] | None = None,
) -> VerifiedEntities:
"""Returns set of entities containing all subclasses in package specified.
:param imported_classes: entities imported from providers
:param base_package: base package name where to start looking for the entities
:param sub_package_pattern_match: this string is expected to appear in the sub-package name
:param ancestor_match: type of the object the method looks for
:param expected_class_name_pattern: regexp of class name pattern to expect
:param unexpected_class_name_patterns: set of regexp of class name pattern that are not expected
:param exclude_class_type: exclude class of this type (Sensor are also Operators, so
they should be excluded from the list)
:param false_positive_class_names: set of class names that are wrongly recognised as badly named
"""
found_entities: set[str] = set()
wrong_entities: list[tuple[type, str]] = []
for imported_name in imported_classes:
module, class_name = imported_name.rsplit(".", maxsplit=1)
the_class = getattr(importlib.import_module(module), class_name)
if (
is_class(the_class=the_class)
and not is_example_dag(imported_name=imported_name)
and is_from_the_expected_base_package(the_class=the_class, expected_package=base_package)
and is_imported_from_same_module(the_class=the_class, imported_name=imported_name)
and inherits_from(the_class=the_class, expected_ancestor=ancestor_match)
and not inherits_from(the_class=the_class, expected_ancestor=exclude_class_type)
and package_name_matches(the_class=the_class, expected_pattern=sub_package_pattern_match)
):
if not false_positive_class_names or class_name not in false_positive_class_names:
if not re.match(expected_class_name_pattern, class_name):
wrong_entities.append(
(
the_class,
f"The class name {class_name} is wrong. "
f"It should match {expected_class_name_pattern}",
)
)
continue
if unexpected_class_name_patterns:
for unexpected_class_name_pattern in unexpected_class_name_patterns:
if re.match(unexpected_class_name_pattern, class_name):
wrong_entities.append(
(
the_class,
f"The class name {class_name} is wrong. "
f"It should not match {unexpected_class_name_pattern}",
)
)
found_entities.add(imported_name)
return VerifiedEntities(all_entities=found_entities, wrong_entities=wrong_entities)
def get_package_class_summary(
full_package_name: str, imported_classes: list[str]
) -> dict[EntityType, EntityTypeSummary]:
"""Gets summary of the package in the form of dictionary containing all types of entities.
:param full_package_name: full package name
:param imported_classes: entities imported_from providers
:return: dictionary of objects usable as context for JINJA2 templates, or
None if there are some errors
"""
from airflow.models.baseoperator import BaseOperator
from airflow.sdk import BaseHook
from airflow.secrets import BaseSecretsBackend
from airflow.sensors.base import BaseSensorOperator
from airflow.triggers.base import BaseTrigger
# Remove this conditional check after providers are 2.6+ compatible
try:
from airflow.providers.common.compat.notifier import BaseNotifier
has_notifier = True
except ImportError:
has_notifier = False
all_verified_entities: dict[EntityType, VerifiedEntities] = {
EntityType.Operators: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.operators\..*",
ancestor_match=BaseOperator,
expected_class_name_pattern=OPERATORS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN},
exclude_class_type=BaseSensorOperator,
false_positive_class_names={
"ProduceToTopicOperator",
"CloudVisionAddProductToProductSetOperator",
"CloudDataTransferServiceGCSToGCSOperator",
"CloudDataTransferServiceS3ToGCSOperator",
"BigQueryCreateDataTransferOperator",
"CloudTextToSpeechSynthesizeOperator",
"CloudSpeechToTextRecognizeSpeechOperator",
},
),
EntityType.Sensors: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.sensors\..*",
ancestor_match=BaseSensorOperator,
expected_class_name_pattern=SENSORS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, SENSORS_PATTERN},
),
EntityType.Hooks: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.hooks\..*",
ancestor_match=BaseHook,
expected_class_name_pattern=HOOKS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {HOOKS_PATTERN},
),
EntityType.Secrets: find_all_entities(
imported_classes=imported_classes,
sub_package_pattern_match=r".*\.secrets\..*",
base_package=full_package_name,
ancestor_match=BaseSecretsBackend,
expected_class_name_pattern=SECRETS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {SECRETS_PATTERN},
),
EntityType.Transfers: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.transfers\..*",
ancestor_match=BaseOperator,
expected_class_name_pattern=TRANSFERS_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {OPERATORS_PATTERN, TRANSFERS_PATTERN},
),
EntityType.Trigger: find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.triggers\..*",
ancestor_match=BaseTrigger,
expected_class_name_pattern=TRIGGER_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {TRIGGER_PATTERN},
),
}
if has_notifier:
all_verified_entities[EntityType.Notification] = find_all_entities(
imported_classes=imported_classes,
base_package=full_package_name,
sub_package_pattern_match=r".*\.notifications\..*",
ancestor_match=BaseNotifier,
expected_class_name_pattern=NOTIFICATION_PATTERN,
unexpected_class_name_patterns=ALL_PATTERNS - {NOTIFICATION_PATTERN},
)
else:
all_verified_entities[EntityType.Notification] = VerifiedEntities(
all_entities=set(), wrong_entities=[]
)
for entity in EntityType:
print_wrong_naming(entity, all_verified_entities[entity].wrong_entities)
entities_summary: dict[EntityType, EntityTypeSummary] = {}
for entity_type in EntityType:
entities_summary[entity_type] = get_details_about_classes(
entity_type,
all_verified_entities[entity_type].all_entities,
all_verified_entities[entity_type].wrong_entities,
full_package_name,
)
return entities_summary
def is_camel_case_with_acronyms(s: str):
"""Checks if the string passed is Camel Case (with capitalised acronyms allowed).
:param s: string to check
:return: true if the name looks cool as Class name.
"""
if s and s[0] == "_": # Leading underscores are fine.
s = s[1:]
if not s:
return True
return s[0].isupper() and not (s.islower() or s.isupper() or "_" in s)
def check_if_classes_are_properly_named(
entity_summary: dict[EntityType, EntityTypeSummary],
) -> tuple[int, int]:
"""Check if all entities in the dictionary are named properly.
It prints names at the output and returns the status of class names.
:param entity_summary: dictionary of class names to check, grouped by types.
:return: Tuple of 2 ints = total number of entities and number of badly named entities
"""
total_class_number = 0
badly_named_class_number = 0
for entity_type, class_suffix in EXPECTED_SUFFIXES.items():
for class_full_name in entity_summary[entity_type].entities:
_, class_name = class_full_name.rsplit(".", maxsplit=1)
error_encountered = False
if (
class_name.startswith("send_")
and class_name.endswith("_notification")
and entity_type == EntityType.Notification
):
continue
if not is_camel_case_with_acronyms(class_name):
console.print(
f"[red]The class {class_full_name} is wrongly named. The "
f"class name should be CamelCaseWithACRONYMS optionally "
f"with a single leading underscore[/]"
)
error_encountered = True
if not class_name.endswith(class_suffix):
console.print(
f"[red]The class {class_full_name} is wrongly named. It is one of the {entity_type.value}"
f" so it should end with {class_suffix}[/]"
)
error_encountered = True
total_class_number += 1
if error_encountered:
badly_named_class_number += 1
return total_class_number, badly_named_class_number
def verify_provider_classes_for_single_provider(imported_classes: list[str], provider_id: str):
"""Verify naming of provider classes for single provider."""
full_package_name = f"airflow.providers.{provider_id}"
entity_summaries = get_package_class_summary(full_package_name, imported_classes)
total, bad = check_if_classes_are_properly_named(entity_summaries)
bad += sum(len(entity_summary.wrong_entities) for entity_summary in entity_summaries.values())
if bad != 0:
console.print()
console.print(f"[red]There are {bad} errors of {total} entities for {provider_id}[/]")
console.print()
return total, bad
def summarise_total_vs_bad(total: int, bad: int) -> bool:
"""Summarises Bad/Good class names for providers"""
if bad == 0:
console.print()
console.print(f"[green]OK: All {total} entities are properly named[/]")
console.print()
console.print("Totals:")
console.print()
for entity in EntityType:
console.print(f"{entity.value}: {TOTALS[entity]}")
console.print()
else:
console.print()
if os.environ.get("CI") != "":
console.print("::endgroup::")
console.print(
f"[red]ERROR! There are in total: {bad} entities badly named out of {total} entities[/]"
)
console.print()
console.print("[red]Please fix the problems listed above [/]")
return False
return True
def get_providers_paths() -> list[str]:
import airflow.providers
paths = []
# as well as those installed via packages
paths.extend(airflow.providers.__path__) # type: ignore[attr-defined]
return paths
def add_all_namespaced_packages(
walkable_paths_and_prefixes: dict[str, str], provider_path: str, provider_prefix: str
):
"""Find namespace packages.
This needs to be done manually as ``walk_packages`` does not support
namespaced packages and PEP 420.
:param walkable_paths_and_prefixes: pats
:param provider_path:
:param provider_prefix:
"""
main_path = Path(provider_path).resolve()
for candidate_path in main_path.rglob("*"):
if candidate_path.name == "__pycache__":
continue
if candidate_path.is_dir() and not (candidate_path / "__init__.py").exists():
subpackage = str(candidate_path.relative_to(main_path)).replace(os.sep, ".")
walkable_paths_and_prefixes[str(candidate_path)] = provider_prefix + subpackage + "."
def verify_provider_classes() -> tuple[list[str], list[str]]:
"""Verifies all provider classes.
:return: Tuple: list of all classes and list of all classes that have potential recursion side effects
"""
provider_ids = get_all_providers()
walkable_paths_and_prefixes: dict[str, str] = {}
provider_prefix = "airflow.providers."
for provider_path in get_providers_paths():
walkable_paths_and_prefixes[provider_path] = provider_prefix
add_all_namespaced_packages(walkable_paths_and_prefixes, provider_path, provider_prefix)
imported_classes, classes_with_potential_circular_import = import_all_classes(
walkable_paths_and_prefixes=walkable_paths_and_prefixes,
provider_ids=provider_ids,
print_imports=True,
prefix="airflow.providers.",
)
total = 0
bad = 0
for provider_id in provider_ids:
inc_total, inc_bad = verify_provider_classes_for_single_provider(imported_classes, provider_id)
total += inc_total
bad += inc_bad
if not summarise_total_vs_bad(total, bad):
sys.exit(1)
if not imported_classes:
console.print("[red]Something is seriously wrong - no classes imported[/]")
sys.exit(1)
console.print()
console.print("[green]SUCCESS: All provider distributions are importable![/]\n")
console.print(f"Imported {len(imported_classes)} classes.")
console.print()
return imported_classes, classes_with_potential_circular_import
def run_provider_discovery():
console.print("[bright_blue]List all providers[/]\n")
subprocess.run(["airflow", "providers", "list"], check=True)
console.print("[bright_blue]List all hooks[/]\n")
subprocess.run(["airflow", "providers", "hooks"], check=True)
console.print("[bright_blue]List all behaviours[/]\n")
subprocess.run(["airflow", "providers", "behaviours"], check=True)
console.print("[bright_blue]List all widgets[/]\n")
subprocess.run(["airflow", "providers", "widgets"], check=True)
console.print("[bright_blue]List all extra links[/]\n")
subprocess.run(["airflow", "providers", "links"], check=True)
console.print("[bright_blue]List all logging[/]\n")
subprocess.run(["airflow", "providers", "logging"], check=True)
console.print("[bright_blue]List all secrets[/]\n")
subprocess.run(["airflow", "providers", "secrets"], check=True)
console.print("[bright_blue]List all triggers[/]\n")
subprocess.run(["airflow", "providers", "triggers"], check=True)
console.print("[bright_blue]List all executors[/]\n")
subprocess.run(["airflow", "providers", "executors"], check=True)
AIRFLOW_LOCAL_SETTINGS_PATH = Path("/opt/airflow") / "airflow_local_settings.py"
if __name__ == "__main__":
sys.path.insert(0, str(AIRFLOW_ROOT_PATH))
all_imported_classes, all_classes_with_potential_for_circular_import = verify_provider_classes()
try:
AIRFLOW_LOCAL_SETTINGS_PATH.write_text(
"\n".join(
[
"from {} import {}".format(*class_name.rsplit(".", 1))
for class_name in all_classes_with_potential_for_circular_import
]
)
)
console.print(
"[bright_blue]Importing all provider classes with potential for circular imports"
" via airflow_local_settings.py:\n\n"
)
console.print(AIRFLOW_LOCAL_SETTINGS_PATH.read_text())
console.print("\n")
proc = subprocess.run([sys.executable, "-c", "import airflow"], check=False)
if proc.returncode != 0:
console.print(
"[red] Importing all provider classes with potential for recursion "
"via airflow_local_settings.py failed!\n\n"
)
console.print(
"\n[bright_blue]If you see AttributeError or ImportError, it might mean that there is "
"a circular import from a provider that should be solved\n"
)
console.print(
"\nThe reason for the circular imports might be that if Airflow is configured "
"to use some of the provider's logging/secret backends in settings\n"
"the extensions might attempt to import airflow configuration, "
"version or settings packages.\n"
"Accessing those packages will trigger attribute/import errors, because "
"they are not fully imported at this time.\n"
)
console.print(
"\n[info]Look at the stack trace above and see where `airflow` core classes have failed to be"
"imported from and fix it so that the class does not do it.\n"
)
sys.exit(proc.returncode)
finally:
AIRFLOW_LOCAL_SETTINGS_PATH.unlink()
run_provider_discovery()
| VerifiedEntities |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 20888,
"end": 21651
} | class ____(TestEdgeView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, nx.DiGraph())
cls.eview = nx.reportviews.InEdgeView
def test_repr(self):
ev = self.eview(self.G)
rep = (
"InEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), "
+ "(4, 5), (5, 6), (6, 7), (7, 8)])"
)
assert repr(ev) == rep
def test_contains_with_nbunch(self):
ev = self.eview(self.G)
evn = ev(nbunch=[0, 2])
assert (0, 1) not in evn
assert (1, 2) in evn
assert (2, 3) not in evn
assert (3, 4) not in evn
assert (4, 5) not in evn
assert (5, 6) not in evn
assert (7, 8) not in evn
assert (8, 9) not in evn
| TestInEdgeView |
python | giampaolo__psutil | tests/test_windows.py | {
"start": 21806,
"end": 24333
} | class ____(WindowsTestCase):
"""Compare Process API results with WMI."""
@classmethod
def setUpClass(cls):
cls.pid = spawn_subproc().pid
@classmethod
def tearDownClass(cls):
terminate(cls.pid)
def test_name(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
assert p.name() == w.Caption
# This fail on github because using virtualenv for test environment
@pytest.mark.skipif(
GITHUB_ACTIONS, reason="unreliable path on GITHUB_ACTIONS"
)
def test_exe(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
# Note: wmi reports the exe as a lower case string.
# Being Windows paths case-insensitive we ignore that.
assert p.exe().lower() == w.ExecutablePath.lower()
def test_cmdline(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
assert ' '.join(p.cmdline()) == w.CommandLine.replace('"', '')
def test_username(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
domain, _, username = w.GetOwner()
username = f"{domain}\\{username}"
assert p.username() == username
@retry_on_failure()
def test_memory_rss(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
rss = p.memory_info().rss
assert rss == int(w.WorkingSetSize)
@retry_on_failure()
def test_memory_vms(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
vms = p.memory_info().vms
# http://msdn.microsoft.com/en-us/library/aa394372(VS.85).aspx
# ...claims that PageFileUsage is represented in Kilo
# bytes but funnily enough on certain platforms bytes are
# returned instead.
wmi_usage = int(w.PageFileUsage)
if vms not in {wmi_usage, wmi_usage * 1024}:
return pytest.fail(f"wmi={wmi_usage}, psutil={vms}")
def test_create_time(self):
w = wmi.WMI().Win32_Process(ProcessId=self.pid)[0]
p = psutil.Process(self.pid)
wmic_create = str(w.CreationDate.split('.')[0])
psutil_create = time.strftime(
"%Y%m%d%H%M%S", time.localtime(p.create_time())
)
assert wmic_create == psutil_create
# ---
@pytest.mark.skipif(not WINDOWS, reason="WINDOWS only")
| TestProcessWMI |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/node_utils.py | {
"start": 364,
"end": 3566
} | class ____(Protocol):
def __call__(self, i: int, doc: BaseNode) -> str: ...
def default_id_func(i: int, doc: BaseNode) -> str:
return str(uuid.uuid4())
def build_nodes_from_splits(
text_splits: List[str],
document: BaseNode,
ref_doc: Optional[BaseNode] = None,
id_func: Optional[IdFuncCallable] = None,
) -> List[TextNode]:
"""Build nodes from splits."""
ref_doc = ref_doc or document
id_func = id_func or default_id_func
nodes: List[TextNode] = []
"""Calling as_related_node_info() on a document recomputes the hash for the whole text and metadata"""
"""It is not that bad, when creating relationships between the nodes, but is terrible when adding a relationship"""
"""between the node and a document, hence we create the relationship only once here and pass it to the nodes"""
relationships = {NodeRelationship.SOURCE: ref_doc.as_related_node_info()}
for i, text_chunk in enumerate(text_splits):
logger.debug(f"> Adding chunk: {truncate_text(text_chunk, 50)}")
if isinstance(document, ImageDocument):
image_node = ImageNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
image=document.image,
image_path=document.image_path,
image_url=document.image_url,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(image_node) # type: ignore
elif isinstance(document, Document):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_separator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
elif isinstance(document, TextNode):
node = TextNode(
id_=id_func(i, document),
text=text_chunk,
embedding=document.embedding,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_seperator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships=relationships,
)
nodes.append(node)
else:
raise ValueError(f"Unknown document type: {type(document)}")
return nodes
| IdFuncCallable |
python | Farama-Foundation__Gymnasium | gymnasium/envs/phys2d/pendulum.py | {
"start": 8286,
"end": 9298
} | class ____(FunctionalJaxVectorEnv, EzPickle):
"""Jax-based implementation of the vectorized CartPole environment."""
metadata = {"render_modes": ["rgb_array"], "render_fps": 50, "jax": True}
def __init__(
self,
num_envs: int,
render_mode: str | None = None,
max_episode_steps: int = 200,
**kwargs: Any,
):
"""Constructor for the vectorized CartPole where the kwargs are applied to the functional environment."""
EzPickle.__init__(
self,
num_envs=num_envs,
render_mode=render_mode,
max_episode_steps=max_episode_steps,
**kwargs,
)
env = PendulumFunctional(**kwargs)
env.transform(jax.jit)
FunctionalJaxVectorEnv.__init__(
self,
func_env=env,
num_envs=num_envs,
metadata=self.metadata,
render_mode=render_mode,
max_episode_steps=max_episode_steps,
)
| PendulumJaxVectorEnv |
python | pypa__pipenv | pipenv/patched/pip/_internal/metadata/__init__.py | {
"start": 2946,
"end": 5753
} | class ____(Protocol):
NAME: 'Literal["importlib", "pkg_resources"]'
Distribution: Type[BaseDistribution]
Environment: Type[BaseEnvironment]
@functools.lru_cache(maxsize=None)
def select_backend() -> Backend:
if _should_use_importlib_metadata():
from . import importlib
return cast(Backend, importlib)
_emit_pkg_resources_deprecation_if_needed()
from . import pkg_resources
return cast(Backend, pkg_resources)
def get_default_environment() -> BaseEnvironment:
"""Get the default representation for the current environment.
This returns an Environment instance from the chosen backend. The default
Environment instance should be built from ``sys.path`` and may use caching
to share instance state across calls.
"""
return select_backend().Environment.default()
def get_environment(paths: Optional[List[str]]) -> BaseEnvironment:
"""Get a representation of the environment specified by ``paths``.
This returns an Environment instance from the chosen backend based on the
given import paths. The backend must build a fresh instance representing
the state of installed distributions when this function is called.
"""
return select_backend().Environment.from_paths(paths)
def get_directory_distribution(directory: str) -> BaseDistribution:
"""Get the distribution metadata representation in the specified directory.
This returns a Distribution instance from the chosen backend based on
the given on-disk ``.dist-info`` directory.
"""
return select_backend().Distribution.from_directory(directory)
def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution:
"""Get the representation of the specified wheel's distribution metadata.
This returns a Distribution instance from the chosen backend based on
the given wheel's ``.dist-info`` directory.
:param canonical_name: Normalized project name of the given wheel.
"""
return select_backend().Distribution.from_wheel(wheel, canonical_name)
def get_metadata_distribution(
metadata_contents: bytes,
filename: str,
canonical_name: str,
) -> BaseDistribution:
"""Get the dist representation of the specified METADATA file contents.
This returns a Distribution instance from the chosen backend sourced from the data
in `metadata_contents`.
:param metadata_contents: Contents of a METADATA file within a dist, or one served
via PEP 658.
:param filename: Filename for the dist this metadata represents.
:param canonical_name: Normalized project name of the given dist.
"""
return select_backend().Distribution.from_metadata_file_contents(
metadata_contents,
filename,
canonical_name,
)
| Backend |
python | optuna__optuna | optuna/visualization/_timeline.py | {
"start": 474,
"end": 651
} | class ____(NamedTuple):
number: int
start: datetime.datetime
complete: datetime.datetime
state: TrialState
hovertext: str
infeasible: bool
| _TimelineBarInfo |
python | django__django | tests/forms_tests/tests/tests.py | {
"start": 1358,
"end": 2230
} | class ____(TestCase):
"""
The return values of ModelMultipleChoiceFields are QuerySets
"""
def test_empty_queryset_return(self):
"""
If a model's ManyToManyField has blank=True and is saved with no data,
a queryset is returned.
"""
option = ChoiceOptionModel.objects.create(name="default")
form = OptionalMultiChoiceModelForm(
{"multi_choice_optional": "", "multi_choice": [option.pk]}
)
self.assertTrue(form.is_valid())
# The empty value is a QuerySet
self.assertIsInstance(
form.cleaned_data["multi_choice_optional"], models.query.QuerySet
)
# While we're at it, test whether a QuerySet is returned if there *is*
# a value.
self.assertIsInstance(form.cleaned_data["multi_choice"], models.query.QuerySet)
| TestTicket14567 |
python | huggingface__transformers | tests/models/timm_wrapper/test_image_processing_timm_wrapper.py | {
"start": 1021,
"end": 4011
} | class ____(unittest.TestCase):
image_processing_class = TimmWrapperImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.temp_dir = tempfile.TemporaryDirectory()
config = TimmWrapperConfig.from_pretrained("timm/resnet18.a1_in1k")
config.save_pretrained(self.temp_dir.name)
def tearDown(self):
self.temp_dir.cleanup()
def test_load_from_hub(self):
image_processor = TimmWrapperImageProcessor.from_pretrained("timm/resnet18.a1_in1k")
self.assertIsInstance(image_processor, TimmWrapperImageProcessor)
def test_load_from_local_dir(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
self.assertIsInstance(image_processor, TimmWrapperImageProcessor)
def test_image_processor_properties(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
self.assertTrue(hasattr(image_processor, "data_config"))
self.assertTrue(hasattr(image_processor, "val_transforms"))
self.assertTrue(hasattr(image_processor, "train_transforms"))
def test_image_processor_call_numpy(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
single_image = np.random.randint(256, size=(256, 256, 3), dtype=np.uint8)
batch_images = [single_image, single_image, single_image]
# single image
pixel_values = image_processor(single_image).pixel_values
self.assertEqual(pixel_values.shape, (1, 3, 224, 224))
# batch images
pixel_values = image_processor(batch_images).pixel_values
self.assertEqual(pixel_values.shape, (3, 3, 224, 224))
def test_image_processor_call_pil(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
single_image = Image.fromarray(np.random.randint(256, size=(256, 256, 3), dtype=np.uint8))
batch_images = [single_image, single_image, single_image]
# single image
pixel_values = image_processor(single_image).pixel_values
self.assertEqual(pixel_values.shape, (1, 3, 224, 224))
# batch images
pixel_values = image_processor(batch_images).pixel_values
self.assertEqual(pixel_values.shape, (3, 3, 224, 224))
def test_image_processor_call_tensor(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
single_image = torch.from_numpy(np.random.randint(256, size=(3, 256, 256), dtype=np.uint8)).float()
batch_images = [single_image, single_image, single_image]
# single image
pixel_values = image_processor(single_image).pixel_values
self.assertEqual(pixel_values.shape, (1, 3, 224, 224))
# batch images
pixel_values = image_processor(batch_images).pixel_values
self.assertEqual(pixel_values.shape, (3, 3, 224, 224))
| TimmWrapperImageProcessingTest |
python | huggingface__transformers | src/transformers/integrations/bitsandbytes.py | {
"start": 695,
"end": 2204
} | class ____(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, list[torch.Tensor]],
full_layer_name: str | None = None,
model: torch.nn.Module | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
"""
we need to store some parameters to create the quantized weight. For example, bnb requires 6 values that are stored in the checkpoint to recover the quantized weight. So we store them in a dict that it stored in hf_quantizer for now as we can't save it in the op since we create an op per tensor.
"""
value = list(input_dict.values())[0]
value = value[0] if isinstance(value, list) else value
# update param name to get the weights instead of the quantized stats
module, _ = get_module_from_name(model, full_layer_name)
# Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, Conv1D):
value = value.T
old_value = model.get_parameter_or_buffer(full_layer_name)
new_value = bnb.nn.Params4bit(value, requires_grad=False, **old_value.__dict__).to(value.device)
module._is_hf_initialized = True
return {full_layer_name: new_value}
| Bnb4bitQuantize |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 156970,
"end": 170069
} | class ____(test.TestCase):
def _batch_norm(self, x, mean, var, offset, scale, epsilon):
# We compute the batch norm manually in this function because
# nn_impl.batch_normalization does not support float16 yet.
# TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
inv = math_ops.rsqrt(var + epsilon) * scale
y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
return math_ops.cast(y, x.dtype)
def _running_mean(self, old_mean, new_val, factor):
if factor == 1.0:
return new_val
else:
return (1.0 - factor) * old_mean + factor * new_val
def _training_ref(
self,
x,
scale,
offset,
old_mean,
old_var,
exponential_avg_factor,
epsilon,
data_format,
):
if data_format not in ["NHWC", "NCHW"]:
raise ValueError(
"data_format must be NCHW or NHWC, got %s." % data_format
)
if data_format == "NCHW":
x = array_ops.transpose(x, [0, 2, 3, 1])
batch_mean, batch_var = nn_impl.moments(
math_ops.cast(x, scale.dtype), [0, 1, 2], keep_dims=False
)
y = self._batch_norm(x, batch_mean, batch_var, offset, scale, epsilon)
if data_format == "NCHW":
y = array_ops.transpose(y, [0, 3, 1, 2])
# This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
# the denominator in the formula to calculate variance, while
# tf.compat.v1.nn.fused_batch_norm has Bessel's correction built in.
sample_size = math_ops.cast(
array_ops.size(x) / array_ops.size(scale), scale.dtype
)
batch_var_corrected = (
batch_var * sample_size / (math_ops.maximum(sample_size - 1.0, 1.0))
)
mean = self._running_mean(old_mean, batch_mean, exponential_avg_factor)
var = self._running_mean(
old_var, batch_var_corrected, exponential_avg_factor
)
return self.evaluate(y), self.evaluate(mean), self.evaluate(var)
def _test_training(
self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
exponential_avg_factor=1.0,
data_format="NHWC",
):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
if exponential_avg_factor == 1.0:
old_mean_val = None
old_var_val = None
else:
old_mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
old_var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as _:
x = constant_op.constant(x_val, name="x")
scale = constant_op.constant(scale_val, name="scale")
offset = constant_op.constant(offset_val, name="offset")
epsilon = 0.001
y, mean, var = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=old_mean_val,
variance=old_var_val,
epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=True,
)
y_val, mean_val, var_val = self.evaluate([y, mean, var])
y_ref, mean_ref, var_ref = self._training_ref(
x,
scale,
offset,
old_mean_val,
old_var_val,
exponential_avg_factor,
epsilon,
data_format,
)
y_atol = 2e-3 if x_dtype == np.float16 else 1e-3
self.assertAllClose(y_ref, y_val, atol=y_atol)
self.assertAllClose(mean_ref, mean_val, atol=1e-3)
self.assertAllClose(var_ref, var_val, atol=1e-3)
def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
if data_format not in ["NHWC", "NCHW"]:
raise ValueError(
"data_format must be NCHW or NHWC, got %s." % data_format
)
if data_format == "NCHW":
x = array_ops.transpose(x, [0, 2, 3, 1])
y = self._batch_norm(x, mean, var, offset, scale, epsilon)
if data_format == "NCHW":
y = array_ops.transpose(y, [0, 3, 1, 2])
return self.evaluate(y)
def _test_inference(
self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
exponential_avg_factor=1.0,
data_format="NHWC",
):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as _:
x = constant_op.constant(x_val, name="x")
scale = constant_op.constant(scale_val, name="scale")
offset = constant_op.constant(offset_val, name="offset")
mean = constant_op.constant(mean_val, name="mean")
var = constant_op.constant(var_val, name="variance")
epsilon = 0.001
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=False,
)
y_val = self.evaluate(y)
y_ref = self._inference_ref(
x, scale, offset, mean, var, epsilon, data_format
)
# An atol value of 1e-3 is too small for float16's, because some adjacent
# float16 values that y_val can take are greater than 1e-3 apart, e.g.
# 2.16602 and 2.16797.
atol = 2e-3 if x_dtype == np.float16 else 1e-3
self.assertAllClose(y_ref, y_val, atol=atol)
def _runtests(self, x_shape, is_training, gradient_test=False):
use_gpu_vals = [False]
if test.is_gpu_available(cuda_only=True):
use_gpu_vals += [True]
factors = [
1.0,
]
if compat.forward_compatible(2020, 3, 6):
factors += [
0.6,
]
for dtype in [np.float16, np.float32]:
for use_gpu in use_gpu_vals:
for data_format in ["NHWC", "NCHW"]:
if data_format == "NHWC":
scale_shape = x_shape[-1:]
else:
scale_shape = x_shape[1:2]
for exponential_avg_factor in factors:
if gradient_test:
self._test_gradient(
x_shape,
dtype,
scale_shape,
np.float32,
use_gpu=use_gpu,
data_format=data_format,
is_training=is_training,
)
else:
if is_training:
self._test_training(
x_shape,
dtype,
scale_shape,
np.float32,
use_gpu=use_gpu,
data_format=data_format,
exponential_avg_factor=exponential_avg_factor,
)
else:
self._test_inference(
x_shape,
dtype,
scale_shape,
np.float32,
use_gpu=use_gpu,
data_format=data_format,
exponential_avg_factor=exponential_avg_factor,
)
def testInferenceShape1(self):
x_shape = [1, 1, 6, 1]
self._runtests(x_shape, False)
def testInferenceShape2(self):
x_shape = [1, 1, 6, 2]
self._runtests(x_shape, False)
def testInferenceShape3(self):
x_shape = [1, 2, 1, 6]
self._runtests(x_shape, False)
def testInferenceShape4(self):
x_shape = [27, 131, 127, 6]
self._runtests(x_shape, False)
def testInferenceShape5(self):
x_shape = [0, 131, 127, 6]
self._runtests(x_shape, False)
def testTrainingShape1(self):
x_shape = [1, 1, 6, 1]
self._runtests(x_shape, True)
def testTrainingShape2(self):
x_shape = [1, 1, 6, 2]
self._runtests(x_shape, True)
def testTrainingShape3(self):
x_shape = [1, 2, 1, 6]
self._runtests(x_shape, True)
def testTrainingShape4(self):
x_shape = [27, 131, 127, 6]
self._runtests(x_shape, True)
def _test_gradient(
self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
data_format="NHWC",
is_training=True,
):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_val, name="x")
scale = constant_op.constant(scale_val, name="scale")
offset = constant_op.constant(offset_val, name="offset")
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training,
)
if x_dtype != np.float16:
err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
err_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, y, x_shape
)
err_offset = gradient_checker.compute_gradient_error(
offset, scale_shape, y, x_shape
)
else:
x32 = constant_op.constant(x_val, name="x32", dtype=dtypes.float32)
y32, _, _ = nn_impl.fused_batch_norm(
x32,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training,
)
err_x = self._compute_gradient_error_float16(
x, x32, x_shape, y, y32, x_shape
)
err_scale = self._compute_gradient_error_float16(
scale, scale, scale_shape, y, y32, x_shape
)
err_offset = self._compute_gradient_error_float16(
offset, offset, scale_shape, y, y32, x_shape
)
x_err_tolerance = 2e-3 if x_dtype == np.float16 else 1e-3
scale_err_tolerance = 1e-3
self.assertLess(err_x, x_err_tolerance)
self.assertLess(err_scale, scale_err_tolerance)
self.assertLess(err_offset, scale_err_tolerance)
@test_util.run_deprecated_v1
def testBatchNormGradShape1(self):
for is_training in [True, False]:
x_shape = [1, 1, 6, 1]
for dtype in [np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape,
dtype,
[1],
np.float32,
use_gpu=True,
data_format="NHWC",
is_training=is_training,
)
self._test_gradient(
x_shape,
dtype,
[1],
np.float32,
use_gpu=True,
data_format="NCHW",
is_training=is_training,
)
self._test_gradient(
x_shape,
dtype,
[1],
np.float32,
use_gpu=False,
data_format="NHWC",
is_training=is_training,
)
self._test_gradient(
x_shape,
dtype,
[1],
np.float32,
use_gpu=False,
data_format="NCHW",
is_training=is_training,
)
@test_util.run_deprecated_v1
def testBatchNormGradShape2(self):
for is_training in [True, False]:
x_shape = [1, 1, 6, 2]
for dtype in [np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape,
dtype,
[2],
np.float32,
use_gpu=True,
data_format="NHWC",
is_training=is_training,
)
self._test_gradient(
x_shape,
dtype,
[2],
np.float32,
use_gpu=False,
data_format="NHWC",
is_training=is_training,
)
@test_util.run_deprecated_v1
def testBatchNormGradShape3(self):
for is_training in [True, False]:
x_shape = [1, 2, 1, 6]
for dtype in [np.float32]:
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape,
dtype,
[2],
np.float32,
use_gpu=True,
data_format="NCHW",
is_training=is_training,
)
self._test_gradient(
x_shape,
dtype,
[2],
np.float32,
use_gpu=False,
data_format="NCHW",
is_training=is_training,
)
| BatchNormTest |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/models/contexts/pipeline_context.py | {
"start": 1100,
"end": 15339
} | class ____:
"""The pipeline context is used to store configuration for a specific pipeline run."""
_dagger_client: Optional[Client]
_report: Optional[Report | ConnectorReport]
dockerd_service: Optional[Service]
started_at: Optional[datetime]
stopped_at: Optional[datetime]
secrets_to_mask: List[str]
PRODUCTION = bool(os.environ.get("PRODUCTION", False)) # Set this to True to enable production mode (e.g. to send PR comments)
@lru_cache
def get_default_excluded_files(self) -> list[str]:
return (
[".git", "airbyte-ci/connectors/pipelines/*"]
+ glob("**/build", recursive=True)
+ glob("**/.venv", recursive=True)
+ glob("**/secrets", recursive=True)
+ glob("**/__pycache__", recursive=True)
+ glob("**/*.egg-info", recursive=True)
+ glob("**/.vscode", recursive=True)
+ glob("**/.pytest_cache", recursive=True)
+ glob("**/.eggs", recursive=True)
+ glob("**/.mypy_cache", recursive=True)
+ glob("**/.DS_Store", recursive=True)
+ glob("**/airbyte_ci_logs", recursive=True)
+ glob("**/.gradle", recursive=True)
)
def __init__(
self,
pipeline_name: str,
is_local: bool,
git_branch: str,
git_revision: str,
diffed_branch: str,
git_repo_url: str,
report_output_prefix: str,
gha_workflow_run_url: Optional[str] = None,
dagger_logs_url: Optional[str] = None,
pipeline_start_timestamp: Optional[int] = None,
ci_context: Optional[str] = None,
is_ci_optional: bool = False,
slack_webhook: Optional[str] = None,
pull_request: Optional[PullRequest.PullRequest] = None,
ci_report_bucket: Optional[str] = None,
ci_gcp_credentials: Optional[Secret] = None,
ci_git_user: Optional[str] = None,
ci_github_access_token: Optional[Secret] = None,
run_step_options: RunStepOptions = RunStepOptions(),
enable_report_auto_open: bool = True,
secret_stores: Dict[str, SecretStore] | None = None,
) -> None:
"""Initialize a pipeline context.
Args:
pipeline_name (str): The pipeline name.
is_local (bool): Whether the context is for a local run or a CI run.
git_branch (str): The current git branch name.
git_revision (str): The current git revision, commit hash.
diffed_branch (str): The branch to diff against.
git_repo_url (str): The git repository URL.
report_output_prefix (str): The prefix to use for the report output.
gha_workflow_run_url (Optional[str], optional): URL to the github action workflow run. Only valid for CI run. Defaults to None.
dagger_logs_url (Optional[str], optional): URL to the dagger logs. Only valid for CI run. Defaults to None.
pipeline_start_timestamp (Optional[int], optional): Timestamp at which the pipeline started. Defaults to None.
ci_context (Optional[str], optional): Pull requests, workflow dispatch or nightly build. Defaults to None.
is_ci_optional (bool, optional): Whether the CI is optional. Defaults to False.
slack_webhook (Optional[str], optional): Slack webhook to send messages to. Defaults to None.
pull_request (PullRequest, optional): The pull request object if the pipeline was triggered by a pull request. Defaults to None.
"""
self.pipeline_name = pipeline_name
self.is_local = is_local
self.git_branch = git_branch
self.git_revision = git_revision
self.diffed_branch = diffed_branch
self.git_repo_url = git_repo_url
self.report_output_prefix = report_output_prefix
self.gha_workflow_run_url = gha_workflow_run_url
self.dagger_logs_url = dagger_logs_url
self.pipeline_start_timestamp = pipeline_start_timestamp
self.created_at = datetime.utcnow()
self.ci_context = ci_context
self.state = ContextState.INITIALIZED
self.is_ci_optional = is_ci_optional
self.slack_webhook = slack_webhook
self.pull_request = pull_request
self.logger = logging.getLogger(self.pipeline_name)
self._dagger_client = None
self._report = None
self.dockerd_service = None
self.ci_gcp_credentials = ci_gcp_credentials
self.ci_report_bucket = ci_report_bucket
self.ci_git_user = ci_git_user
self.ci_github_access_token = ci_github_access_token
self.started_at = None
self.stopped_at = None
self.secrets_to_mask = []
self.run_step_options = run_step_options
self.enable_report_auto_open = enable_report_auto_open
self.secret_stores = secret_stores if secret_stores else {}
update_commit_status_check(**self.github_commit_status)
@property
def dagger_client(self) -> Client:
assert self._dagger_client is not None, "The dagger client was not set on this PipelineContext"
return self._dagger_client
@dagger_client.setter
def dagger_client(self, dagger_client: Client) -> None:
self._dagger_client = dagger_client
@property
def is_ci(self) -> bool:
return self.is_local is False
@property
def is_pr(self) -> bool:
return self.ci_context == CIContext.PULL_REQUEST
@property
def repo(self) -> GitRepository:
return self.dagger_client.git(AIRBYTE_GITHUB_REPO_URL, keep_git_dir=True)
@property
def report(self) -> Report | ConnectorReport | None:
return self._report
@report.setter
def report(self, report: Report | ConnectorReport) -> None:
self._report = report
@property
def java_log_scrub_pattern_secret(self) -> Optional[DaggerSecret]:
if not self.secrets_to_mask:
return None
return self.dagger_client.set_secret("log_scrub_pattern", java_log_scrub_pattern(self.secrets_to_mask))
@property
def github_commit_status(self) -> dict:
"""Build a dictionary used as kwargs to the update_commit_status_check function."""
target_url: Optional[str] = self.gha_workflow_run_url
if (
self.remote_storage_enabled
and self.state not in [ContextState.RUNNING, ContextState.INITIALIZED]
and isinstance(self.report, ConnectorReport)
):
target_url = self.report.html_report_url
return {
"sha": self.git_revision,
"state": self.state.value["github_state"],
"target_url": target_url,
"description": self.state.value["description"],
"context": self.pipeline_name,
"should_send": self._should_send_status_check(),
"logger": self.logger,
"is_optional": self.is_ci_optional,
}
@property
def should_send_slack_message(self) -> bool:
return self.slack_webhook is not None
@property
def has_dagger_cloud_token(self) -> bool:
return "_EXPERIMENTAL_DAGGER_CLOUD_TOKEN" in os.environ
@property
def dagger_cloud_url(self) -> Optional[str]:
"""Gets the link to the Dagger Cloud runs page for the current commit."""
if self.is_local or not self.has_dagger_cloud_token:
return None
return f"https://alpha.dagger.cloud/changeByPipelines?filter=dagger.io/git.ref:{self.git_revision}"
@property
def remote_storage_enabled(self) -> bool:
return bool(self.ci_report_bucket) and bool(self.ci_gcp_credentials)
def _should_send_status_check(self) -> bool:
should_send = self.is_pr or any(
self.pipeline_name.startswith(override) for override in MANUAL_PIPELINE_STATUS_CHECK_OVERRIDE_PREFIXES
)
self.logger.info(f"Should send status check: {should_send}")
return should_send
def get_repo_file(self, file_path: str) -> File:
"""Get a file from the current repository.
The file is extracted from the host file system.
Args:
file_path (str): Path to the file to get.
Returns:
Path: The selected repo file.
"""
return self.dagger_client.host().file(file_path)
def get_repo_dir(self, subdir: str = ".", exclude: Optional[List[str]] = None, include: Optional[List[str]] = None) -> Directory:
"""Get a directory from the current repository.
The directory is extracted from the host file system.
A couple of files or directories that could corrupt builds are exclude by default (check DEFAULT_EXCLUDED_FILES).
Args:
subdir (str, optional): Path to the subdirectory to get. Defaults to "." to get the full repository.
exclude ([List[str], optional): List of files or directories to exclude from the directory. Defaults to None.
include ([List[str], optional): List of files or directories to include in the directory. Defaults to None.
Returns:
Directory: The selected repo directory.
"""
if exclude is None:
exclude = self.get_default_excluded_files()
else:
exclude += self.get_default_excluded_files()
exclude = list(set(exclude))
exclude.sort() # sort to make sure the order is always the same to not burst the cache. Casting exclude to set can change the order
if subdir != ".":
subdir = f"{subdir}/" if not subdir.endswith("/") else subdir
exclude = [f.replace(subdir, "") for f in exclude if subdir in f]
return self.dagger_client.host().directory(subdir, exclude=exclude, include=include)
def create_slack_message(self) -> str:
raise NotImplementedError()
def get_slack_channels(self) -> List[str]:
raise NotImplementedError()
async def __aenter__(self) -> PipelineContext:
"""Perform setup operation for the PipelineContext.
Updates the current commit status on Github.
Raises:
Exception: An error is raised when the context was not initialized with a Dagger client
Returns:
PipelineContext: A running instance of the PipelineContext.
"""
if self.dagger_client is None:
raise Exception("A Pipeline can't be entered with an undefined dagger_client")
self.state = ContextState.RUNNING
self.started_at = datetime.utcnow()
self.logger.info("Caching the latest CDK version...")
await asyncify(update_commit_status_check)(**self.github_commit_status)
if self.should_send_slack_message:
# Using a type ignore here because the should_send_slack_message property is checking for non nullity of the slack_webhook
await asyncify(send_message_to_webhook)(self.create_slack_message(), self.get_slack_channels(), self.slack_webhook) # type: ignore
return self
@staticmethod
def determine_final_state(report: Optional[Report], exception_value: Optional[BaseException]) -> ContextState:
"""Determine the final state of the context from the report or the exception value.
Args:
report (Optional[Report]): The pipeline report if any.
exception_value (Optional[BaseException]): The exception value if an exception was raised in the context execution, None otherwise.
Returns:
ContextState: The final state of the context.
"""
if exception_value is not None or report is None:
return ContextState.ERROR
if report is not None and report.considered_failed_steps:
return ContextState.FAILURE
if report is not None and report.success:
return ContextState.SUCCESSFUL
raise Exception(
f"The final state of the context could not be determined for the report and exception value provided. Report: {report}, Exception: {exception_value}"
)
async def __aexit__(
self, exception_type: Optional[type[BaseException]], exception_value: Optional[BaseException], traceback: Optional[TracebackType]
) -> bool:
"""Perform teardown operation for the PipelineContext.
On the context exit the following operations will happen:
- Log the error value if an error was handled.
- Log the test report.
- Update the commit status check on GitHub if running in a CI environment.
It should gracefully handle all the execution errors that happened and always upload a test report and update commit status check.
Args:
exception_type (Optional[type[BaseException]]): The exception type if an exception was raised in the context execution, None otherwise.
exception_value (Optional[BaseException]): The exception value if an exception was raised in the context execution, None otherwise.
traceback (Optional[TracebackType]): The traceback if an exception was raised in the context execution, None otherwise.
Returns:
bool: Whether the teardown operation ran successfully.
"""
if exception_value:
self.logger.error("An error was handled by the Pipeline", exc_info=True)
if self.report is None:
self.logger.error("No test report was provided. This is probably due to an upstream error")
self.report = Report(self, steps_results=[])
self.state = self.determine_final_state(self.report, exception_value)
self.stopped_at = datetime.utcnow()
self.report.print()
await asyncify(update_commit_status_check)(**self.github_commit_status)
if self.should_send_slack_message:
# Using a type ignore here because the should_send_slack_message property is checking for non nullity of the slack_webhook
await asyncify(send_message_to_webhook)(
self.create_slack_message(),
self.get_slack_channels(),
self.slack_webhook, # type: ignore
)
# supress the exception if it was handled
return True
| PipelineContext |
python | getsentry__sentry | src/sentry/integrations/discord/webhooks/handler.py | {
"start": 369,
"end": 1346
} | class ____:
"""
Abstract class defining the shared interface of interaction handlers,
along with some helper methods.
"""
def __init__(self, request: DiscordRequest) -> None:
"""
Request must be *verified*.
"""
self.request: DiscordRequest = request
def send_message(self, message: str | DiscordMessageBuilder, update: bool = False) -> Response:
"""Sends a new follow up message."""
response_type = DiscordResponseTypes.UPDATE if update else DiscordResponseTypes.MESSAGE
if isinstance(message, str):
message = DiscordMessageBuilder(
content=message, flags=DiscordMessageFlags().set_ephemeral()
)
return Response(
{
"type": response_type,
"data": message.build(),
},
status=200,
)
def handle(self) -> Response:
raise NotImplementedError
| DiscordInteractionHandler |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 109638,
"end": 112106
} | class ____(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
| SendmsgStreamTests |
python | django__django | tests/file_uploads/models.py | {
"start": 31,
"end": 119
} | class ____(models.Model):
testfile = models.FileField(upload_to="test_upload")
| FileModel |
python | viewflow__viewflow | viewflow/jsonstore.py | {
"start": 6635,
"end": 6699
} | class ____(JSONFieldMixin, fields.EmailField):
pass
| EmailField |
python | fluentpython__example-code | 21-class-metaprog/evaltime.py | {
"start": 402,
"end": 527
} | class ____():
print('<[7]> ClassThree body')
def method_y(self):
print('<[8]> ClassThree.method_y')
| ClassThree |
python | bokeh__bokeh | src/bokeh/models/widgets/tables.py | {
"start": 20484,
"end": 20727
} | class ____(RowAggregator):
''' Largest value across multiple rows.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| MaxAggregator |
python | doocs__leetcode | solution/3400-3499/3477.Fruits Into Baskets II/Solution.py | {
"start": 0,
"end": 382
} | class ____:
def numOfUnplacedFruits(self, fruits: List[int], baskets: List[int]) -> int:
n = len(fruits)
vis = [False] * n
ans = n
for x in fruits:
for i, y in enumerate(baskets):
if y >= x and not vis[i]:
vis[i] = True
ans -= 1
break
return ans
| Solution |
python | falconry__falcon | falcon/media/urlencoded.py | {
"start": 291,
"end": 3097
} | class ____(BaseHandler):
"""URL-encoded form data handler.
This handler parses ``application/x-www-form-urlencoded`` HTML forms to a
``dict``, similar to how URL query parameters are parsed. An empty body
will be parsed as an empty dict.
When deserializing, this handler will raise :class:`falcon.MediaMalformedError`
if the request payload cannot be parsed as ASCII or if any of the URL-encoded
strings in the payload are not valid UTF-8.
As documented for :any:`urllib.parse.urlencode`, when serializing, the
media object must either be a ``dict`` or a sequence of two-element
``tuple``'s. If any values in the media object are sequences, each
sequence element is converted to a separate parameter.
Keyword Arguments:
keep_blank (bool): Whether to keep empty-string values from the form
when deserializing.
csv (bool): Whether to split comma-separated form values into list
when deserializing.
"""
def __init__(self, keep_blank: bool = True, csv: bool = False) -> None:
self._keep_blank = keep_blank
self._csv = csv
# NOTE(kgriffs): To be safe, only enable the optimized protocol when
# not subclassed.
if type(self) is URLEncodedFormHandler:
self._serialize_sync = self.serialize
self._deserialize_sync = self._deserialize
# NOTE(kgriffs): Make content_type a kwarg to support the
# Request.render_body() shortcut optimization.
def serialize(self, media: Any, content_type: str | None = None) -> bytes:
# NOTE(vytas): Setting doseq to True to mirror the parse_query_string
# behaviour.
return urlencode(media, doseq=True).encode()
def _deserialize(self, body: bytes) -> Any:
try:
# NOTE(kgriffs): According to
# https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#application%2Fx-www-form-urlencoded-encoding-algorithm
# the
# body should be US-ASCII. Enforcing this also helps
# catch malicious input.
body_str = body.decode('ascii')
return parse_query_string(
body_str, keep_blank=self._keep_blank, csv=self._csv
)
except Exception as err:
raise errors.MediaMalformedError('URL-encoded') from err
def deserialize(
self,
stream: ReadableIO,
content_type: str | None,
content_length: int | None,
) -> Any:
return self._deserialize(stream.read())
async def deserialize_async(
self,
stream: AsyncReadableIO,
content_type: str | None,
content_length: int | None,
) -> Any:
return self._deserialize(await stream.read())
| URLEncodedFormHandler |
python | OmkarPathak__pygorithm | pygorithm/geometry/rect2.py | {
"start": 349,
"end": 19378
} | class ____(object):
"""
A rectangle. Uses SAT collision against polygons and
broad-phase collision against other rectangles.
Rectangles are fast to construct and have very fast
rectangle-rectangle collision detection.
Rect2 is designed to have almost exactly the opposite performance
characteristics as Polygon2 when doing collision against
Polygon2s: Fast to construct and complex on first call with
many operations incurring expensive recalculations.
.. caution::
Collision detection against a polygon with cause
initialization of the polygon representation of a
rectangle. This has the noticeable performance
characteristics that are seen whenever a polygon
is constructed (see :py:class:`.Polygon2`).
This operation recurrs only if width and height
were modified.
:ivar mincorner: the position of this polygon
:vartype mincorner: :class:`pygorithm.geometry.vector2.Vector2`
"""
def __init__(self, width, height, mincorner = None):
"""
Create a new rectangle of width and height.
If ``mincorner is None``, the origin is assumed.
:param width: width of this rect
:type width: :class:`numbers.Number`
:param height: height of this rect
:type height: :class:`numbers.Number`
:param mincorner: the position of this rect
:type mincorner: :class:`pygorithm.geometry.vector2.Vector2` or None
:raises ValueError: if width or height are not strictly positive
"""
self.width = width
self.height = height
self.mincorner = mincorner if mincorner is not None else vector2.Vector2(0, 0)
@property
def polygon(self):
"""
Get the polygon representation of this rectangle, without
the offset. Lazily initialized and up-to-date with width
and height.
.. caution::
This does not include the :py:attr:`.mincorner`
(which should be passed as offset for polygon operations)
:returns: polygon representation of this rectangle
:rtype: :class:`pygorithm.geometry.polygon2.Polygon2`
"""
if self._polygon is None:
self._polygon = polygon2.Polygon2([ vector2.Vector2(0, 0),
vector2.Vector2(0, self._height),
vector2.Vector2(self._width, self._height),
vector2.Vector2(self._width, 0) ])
return self._polygon
@property
def width(self):
"""
Get or set the width of this rect.
.. caution::
Setting the width of the rectangle will remove the polygon
caching required for rectangle-polygon collision.
:returns: width of this rect
:rtype: :class:`numbers.Number`
:raises ValueError: if trying to set ``width <= 1e-07``
"""
return self._width
@width.setter
def width(self, value):
if value <= 1e-07:
raise ValueError('width cannot be <= 1e-07 but is {}'.format(value))
self._width = value
self._polygon = None
@property
def height(self):
"""
Get or set the height of this rect
.. caution::
Setting the height of the rectangle will remove the cached
operations required for rectangle-polygon collision.
:returns: height of this rect
:rtype: :class:`numbers.Number`
:raises ValueError: if trying to set ``height <= 1e-07``
"""
return self._height
@height.setter
def height(self, value):
if value <= 1e-07:
raise ValueError("height cannot be <= 1e07 but is {}".format(value))
self._height = value
self._polygon = None
@property
def area(self):
"""
Get the area of this rect
:returns: area of this rect
:rtype: :class:`numbers.Number`
"""
return self._width * self._height
@staticmethod
def project_onto_axis(rect, axis):
"""
Project the rect onto the specified axis.
.. tip::
This function is extremely fast for vertical or
horizontal axises.
:param rect: the rect to project
:type rect: :class:`pygorithm.geometry.rect2.Rect2`
:param axis: the axis to project onto (normalized)
:type axis: :class:`pygorithm.geometry.vector2.Vector2`
:returns: the projection of the rect along axis
:rtype: :class:`pygorithm.geometry.axisall.AxisAlignedLine`
"""
if axis.x == 0:
return axisall.AxisAlignedLine(axis, rect.mincorner.y * axis.y, (rect.mincorner.y + rect.height) * axis.y)
elif axis.y == 0:
return axisall.AxisAlignedLine(axis, rect.mincorner.x * axis.x, (rect.mincorner.x + rect.width) * axis.x)
p1 = rect.mincorner.dot(axis)
p2 = vector2.Vector2(rect.mincorner.x + rect.width, rect.mincorner.y).dot(axis)
p3 = vector2.Vector2(rect.mincorner.x + rect.width, rect.mincorner.y + rect.height).dot(axis)
p4 = vector2.Vector2(rect.mincorner.x, rect.mincorner.y + rect.height).dot(axis)
_min = min(p1, p2, p3, p4)
_max = max(p1, p2, p3, p4)
return axisall.AxisAlignedLine(axis, _min, _max)
@staticmethod
def contains_point(rect, point):
"""
Determine if the rect contains the point
Distinguish between points that are on the edge of the
rect and those that are not.
.. tip::
This will never return ``True, True``
:param rect: the rect
:type rect: :class:`pygorithm.geometry.rect2.Rect2`
:param point: the point
:type point: :class:`pygorithm.geometry.vector2.Vector2`
:returns: point on edge, point inside
:rtype: bool, bool
"""
edge_x = math.isclose(rect.mincorner.x, point.x, abs_tol=1e-07) or math.isclose(rect.mincorner.x + rect.width, point.x, abs_tol=1e-07)
edge_y = math.isclose(rect.mincorner.y, point.y, abs_tol=1e-07) or math.isclose(rect.mincorner.y + rect.height, point.y, abs_tol=1e-07)
if edge_x and edge_y:
return True, False
contains = (edge_x or (point.x > rect.mincorner.x and point.x < rect.mincorner.x + rect.width)) and \
(edge_y or (point.y > rect.mincorner.y and point.y < rect.mincorner.y + rect.height))
if not contains:
return False, False
elif edge_x or edge_y:
return True, False
else:
return False, True
@classmethod
def _find_intersection_rects(cls, rect1, rect2, find_mtv = True):
"""
Find the intersection between two rectangles.
Not intended for direct use. See
:py:meth:`.find_intersection`
:param rect1: first rectangle
:type rect1: :class:`pygorithm.geometry.rect2.Rect2`
:param rect2: second rectangle
:type rect2: :class:`pygorithm.geometry.rect2.Rect2`
:param find_mtv: False to never find mtv (may allow small performance improvement)
:type find_mtv: bool
:returns: (touching, overlapping, (mtv distance, mtv axis))
:rtype: (bool, bool, (:class:`numbers.Number`, :class:`pygorithm.geometry.vector2.Vector2`) or None)
"""
# caution to make sure isclose checks are before greater than/less than checks!
# you could save which edge here if you needed that information
x_touching = math.isclose(rect1.mincorner.x + rect1.width, rect2.mincorner.x, abs_tol=1e-07)
x_touching = x_touching or math.isclose(rect1.mincorner.x, rect2.mincorner.x + rect2.width, abs_tol=1e-07)
y_touching = math.isclose(rect1.mincorner.y, rect2.mincorner.y + rect2.height, abs_tol=1e-07)
y_touching = y_touching or math.isclose(rect1.mincorner.y + rect1.height, rect2.mincorner.y, abs_tol=1e-07)
if x_touching and y_touching:
return True, False, None # sharing 1 corner
# we don't need to calculate if the touching is True
x_overlap = False if x_touching else (rect1.mincorner.x < rect2.mincorner.x and rect1.mincorner.x + rect1.width > rect2.mincorner.x) or \
(rect2.mincorner.x < rect1.mincorner.x and rect2.mincorner.x + rect2.width > rect1.mincorner.x)
y_overlap = False if y_touching else (rect1.mincorner.y < rect2.mincorner.y and rect1.mincorner.y + rect1.height > rect2.mincorner.y) or \
(rect2.mincorner.y < rect1.mincorner.y and rect2.mincorner.y + rect2.height > rect1.mincorner.y)
if x_touching:
if y_overlap:
return True, False, None # sharing an x edge
else:
return False, False, None
elif y_touching:
if x_overlap:
return True, False, None # sharing a y edge
else:
return False, False, None
elif not x_overlap or not y_overlap:
return False, False, None
# They overlap
if not find_mtv:
return False, True, None
# four options:
# move rect1 min x to rect2 max x
# move rect1 max x to rect2 min x
# move rect1 min y to rect2 max y
# move rect1 max y to rect2 min y
#
# we will look at all 4 of these and choose
# the one that requires the least movement
opt1 = rect2.mincorner.x + rect2.width - rect1.mincorner.x
opt2 = rect2.mincorner.x - rect1.mincorner.x - rect1.width
opt3 = rect2.mincorner.y + rect2.height - rect1.mincorner.y
opt4 = rect2.mincorner.y - rect1.mincorner.y - rect1.height
abs1 = abs(opt1)
abs2 = abs(opt2)
abs3 = abs(opt3)
abs4 = abs(opt4)
# the following could be simplified by making an array, at a
# minor performance hit
if abs1 < abs2:
if abs1 < abs3:
if abs1 < abs4:
return False, True, (opt1, vector2.Vector2(1, 0))
else:
return False, True, (opt4, vector2.Vector2(0, 1))
else:
if abs3 < abs4:
return False, True, (opt3, vector2.Vector2(0, 1))
else:
return False, True, (opt4, vector2.Vector2(0, 1))
else:
if abs2 < abs3:
if abs2 < abs4:
return False, True, (opt2, vector2.Vector2(1, 0))
else:
return False, True, (opt4, vector2.Vector2(0, 1))
else:
if abs3 < abs4:
return False, True, (opt3, vector2.Vector2(0, 1))
else:
return False, True, (opt4, vector2.Vector2(0, 1))
@classmethod
def _find_intersection_rect_poly(cls, rect, poly, offset, find_mtv = True):
"""
Find the intersection between a rect and polygon.
Not intended for direct use. See
:py:meth:`.find_intersection`
:param rect: rectangle
:type rect: :class:`pygorithm.geometry.rect2.Rect2`
:param poly: polygon
:type poly: :class:`pygorithm.geometry.polygon2.Polygon2`
:param offset: offset for the polygon
:type offset: :class:`pygorithm.geometry.vector2.Vector2`
:param find_mtv: False to never find mtv (may allow small performance improvement)
:type find_mtv: bool
:returns: (touching, overlapping, (mtv distance, mtv axis))
:rtype: (bool, bool, (:class:`numbers.Number`, :class:`pygorithm.geometry.vector2.Vector2`) or None)
"""
return polygon2.Polygon2.find_intersection(rect.polygon, poly, rect.mincorner, offset, find_mtv)
@classmethod
def _find_intersection_poly_rect(cls, poly, offset, rect, find_mtv = True):
"""
Find the intersection between a polygon and rect.
Not intended for direct use. See
:py:meth:`.find_intersection`
:param poly: polygon
:type poly: :class:`pygorithm.geometry.polygon2.Polygon2`
:param offset: offset for the polygon
:type offset: :class:`pygorithm.geometry.vector2.Vector2`
:param rect: rectangle
:type rect: :class:`pygorithm.geometry.rect2.Rect2`
:param find_mtv: False to never find mtv (may allow small performance improvement)
:type find_mtv: bool
:returns: (touching, overlapping, (mtv distance, mtv axis))
:rtype: (bool, bool, (:class:`numbers.Number`, :class:`pygorithm.geometry.vector2.Vector2`) or None)
"""
return polygon2.Polygon2.find_intersection(poly, rect.polygon, offset, rect.mincorner, find_mtv)
@classmethod
def find_intersection(cls, *args, **kwargs):
"""
Determine the state of intersection between a rect and a
polygon.
For Rect-Polygon intersection:
Must be passed in 3 arguments - a :py:class:`.Rect2`,
a :py:class:`.Polygon2`, and a
:py:class:`.Vector2`. The vector must come immediately
after the polygon, but the rect can be either the first or last unnamed argument.
If it is the first argument, the mtv is against the rectangle. If it is the last
argument, the mtv is against the polygon.
For Rect-Rect intersection:
Must be passed in 2 arguments (both rects).
.. note::
The first argument is checked with ``isinstance(arg, Rect2)``. If this is
False, the first argument is assumed to be a Polygon2. If you want to
use a compatible rectangle class for which this check would fail, you
can call
:py:meth:`._find_intersection_rect_poly`
directly or pass the polygon first and invert the resulting mtv (if
one is found). If two unnamed arguments are provided, they are assumed
to be both rects without further checks.
Examples:
.. code-block:: python
from pygorithm.geometry import (vector2, polygon2, rect2)
octogon = polygon2.Polygon2.from_regular(8, 1)
oct_offset = vector2.Vector2(0.5, 0)
unit_square = rect2.Rect2(1, 1)
# find mtv for square against octogon
touching, overlapping, mtv = rect2.Rect2.find_intersection(unit_square, octogon, oct_offset)
# find mtv for octogon against square
touching, overlapping, mtv = rect2.Rect2.find_intersection(octogon, oct_offset, unit_square)
# find intersection but skip mtv (two options)
touching, overlapping, alwaysNone = rect2.Rect2.find_intersection(unit_square, octogon, oct_offset, find_mtv=False)
touching, overlapping, alwaysNone = rect2.Rect2.find_intersection(octogon, oct_offset, unit_square, find_mtv=False)
big_square = rect2.Rect2(2, 2, vector2.Vector2(-1.5, 0))
# find mtv for square against big square
touching, overlapping, mtv = rect2.Rect2.find_intersection(unit_square, big_square)
# find mtv for big square against square
touching, overlapping, mtv = rect2.Rect2.find_intersection(big_square, unit_square)
:param find_mtv: if mtv should be found where possible (default ``True``)
:type find_mtv: bool
:param args: 2 arguments for rect-rect, 3 arguments for rect-polygon (see above)
:type args: list
:returns: (touching, overlapping, (mtv distance, mtv axis))
:rtype: (bool, bool, (:class:`numbers.Number`, :class:`pygorithm.geometry.vector2.Vector2`) or None)
"""
find_mtv = kwargs.get("find_mtv", True)
if len(args) == 2:
return cls._find_intersection_rects(args[0], args[1], find_mtv)
else:
assert len(args) == 3, "Incorrect number of unnamed arguments to Rect2.find_intersection (got {} expected 2 or 3)".format(len(args))
if isinstance(args[0], Rect2):
return cls._find_intersection_rect_poly(args[0], args[1], args[2], find_mtv)
else:
return cls._find_intersection_poly_rect(args[0], args[1], args[2], find_mtv)
def __repr__(self):
"""
Create an unambiguous representation of this rectangle.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
unit_square = rect2.Rect2(1, 1, vector2.Vector2(3, 4))
# prints rect2(width=1, height=1, mincorner=vector2(x=3, y=4))
print(repr(unit_square))
:returns: unambiguous representation of this rectangle
:rtype: string
"""
return "rect2(width={}, height={}, mincorner={})".format(self._width, self._height, repr(self.mincorner))
def __str__(self):
"""
Create a human readable representation of this rectangle
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
unit_square = rect2.Rect2(1, 1, vector2.Vector2(3, 4))
ugly_rect = rect2.Rect2(0.7071234, 0.7079876, vector2.Vector2(0.56789123, 0.876543))
# prints rect(1x1 at <3, 4>)
print(str(unit_square))
# prints rect(0.707x0.708 at <0.568, 0.877>)
print(str(ugly_rect))
:returns: human-readable representation of this rectangle
:rtype: string
"""
pretty_width = round(self._width * 1000) / 1000
if pretty_width == math.floor(pretty_width):
pretty_width = math.floor(pretty_width)
pretty_height = round(self._height * 1000) / 1000
if pretty_height == math.floor(pretty_height):
pretty_height = math.floor(pretty_height)
return "rect({}x{} at {})".format(pretty_width, pretty_height, str(self.mincorner)) | Rect2 |
python | doocs__leetcode | solution/2900-2999/2940.Find Building Where Alice and Bob Can Meet/Solution.py | {
"start": 0,
"end": 441
} | class ____:
__slots__ = ["n", "c"]
def __init__(self, n: int):
self.n = n
self.c = [inf] * (n + 1)
def update(self, x: int, v: int):
while x <= self.n:
self.c[x] = min(self.c[x], v)
x += x & -x
def query(self, x: int) -> int:
mi = inf
while x:
mi = min(mi, self.c[x])
x -= x & -x
return -1 if mi == inf else mi
| BinaryIndexedTree |
python | pandas-dev__pandas | asv_bench/benchmarks/algorithms.py | {
"start": 254,
"end": 1901
} | class ____:
params = [
[True, False],
[True, False],
[
"int64",
"uint64",
"float64",
"object",
"object_str",
"datetime64[ns]",
"datetime64[ns, tz]",
"Int64",
"boolean",
"string[pyarrow]",
],
]
param_names = ["unique", "sort", "dtype"]
def setup(self, unique, sort, dtype):
N = 10**5
if dtype in ["int64", "uint64", "Int64", "object"]:
data = pd.Index(np.arange(N), dtype=dtype)
elif dtype == "float64":
data = pd.Index(np.random.randn(N), dtype=dtype)
elif dtype == "boolean":
data = pd.array(np.random.randint(0, 2, N), dtype=dtype)
elif dtype == "datetime64[ns]":
data = pd.date_range("2011-01-01", freq="h", periods=N)
elif dtype == "datetime64[ns, tz]":
data = pd.date_range("2011-01-01", freq="h", periods=N, tz="Asia/Tokyo")
elif dtype == "object_str":
data = pd.Index([f"i-{i}" for i in range(N)], dtype=object)
elif dtype == "string[pyarrow]":
data = pd.array(
pd.Index([f"i-{i}" for i in range(N)], dtype=object),
dtype="string[pyarrow]",
)
else:
raise NotImplementedError
if not unique:
data = data.repeat(5)
self.data = data
def time_factorize(self, unique, sort, dtype):
pd.factorize(self.data, sort=sort)
def peakmem_factorize(self, unique, sort, dtype):
pd.factorize(self.data, sort=sort)
| Factorize |
python | getsentry__sentry | tests/sentry/api/serializers/test_organization_member_invite.py | {
"start": 202,
"end": 2122
} | class ____(TestCase):
def setUp(self) -> None:
self.org = self.create_organization()
self.email = "user@email.com"
def test_simple(self) -> None:
member_invite = self.create_member_invite(
organization=self.org, email=self.email, organization_member_team_data=[]
)
result = serialize(member_invite, None, OrganizationMemberInviteSerializer())
assert result == {
"id": str(member_invite.id),
"email": self.email,
"orgRole": member_invite.role,
"expired": False,
"idpProvisioned": member_invite.idp_provisioned,
"idpRoleRestricted": member_invite.idp_role_restricted,
"ssoLinked": member_invite.sso_linked,
"ssoInvalid": member_invite.sso_invalid,
"memberLimitRestricted": member_invite.member_limit_restricted,
"partnershipRestricted": member_invite.partnership_restricted,
"teams": [],
"dateCreated": member_invite.date_added,
"inviteStatus": member_invite.get_invite_status_name(),
"inviterName": None,
}
def test_teams(self) -> None:
team = self.create_team(organization=self.org)
member_invite = self.create_member_invite(
organization=self.org,
email=self.email,
organization_member_team_data=[{"id": team.id, "slug": team.slug}],
)
result = serialize(member_invite, None)
assert result["teams"] == [{"id": team.id, "slug": team.slug}]
def test_inviter(self) -> None:
user = self.create_user()
member_invite = self.create_member_invite(
organization=self.org, email=self.email, inviter_id=user.id
)
result = serialize(member_invite, None)
assert result["inviterName"] == user.get_display_name()
| OrganizationMemberInviteSerializerTest |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/90_docstring_class.py | {
"start": 0,
"end": 27
} | class ____:
"docstring"
| Foo |
python | pypa__pip | src/pip/_vendor/rich/segment.py | {
"start": 22710,
"end": 24743
} | class ____:
def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
"""A simple renderable containing a number of lines of segments. May be used as an intermediate
in rendering process.
Args:
lines (Iterable[List[Segment]]): Lists of segments forming lines.
new_lines (bool, optional): Insert new lines after each line. Defaults to False.
"""
self.lines = list(lines)
self.new_lines = new_lines
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.new_lines:
new_line = Segment.line()
for line in self.lines:
yield from line
yield new_line
else:
for line in self.lines:
yield from line
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console
from pip._vendor.rich.syntax import Syntax
from pip._vendor.rich.text import Text
code = """from rich.console import Console
console = Console()
text = Text.from_markup("Hello, [bold magenta]World[/]!")
console.print(text)"""
text = Text.from_markup("Hello, [bold magenta]World[/]!")
console = Console()
console.rule("rich.Segment")
console.print(
"A Segment is the last step in the Rich render process before generating text with ANSI codes."
)
console.print("\nConsider the following code:\n")
console.print(Syntax(code, "python", line_numbers=True))
console.print()
console.print(
"When you call [b]print()[/b], Rich [i]renders[/i] the object in to the following:\n"
)
fragments = list(console.render(text))
console.print(fragments)
console.print()
console.print("The Segments are then processed to produce the following output:\n")
console.print(text)
console.print(
"\nYou will only need to know this if you are implementing your own Rich renderables."
)
| SegmentLines |
python | pytorch__pytorch | torch/utils/checkpoint.py | {
"start": 69925,
"end": 71948
} | class ____:
"""Any checkpointed regions encountered by backward under the same instance
of this context manager will trigger recompute at most once, even if
there are multiple calls to backward.
Backward calls under the same instance of this context manager must execute
over non-overlapping regions of the backward graph even if retain_graph=True.
In particular, any two backward call cannot use the same saved activation for
gradient computation.
.. note::
This context manager only affects checkpoint with use_reentrant=False, and
is a no-op otherwise.
"""
def __enter__(self) -> "GraphExecGroup":
if torch._C._get_graph_exec_group() is not None:
raise RuntimeError(
"GraphExecGroup contexts cannot be nested. "
f"Already inside group {torch._C._get_graph_exec_group()}"
)
torch._C._set_graph_exec_group(self)
return self
def __exit__(self, *args: object) -> None:
torch._C._set_graph_exec_group(None)
@classmethod
def _get_current_group(cls) -> Optional["GraphExecGroup"]:
# Private API to be used by utils like AC
return torch._C._get_graph_exec_group()
# Note: [compiled autograd and checkpoint unpack hook]
# When tracing via compiled autograd, this hook will be visible to the
# compiler if the forward of this checkpointed region ran in eager.
# If the forward had ran under compile, it would have been wrapped in a
# higher order op. See Note: [torch.compile and checkpoint].
#
# Since we run the recomputation hook under a enable_grad context,
# AOTDispatch will trace a joint graph for this hook, and may
# save different activations than in eager. This conflicts with the
# strict activation count checks in `frame.check_recomputed_tensors_match`.
# So, we disable this hook to force it to recompute eager checkpointed regions
# in eager. This could be removed if we can disable the partitioner for this
# graph segment.
| GraphExecGroup |
python | great-expectations__great_expectations | great_expectations/metrics/column/aggregate_non_null_count.py | {
"start": 265,
"end": 432
} | class ____(ColumnMetric[ColumnAggregateNonNullCountResult]):
"""Count of non-null values in a column"""
name = "column.non_null_count"
| ColumnAggregateNonNullCount |
python | ray-project__ray | rllib/algorithms/iql/iql.py | {
"start": 704,
"end": 8230
} | class ____(MARWILConfig):
"""Defines a configuration class from which a new IQL Algorithm can be built
.. testcode::
:skipif: True
from ray.rllib.algorithms.iql import IQLConfig
# Run this from the ray directory root.
config = IQLConfig().training(actor_lr=0.00001, gamma=0.99)
config = config.offline_data(
input_="./rllib/offline/tests/data/pendulum/pendulum-v1_enormous")
# Build an Algorithm object from the config and run 1 training iteration.
algo = config.build()
algo.train()
.. testcode::
:skipif: True
from ray.rllib.algorithms.iql import IQLConfig
from ray import tune
config = IQLConfig()
# Print out some default values.
print(config.beta)
# Update the config object.
config.training(
lr=tune.grid_search([0.001, 0.0001]), beta=0.75
)
# Set the config object's data path.
# Run this from the ray directory root.
config.offline_data(
input_="./rllib/offline/tests/data/pendulum/pendulum-v1_enormous"
)
# Set the config object's env, used for evaluation.
config.environment(env="Pendulum-v1")
# Use to_dict() to get the old-style python config dict
# when running with tune.
tune.Tuner(
"IQL",
param_space=config.to_dict(),
).fit()
"""
def __init__(self, algo_class=None):
super().__init__(algo_class=algo_class or IQL)
# fmt: off
# __sphinx_doc_begin__
# The temperature for the actor loss.
self.beta = 0.1
# The expectile to use in expectile regression.
self.expectile = 0.8
# The learning rates for the actor, critic and value network(s).
self.actor_lr = 3e-4
self.critic_lr = 3e-4
self.value_lr = 3e-4
# Set `lr` parameter to `None` and ensure it is not used.
self.lr = None
# If a twin-Q architecture should be used (advisable).
self.twin_q = True
# How often the target network should be updated.
self.target_network_update_freq = 0
# The weight for Polyak averaging.
self.tau = 1.0
# __sphinx_doc_end__
# fmt: on
@override(MARWILConfig)
def training(
self,
*,
twin_q: Optional[bool] = NotProvided,
expectile: Optional[float] = NotProvided,
actor_lr: Optional[LearningRateOrSchedule] = NotProvided,
critic_lr: Optional[LearningRateOrSchedule] = NotProvided,
value_lr: Optional[LearningRateOrSchedule] = NotProvided,
target_network_update_freq: Optional[int] = NotProvided,
tau: Optional[float] = NotProvided,
**kwargs,
) -> "IQLConfig":
"""Sets the training related configuration.
Args:
beta: The temperature to scaling advantages in exponential terms.
Must be >> 0.0. The higher this parameter the less greedy
(exploitative) the policy becomes. It also means that the policy
is fitting less to the best actions in the dataset.
twin_q: If a twin-Q architecture should be used (advisable).
expectile: The expectile to use in expectile regression for the value
function. For high expectiles the value function tries to match
the upper tail of the Q-value distribution.
actor_lr: The learning rate for the actor network. Actor learning rates
greater than critic learning rates work well in experiments.
critic_lr: The learning rate for the Q-network. Critic learning rates
greater than value function learning rates work well in experiments.
value_lr: The learning rate for the value function network.
target_network_update_freq: The number of timesteps in between the target
Q-network is fixed. Note, too high values here could harm convergence.
The target network is updated via Polyak-averaging.
tau: The update parameter for Polyak-averaging of the target Q-network.
The higher this value the faster the weights move towards the actual
Q-network.
Return:
This updated `AlgorithmConfig` object.
"""
super().training(**kwargs)
if twin_q is not NotProvided:
self.twin_q = twin_q
if expectile is not NotProvided:
self.expectile = expectile
if actor_lr is not NotProvided:
self.actor_lr = actor_lr
if critic_lr is not NotProvided:
self.critic_lr = critic_lr
if value_lr is not NotProvided:
self.value_lr = value_lr
if target_network_update_freq is not NotProvided:
self.target_network_update_freq = target_network_update_freq
if tau is not NotProvided:
self.tau = tau
return self
@override(MARWILConfig)
def get_default_learner_class(self) -> Union[Type["Learner"], str]:
if self.framework_str == "torch":
from ray.rllib.algorithms.iql.torch.iql_torch_learner import IQLTorchLearner
return IQLTorchLearner
else:
raise ValueError(
f"The framework {self.framework_str} is not supported. "
"Use `'torch'` instead."
)
@override(MARWILConfig)
def get_default_rl_module_spec(self) -> RLModuleSpecType:
if self.framework_str == "torch":
from ray.rllib.algorithms.iql.torch.default_iql_torch_rl_module import (
DefaultIQLTorchRLModule,
)
return RLModuleSpec(module_class=DefaultIQLTorchRLModule)
else:
raise ValueError(
f"The framework {self.framework_str} is not supported. "
"Use `torch` instead."
)
@override(MARWILConfig)
def build_learner_connector(
self,
input_observation_space,
input_action_space,
device=None,
):
pipeline = super().build_learner_connector(
input_observation_space=input_observation_space,
input_action_space=input_action_space,
device=device,
)
# Remove unneeded connectors from the MARWIL connector pipeline.
pipeline.remove("AddOneTsToEpisodesAndTruncate")
pipeline.remove("GeneralAdvantageEstimation")
# Prepend the "add-NEXT_OBS-from-episodes-to-train-batch" connector piece (right
# after the corresponding "add-OBS-..." default piece).
pipeline.insert_after(
AddObservationsFromEpisodesToBatch,
AddNextObservationsFromEpisodesToTrainBatch(),
)
return pipeline
@override(MARWILConfig)
def validate(self) -> None:
# Call super's validation method.
super().validate()
# Ensure hyperparameters are meaningful.
if self.beta <= 0.0:
self._value_error(
"For meaningful results, `beta` (temperature) parameter must be >> 0.0!"
)
if not 0.0 < self.expectile < 1.0:
self._value_error(
"For meaningful results, `expectile` parameter must be in (0, 1)."
)
@property
def _model_config_auto_includes(self):
return super()._model_config_auto_includes | {"twin_q": self.twin_q}
| IQLConfig |
python | realpython__materials | solid-principles-python/app_dip.py | {
"start": 431,
"end": 634
} | class ____:
def __init__(self, data_source):
self.data_source = data_source
def display_data(self):
data = self.data_source.get_data()
print("Display data:", data)
| FrontEnd |
python | google__pytype | pytype/pytd/pytd_visitors.py | {
"start": 622,
"end": 2452
} | class ____(base_visitor.Visitor):
"""Visitor for converting ASTs back to canonical (sorted) ordering.
Note that this visitor intentionally does *not* sort a function's signatures,
as the signature order determines lookup order.
"""
def VisitTypeDeclUnit(self, node):
return pytd.TypeDeclUnit(
name=node.name,
constants=tuple(sorted(node.constants)),
type_params=tuple(sorted(node.type_params)),
functions=tuple(sorted(node.functions)),
classes=tuple(sorted(node.classes)),
aliases=tuple(sorted(node.aliases)),
)
def _PreserveConstantsOrdering(self, node):
# If we have a dataclass-like decorator we need to preserve the order of the
# class attributes, otherwise inheritance will not work correctly.
if any(
x.name in ("attr.s", "dataclasses.dataclass") for x in node.decorators
):
return True
# The order of a namedtuple's fields should always be preserved.
return IsNamedTuple(node)
def VisitClass(self, node):
if self._PreserveConstantsOrdering(node):
constants = node.constants
else:
constants = sorted(node.constants)
return pytd.Class(
name=node.name,
keywords=node.keywords,
bases=node.bases,
methods=tuple(sorted(node.methods)),
constants=tuple(constants),
decorators=tuple(sorted(node.decorators)),
classes=tuple(sorted(node.classes)),
slots=tuple(sorted(node.slots)) if node.slots is not None else None,
template=node.template,
)
def VisitSignature(self, node):
return node.Replace(
template=tuple(sorted(node.template)),
exceptions=tuple(sorted(node.exceptions)),
)
def VisitUnionType(self, node):
return pytd.UnionType(tuple(sorted(node.type_list)))
| CanonicalOrderingVisitor |
python | scrapy__scrapy | tests/test_scheduler.py | {
"start": 3753,
"end": 4780
} | class ____(SchedulerHandler):
def test_length(self):
assert not self.scheduler.has_pending_requests()
assert len(self.scheduler) == 0
for url in _URLS:
self.scheduler.enqueue_request(Request(url))
assert self.scheduler.has_pending_requests()
assert len(self.scheduler) == len(_URLS)
def test_dequeue(self):
for url in _URLS:
self.scheduler.enqueue_request(Request(url))
urls = set()
while self.scheduler.has_pending_requests():
urls.add(self.scheduler.next_request().url)
assert urls == _URLS
def test_dequeue_priorities(self):
for url, priority in _PRIORITIES:
self.scheduler.enqueue_request(Request(url, priority=priority))
priorities = []
while self.scheduler.has_pending_requests():
priorities.append(self.scheduler.next_request().priority)
assert priorities == sorted([x[1] for x in _PRIORITIES], key=lambda x: -x)
| TestSchedulerInMemoryBase |
python | redis__redis-py | redis/commands/search/field.py | {
"start": 4473,
"end": 5935
} | class ____(Field):
"""
Allows vector similarity queries against the value in this attribute.
See https://oss.redis.com/redisearch/Vectors/#vector_fields.
"""
def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
"""
Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
although it's also a Field.
``name`` is the name of the field.
``algorithm`` can be "FLAT", "HNSW", or "SVS-VAMANA".
``attributes`` each algorithm can have specific attributes. Some of them
are mandatory and some of them are optional. See
https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
for more information.
"""
sort = kwargs.get("sortable", False)
noindex = kwargs.get("no_index", False)
if sort or noindex:
raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
if algorithm.upper() not in ["FLAT", "HNSW", "SVS-VAMANA"]:
raise DataError(
"Realtime vector indexing supporting 3 Indexing Methods:"
"'FLAT', 'HNSW', and 'SVS-VAMANA'."
)
attr_li = []
for key, value in attributes.items():
attr_li.extend([key, value])
Field.__init__(
self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs
)
| VectorField |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 44588,
"end": 45215
} | class ____(TimeSeriesBaseModel, ObjectBaseModel):
"""An ORM representation of log data."""
name: str = Field(default=..., description="The logger name.")
level: int = Field(default=..., description="The log level.")
message: str = Field(default=..., description="The log message.")
timestamp: DateTime = Field(default=..., description="The log timestamp.")
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run ID associated with the log."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run ID associated with the log."
)
| Log |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-bedrock/llama_index/embeddings/bedrock/base.py | {
"start": 571,
"end": 645
} | class ____(str, Enum):
AMAZON = "amazon"
COHERE = "cohere"
| PROVIDERS |
python | astropy__astropy | astropy/cosmology/_src/tests/io/base.py | {
"start": 500,
"end": 915
} | class ____:
"""Base class for Cosmology I/O tests.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
| IOTestBase |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image17.py | {
"start": 315,
"end": 1449
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image17.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_header(
"&L&G&C&G&R&G",
{
"image_left": self.image_dir + "red.jpg",
"image_center": self.image_dir + "blue.jpg",
"image_right": self.image_dir + "red.jpg",
},
)
worksheet.set_footer(
"&L&G&C&G&R&G",
{
"image_left": self.image_dir + "blue.jpg",
"image_center": self.image_dir + "red.jpg",
"image_right": self.image_dir + "blue.jpg",
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | python-openxml__python-docx | src/docx/text/hyperlink.py | {
"start": 577,
"end": 5257
} | class ____(Parented):
"""Proxy object wrapping a `<w:hyperlink>` element.
A hyperlink occurs as a child of a paragraph, at the same level as a Run. A
hyperlink itself contains runs, which is where the visible text of the hyperlink is
stored.
"""
def __init__(self, hyperlink: CT_Hyperlink, parent: t.ProvidesStoryPart):
super().__init__(parent)
self._parent = parent
self._hyperlink = self._element = hyperlink
@property
def address(self) -> str:
"""The "URL" of the hyperlink (but not necessarily a web link).
While commonly a web link like "https://google.com" the hyperlink address can
take a variety of forms including "internal links" to bookmarked locations
within the document. When this hyperlink is an internal "jump" to for example a
heading from the table-of-contents (TOC), the address is blank. The bookmark
reference (like "_Toc147925734") is stored in the `.fragment` property.
"""
rId = self._hyperlink.rId
return self._parent.part.rels[rId].target_ref if rId else ""
@property
def contains_page_break(self) -> bool:
"""True when the text of this hyperlink is broken across page boundaries.
This is not uncommon and can happen for example when the hyperlink text is
multiple words and occurs in the last line of a page. Theoretically, a hyperlink
can contain more than one page break but that would be extremely uncommon in
practice. Still, this value should be understood to mean that "one-or-more"
rendered page breaks are present.
"""
return bool(self._hyperlink.lastRenderedPageBreaks)
@property
def fragment(self) -> str:
"""Reference like `#glossary` at end of URL that refers to a sub-resource.
Note that this value does not include the fragment-separator character ("#").
This value is known as a "named anchor" in an HTML context and "anchor" in the
MS API, but an "anchor" element (`<a>`) represents a full hyperlink in HTML so
we avoid confusion by using the more precise RFC 3986 naming "URI fragment".
These are also used to refer to bookmarks within the same document, in which
case the `.address` value with be blank ("") and this property will hold a
value like "_Toc147925734".
To reliably get an entire web URL you will need to concatenate this with the
`.address` value, separated by "#" when both are present. Consider using the
`.url` property for that purpose.
Word sometimes stores a fragment in this property (an XML attribute) and
sometimes with the address, depending on how the URL is inserted, so don't
depend on this field being empty to indicate no fragment is present.
"""
return self._hyperlink.anchor or ""
@property
def runs(self) -> list[Run]:
"""List of |Run| instances in this hyperlink.
Together these define the visible text of the hyperlink. The text of a hyperlink
is typically contained in a single run will be broken into multiple runs if for
example part of the hyperlink is bold or the text was changed after the document
was saved.
"""
return [Run(r, self._parent) for r in self._hyperlink.r_lst]
@property
def text(self) -> str:
"""String formed by concatenating the text of each run in the hyperlink.
Tabs and line breaks in the XML are mapped to ``\\t`` and ``\\n`` characters
respectively. Note that rendered page-breaks can occur within a hyperlink but
they are not reflected in this text.
"""
return self._hyperlink.text
@property
def url(self) -> str:
"""Convenience property to get web URLs from hyperlinks that contain them.
This value is the empty string ("") when there is no address portion, so its
boolean value can also be used to distinguish external URIs from internal "jump"
hyperlinks like those found in a table-of-contents.
Note that this value may also be a link to a file, so if you only want web-urls
you'll need to check for a protocol prefix like `https://`.
When both an address and fragment are present, the return value joins the two
separated by the fragment-separator hash ("#"). Otherwise this value is the same
as that of the `.address` property.
"""
address, fragment = self.address, self.fragment
if not address:
return ""
return f"{address}#{fragment}" if fragment else address
| Hyperlink |
python | google__pytype | pytype/tests/test_errors1.py | {
"start": 34945,
"end": 36379
} | class ____(test_base.BaseTest):
"""Test operations with no native symbol."""
def test_getitem(self):
errors = self.CheckWithErrors("""
def f(): v = []; return v['foo'] # unsupported-operands[e]
""")
self.assertErrorRegexes(
errors,
{"e": r"item retrieval.*list.*str.*__getitem__ on list.*SupportsIndex"},
)
def test_delitem(self):
errors = self.CheckWithErrors("""
def f(): v = {'foo': 3}; del v[3] # unsupported-operands[e]
""")
d = "dict[str, int]"
self.assertErrorSequences(
errors, {"e": ["item deletion", d, "int", f"__delitem__ on {d}", "str"]}
)
def test_setitem(self):
errors = self.CheckWithErrors("""
def f(): v = []; v['foo'] = 3 # unsupported-operands[e]
""")
self.assertErrorRegexes(
errors,
{
"e": (
r"item assignment.*list.*str.*__setitem__ on"
r" list.*SupportsIndex"
)
},
)
def test_contains(self):
errors = self.CheckWithErrors("""
def f(): return 'foo' in 3 # unsupported-operands[e]
""")
self.assertErrorRegexes(
errors, {"e": r"'in'.*int.*str.*'__contains__' on.*int"}
)
def test_recursion(self):
self.CheckWithErrors("""
def f():
if __random__:
f()
name_error # name-error
""")
if __name__ == "__main__":
test_base.main()
| NoSymbolOperationsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.